Linux/GCC compatibility: added some includes, ported the inline assembly to GNU syntax (and misc. cleanup in ia32.cpp)

This was SVN commit r2702.
Simon Brenner 2005-09-12 23:37:52 +00:00
parent 3fdf41bd54
commit a0a01e7a7d
30 changed files with 244 additions and 155 deletions
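
The core of the port is mechanical: every MSVC-style __asm{} block gains a GNU __asm__() counterpart, selected by the new HAVE_MS_ASM / HAVE_GNU_ASM macros. A minimal sketch of the pattern, based on the rdtsc read changed in ia32.cpp below (the exact bodies here are illustrative, condensed from the diff):

    #include "lib/types.h"              // for u64, as used elsewhere in the tree

    inline u64 rdtsc()
    {
        u64 c;
    #if HAVE_MS_ASM                     // MSVC/ICC: Intel syntax
        __asm
        {
            cpuid                       // serialize so rdtsc isn't reordered
            rdtsc
            mov dword ptr [c], eax
            mov dword ptr [c+4], edx
        }
    #elif HAVE_GNU_ASM                  // GCC: AT&T syntax
        __asm__ __volatile__ (
            "cpuid; rdtsc"
            : "=A" (c)                  // "=A" = the edx:eax pair, i.e. the full 64-bit counter
            :                           // no inputs
            : "ebx", "ecx");            // cpuid clobbers ebx and ecx
    #endif
        return c;
    }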

View File

@ -413,11 +413,11 @@ void CModel::SetTransform(const CMatrix3D& transform)
{
// call base class to set transform on this object
CRenderableObject::SetTransform(transform);
m_BoneMatricesValid=false;
InvalidateBounds();
GenerateBoneMatrices();
// now set transforms on props
const CMatrix3D* bonematrices=GetBoneMatrices();// TODO2: this or m_BoneMatrices? // (GetBoneMatrices updates m_BoneMatrices (when necessary) and returns it)
for (size_t i=0;i<m_Props.size();i++) {
const Prop& prop=m_Props[i];

View File

@ -64,7 +64,7 @@ bool CObjectEntry::BuildRandomVariant(const CObjectBase::variation_key& vars, CO
// Get the correct variant
u8 var_id = *vars_it++;
if (var_id < 0 || var_id >= grp->size())
if (var_id >= grp->size())
{
LOG(ERROR, LOG_CATEGORY, "Internal error (BuildRandomVariant: %d not in 0..%d)", var_id, grp->size()-1);
// Carry on as best we can, by using some arbitrary variant (rather

View File

@ -54,6 +54,7 @@ CTextureEntry* CTextureManager::FindTexture(CStr tag)
return m_TextureEntries[i];
}
LOG(WARNING, LOG_CATEGORY, "TextureManager: Couldn't find terrain %s\n", tag.c_str());
return 0;
}
@ -94,7 +95,9 @@ void CTextureManager::LoadTextures(CTerrainProperties *props, CStr path, const c
{
Handle dir=vfs_dir_open(path.c_str());
DirEnt dent;
path += '/';
if (dir > 0)
{
while (vfs_dir_next_ent(dir, &dent, fileext_filter) == 0)
@ -126,7 +129,7 @@ void CTextureManager::LoadTextures(CTerrainProperties *props, CStr path, const c
void CTextureManager::RecurseDirectory(CTerrainProperties *parentProps, CStr path)
{
LOG(NORMAL, LOG_CATEGORY, "CTextureManager::RecurseDirectory(%s)", path.c_str());
//LOG(NORMAL, LOG_CATEGORY, "CTextureManager::RecurseDirectory(%s)", path.c_str());
// Load terrains.xml first, if it exists
CTerrainProperties *props=NULL;
@ -144,17 +147,12 @@ void CTextureManager::RecurseDirectory(CTerrainProperties *parentProps, CStr pat
// Recurse once for each subdirectory
Handle dir=vfs_dir_open(path.c_str());
DirEnt dent;
vector<CStr> folders;
VFSUtil::FindFiles(path.c_str(), "/", folders);
if (dir > 0)
for (uint i=0;i<folders.size();i++)
{
while (vfs_dir_next_ent(dir, &dent, "/") == 0)
{
RecurseDirectory(props, path+dent.name+"/");
}
vfs_dir_close(dir);
RecurseDirectory(props, folders[i]);
}
for (int i=0;i<ARRAY_SIZE(SupportedTextureFormats);i++)
@ -166,7 +164,7 @@ void CTextureManager::RecurseDirectory(CTerrainProperties *parentProps, CStr pat
int CTextureManager::LoadTerrainTextures()
{
RecurseDirectory(NULL, "art/textures/terrain/types/");
RecurseDirectory(NULL, "art/textures/terrain/types");
return 0;
}

View File

@ -249,11 +249,18 @@
# define HAVE_X 0
#endif
// __asm{} blocks (Intel syntax)
// MSVC/ICC-style __asm{} blocks (Intel syntax)
#if MSC_VERSION
# define HAVE_ASM 1
# define HAVE_MS_ASM 1
#else
# define HAVE_ASM 0
# define HAVE_MS_ASM 0
#endif
// GNU-style __asm() blocks (AT&T syntax)
#if __GNUC__
# define HAVE_GNU_ASM 1
#else
# define HAVE_GNU_ASM 0
#endif
// precompiled headers (affects what precompiled.h pulls in; see there)
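
Downstream code is expected to test these two macros in tandem and fall back to an #error when neither form is available (ia32.cpp does exactly this). A tiny sketch of the intended guard pattern, using ia32_debug_break from ia32.cpp; the GNU body shown here is an assumption for illustration, not part of this commit:

    void ia32_debug_break()
    {
    #if HAVE_MS_ASM
        __asm int 3                         // Intel-syntax breakpoint
    #elif HAVE_GNU_ASM
        __asm__ __volatile__ ("int $3");    // AT&T equivalent (assumed)
    #else
    #error "ia32 code requires inline assembly support"
    #endif
    }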

View File

@ -497,7 +497,9 @@ void debug_set_thread_name(const char* name)
WARN_ERR(pthread_setspecific(tls_key, name));
#if OS_WIN
wdbg_set_thread_name(name);
#endif
}

View File

@ -29,7 +29,7 @@
#include "sdl.h"
#if CPU_IA32
extern void ia32_get_cpu_info();
#include "sysdep/ia32.h"
#endif
#if OS_WIN

View File

@ -155,7 +155,7 @@ int ilog2(uint n)
{
int bit_index; // return value
#if CPU_IA32
#if CPU_IA32 && HAVE_MS_ASM
__asm
{

View File

@ -296,6 +296,9 @@ enum LibError
#define UNREACHABLE __assume(0)
#endif
#ifdef __GNUC__
#define UNREACHABLE 0
#endif
#define ARRAY_SIZE(name) (sizeof(name) / sizeof(name[0]))

View File

@ -75,6 +75,8 @@ need only be renamed (e.g. _open, _stat).
#include <errno.h>
#include <dirent.h>
#include <sys/utsname.h>
#include <dlfcn.h>
#include <sys/socket.h>
#include <netdb.h>
#include <netinet/in.h>

View File

@ -38,7 +38,7 @@
#include "lib/string_s.h" // CRT secure string
#include "lib/debug.h"
#include "ps/Pyrogenesis.h" // MICROLOG and old error system
#include <assert.h> // assert()
//
// memory headers

View File

@ -102,7 +102,7 @@ extern int file_rel_chdir(const char* argv0, const char* rel_path);
// instantiate this.
struct DirIterator
{
char opaque[512];
char opaque[PATH_MAX+32];
};
// information about a directory entry filled by dir_next_ent.

View File

@ -7,6 +7,9 @@
#include "zip.h"
#include "sysdep/dir_watch.h"
#include <deque>
#include <list>
struct Stats
{
size_t mounted_dirs;
@ -29,17 +32,6 @@ void vfs_dump_stats()
}
enum MountType
{
// the relative ordering of values expresses efficiency of the sources
// (e.g. archives are faster than loose files). mount_should_replace
// makes use of this.
MT_NONE = 0,
MT_FILE = 1,
MT_ARCHIVE = 2
};
// location of a file: either archive or a real directory.
// not many instances => don't worry about efficiency.
struct Mount
@ -1094,4 +1086,4 @@ int x_io_close(XIo* xio)
debug_warn("VIo_dtor: invalid type");
return ERR_CORRUPTED;
}
}
}

View File

@ -7,8 +7,19 @@ extern void mount_shutdown();
struct Mount;
// If it was possible to forward-declare enums in gcc, this one wouldn't be in
// the header. Don't use.
enum MountType
{
// the relative ordering of values expresses efficiency of the sources
// (e.g. archives are faster than loose files). mount_should_replace
// makes use of this.
MT_NONE = 0,
MT_FILE = 1,
MT_ARCHIVE = 2
};
enum MountType;
struct TFile;
#include "file.h"

View File

@ -423,6 +423,7 @@ static inline Key GetKey(const T t)
void TDir::init()
{
flags = 0;
rd.m = 0;
rd.watch = 0;
children.init();

View File

@ -461,6 +461,7 @@ int tex_write(const char* fn, uint w, uint h, uint bpp, uint flags, void* in_img
const TexCodecVTbl* c;
CHECK_ERR(tex_codec_for_filename(fn, &c));
const size_t rounded_size = round_up(da.cur_size, FILE_BLOCK_SIZE);
// encode
int err = c->encode(&t, &da);
if(err < 0)
@ -469,7 +470,6 @@ int tex_write(const char* fn, uint w, uint h, uint bpp, uint flags, void* in_img
debug_warn("tex_writefailed");
goto fail;
}
const size_t rounded_size = round_up(da.cur_size, FILE_BLOCK_SIZE);
CHECK_ERR(da_set_size(&da, rounded_size));
WARN_ERR(vfs_store(fn, da.base, da.pos));
err = 0;

View File

@ -9,6 +9,8 @@ extern "C" {
#include "lib.h"
#include "lib/res/res.h"
#include "tex_codec.h"
#include <setjmp.h>
#if MSC_VERSION
# ifdef NDEBUG

View File

@ -13,8 +13,6 @@
#include "lib/res/res.h"
#include "tex_codec.h"
#if MSC_VERSION
// squelch "dtor / setjmp interaction" warnings.

View File

@ -415,4 +415,4 @@ ssize_t mem_size(void* p)
void mem_shutdown()
{
ptr_to_h_shutdown();
}
}

View File

@ -35,10 +35,12 @@
#include <vector>
#include <algorithm>
#if HAVE_ASM
#if !HAVE_MS_ASM && !HAVE_GNU_ASM
#error ia32.cpp needs inline assembly support!
#endif
// replace pathetic MS libc implementation.
#if OS_WIN
#if HAVE_MS_ASM
double _ceil(double f)
{
double r;
@ -64,6 +66,7 @@ __asm
inline u64 rdtsc()
{
u64 c;
#if HAVE_MS_ASM
__asm
{
cpuid
@ -71,6 +74,13 @@ __asm
mov dword ptr [c], eax
mov dword ptr [c+4], edx
}
#elif HAVE_GNU_ASM
__asm__ __volatile__ (
"cpuid; rdtsc"
: "=A" (c)
: /* no input */
: "ebx", "ecx" /* cpuid clobbers ebx and ecx */);
#endif
return c;
}
@ -78,6 +88,7 @@ __asm
// change FPU control word (used to set precision)
uint ia32_control87(uint new_cw, uint mask)
{
#if HAVE_MS_ASM
__asm
{
push eax
@ -93,21 +104,25 @@ __asm
fldcw [esp]
pop eax
}
UNUSED2(new_cw);
UNUSED2(mask);
#elif HAVE_GNU_ASM
uint cw;
asm ("fnstcw %0": "=m" (cw));
cw = (cw & ~mask) | (new_cw & mask);
asm ("fldcw %0": : "m" (cw));
#endif
return 0;
}
#if OS_WIN && HAVE_MS_ASM
void ia32_debug_break()
{
__asm int 3
}
/*
conclusions:
@ -173,18 +188,18 @@ void org_memcpy_amd(u8 *dest, const u8 *src, size_t n)
cmp ecx, TINY_BLOCK_COPY
jb $memcpy_ic_3 ; tiny? skip mmx copy
cmp ecx, 32*1024 ; don't align between 32k-64k because
cmp ecx, 32*1024 ;// don't align between 32k-64k because
jbe $memcpy_do_align ; it appears to be slower
cmp ecx, 64*1024
jbe $memcpy_align_done
$memcpy_do_align:
mov ecx, 8 ; a trick that's faster than rep movsb...
mov ecx, 8 ;// a trick that's faster than rep movsb...
sub ecx, edi ; align destination to qword
and ecx, 111b ; get the low bits
sub ebx, ecx ; update copy count
neg ecx ; set up to jump into the array
add ecx, offset $memcpy_align_done
jmp ecx ; jump to array of movsb's
jmp ecx ;// jump to array of movsb's
align 4
movsb
@ -241,7 +256,7 @@ $memcpy_ic_3:
and ecx, 1111b ; only look at the "remainder" bits
neg ecx ; set up to jump into the array
add ecx, offset $memcpy_last_few
jmp ecx ; jump to array of movsd's
jmp ecx ;// jump to array of movsd's
$memcpy_uc_test:
cmp ecx, UNCACHED_COPY/64 ; big enough? use block prefetch copy
@ -304,7 +319,7 @@ $memcpy_bp_2:
dec eax ; count down the cache lines
jnz $memcpy_bp_2 ; keep grabbing more lines into cache
mov eax, CACHEBLOCK ; now that it's in cache, do the copy
mov eax, CACHEBLOCK ; now that it is in cache, do the copy
align 16
$memcpy_bp_3:
movq mm0, [esi ] ; read 64 bits
@ -352,10 +367,10 @@ align 4
movsd
movsd
$memcpy_last_few: ; dword aligned from before movsd's
$memcpy_last_few: ;// dword aligned from before movsd's
mov ecx, ebx ; has valid low 2 bits of the byte count
and ecx, 11b ; the last few cows must come home
jz $memcpy_final ; no more, let's leave
jz $memcpy_final ; no more, just leave
rep movsb ; the last 1, 2, or 3 bytes
$memcpy_final:
@ -367,14 +382,6 @@ $memcpy_final:
}
// align to 8 bytes
// this align may be slower in [32kb, 64kb]
// rationale: always called - it may speed up TINY as well
@ -711,7 +718,7 @@ __asm
shr ecx, 2 ; dword count
neg ecx
add ecx, offset $movsd_table_end
jmp ecx ; jump to array of movsd's
jmp ecx ; jump to array of movsd:s
// The smallest copy uses the X86 "movsd" instruction, in an optimized
@ -737,7 +744,7 @@ $movsd_table_end:
mov ecx, ebx ; has valid low 2 bits of the byte count
and ecx, 11b ; the last few cows must come home
jz $memcpy_final ; no more, let's leave
jz $memcpy_final ; no more, skip tail
rep movsb ; the last 1, 2, or 3 bytes
$memcpy_final:
END
@ -760,7 +767,7 @@ __asm
shr ecx, 2 ; dword count
neg ecx
add ecx, offset $movsd_table_end
jmp ecx ; jump to array of movsd's
jmp ecx ; jump to array of movsd:s
// The smallest copy uses the X86 "movsd" instruction, in an optimized
@ -788,7 +795,7 @@ $movsd_table_end:
and ecx, 11b ; the last few cows must come home
neg ecx
add ecx, offset $movsb_table_end
jmp ecx ; jump to array of movsb's
jmp ecx ; jump to array of movsb:s
movsb
movsb
@ -1013,54 +1020,22 @@ static int test()
}
//int dummy = test();
#endif // OS_WIN && HAVE_MS_ASM
void ia32_memcpy(void* dst, const void* src, size_t nbytes)
{
#if OS_WIN
// large
if(nbytes >= 64*KiB)
ia32_memcpy_nt(dst, src, nbytes);
// small
// TODO: implement small memcpy
else
memcpy(dst, src, nbytes);
#else
memcpy(dst, src, nbytes);
#endif
}
//-----------------------------------------------------------------------------
@ -1086,6 +1061,8 @@ static int test()
// note: don't use __declspec(naked) because we need to access one parameter
// from C code and VC can't handle that correctly.
#if HAVE_MS_ASM
bool __cdecl CAS_(uintptr_t* location, uintptr_t expected, uintptr_t new_value)
{
// try to see if caller isn't passing in an address
@ -1124,35 +1101,21 @@ $no_lock:
}
}
// enforce strong memory ordering.
void mfence()
{
// Pentium IV
if(ia32_cap(SSE2))
__asm mfence
}
void serialize()
{
__asm cpuid
}
#else // i.e. #if !HAVE_ASM
#else // #if HAVE_MS_ASM
bool CAS_(uintptr_t* location, uintptr_t expected, uintptr_t new_value)
{
uintptr_t prev;
debug_assert(location >= (uintptr_t*)0x10000);
ASSERT(location >= (uintptr_t*)0x10000);
__asm__ __volatile__("lock; cmpxchgl %1,%2"
: "=a"(prev) // %0: Result in eax should be stored in prev
: "q"(new_value), // %1: new_value -> e[abcd]x
"m"(*location), // %2: Memory operand
"0"(expected) // Stored in same place as %0
: "memory"); // We make changes in memory
__asm__ __volatile__(
"lock; cmpxchgl %1,%2"
: "=a"(prev) // %0: Result in eax should be stored in prev
: "q"(new_value), // %1: new_value -> e[abcd]x
"m"(*location), // %2: Memory operand
"0"(expected) // Stored in same place as %0
: "memory"); // We make changes in memory
return prev == expected;
}
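
As a usage note (not part of this commit), CAS_ as declared above supports the usual optimistic retry loop; a minimal illustrative sketch:

    // illustrative only: lock-free increment built on CAS_
    void lockfree_inc(uintptr_t* location)
    {
        uintptr_t old;
        do
            old = *location;                    // snapshot the current value
        while(!CAS_(location, old, old + 1));   // retry if another thread raced us
    }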
@ -1170,22 +1133,30 @@ void atomic_add(intptr_t* location, intptr_t increment)
: "memory"); /* clobbers memory (*location) */
}
#endif // #if HAVE_MS_ASM
// enforce strong memory ordering.
void mfence()
{
// no cpu caps stored in gcc compiles, so we can't check for SSE2 support
/*
if (ia32_cap(SSE2))
// Pentium IV
if(ia32_cap(SSE2))
#if HAVE_MS_ASM
__asm mfence
#elif HAVE_GNU_ASM
__asm__ __volatile__ ("mfence");
*/
#endif
}
void serialize()
{
#if HAVE_MS_ASM
__asm cpuid
#elif HAVE_GNU_ASM
__asm__ __volatile__ ("cpuid");
#endif
}
#endif // #if HAVE_ASM
//-----------------------------------------------------------------------------
// CPU / feature detect
@ -1231,6 +1202,7 @@ static bool cpuid(u32 func, u32* regs)
return false;
// (optimized for size)
#if HAVE_MS_ASM
__asm
{
mov eax, [func]
@ -1244,11 +1216,20 @@ __asm
xchg eax, edx
stosd
}
#elif HAVE_GNU_ASM
asm("cpuid"
: "=a" (regs[0]),
"=b" (regs[1]),
"=c" (regs[2]),
"=d" (regs[3])
: "a" (func));
#endif
return true;
}
#if HAVE_MS_ASM
// (optimized for size)
static void cpuid()
{
@ -1327,7 +1308,7 @@ $1: lea eax, [0x80000004+esi] ;// 0x80000002 .. 4
no_brand_str:
;// get extended feature flags
mov eax, [0x80000001]
mov eax, 0x80000001
cpuid
mov [caps+8], ecx
mov [caps+12], edx
@ -1340,6 +1321,60 @@ no_cpuid:
} // __asm
} // cpuid()
#elif HAVE_GNU_ASM
// optimized for readability ;-)
static void cpuid()
{
u32 regs[4];
u32 flags;
/*
Try to set bit 21 in flags, and check if the cpu kept it set - only
CPUs that implement the CPUID instruction allow that bit to be toggled.
*/
asm("pushfl; "
"orb $32, 2(%%esp); " /* bit 16+5 = 21 */
"popfl; "
"pushfl; "
"popl %%eax; "
: "=a" (flags));
if (!(flags & (1<<21))) // bit 21 reset? don't have cpuid -> abort
return;
// weird register ordering for vendor string (12 chars in b/d/cx)
// eax is ignored here
asm("xorl %%eax, %%eax; cpuid"
: "=b" (((u32 *)vendor_str)[0]),
"=d" (((u32 *)vendor_str)[1]),
"=c" (((u32 *)vendor_str)[2])
:
: "eax");
cpuid(1, regs);
memcpy(caps, &regs[2], 8);
model = (regs[0] & 0xff) >> 4; // eax[7:4]
family = (regs[0] & 0xf00) >> 8; // eax[11:8]
ext_family = (regs[0] & 0xf00000) >> 20; // eax[23:20]
cpuid(0x80000000, regs);
max_ext_func=regs[0];
if (max_ext_func < 0x80000000)
return; /* no ext functions - skip remaining tests */
if (max_ext_func >= 0x80000004)
{
/* get brand string */
cpuid(0x80000002, (u32*)cpu_type);
cpuid(0x80000003, (u32*)(cpu_type+16));
cpuid(0x80000004, (u32*)(cpu_type+32));
have_brand_string = true;
}
cpuid(0x80000001, regs);
memcpy(&caps[2], &regs[2], 8);
} // cpuid()
#endif
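
For reference, the masks and shifts above decode the CPUID leaf-1 signature in eax; a short worked example with an assumed value:

    // sketch: decoding a hypothetical leaf-1 signature (eax = 0x00000F29, a Pentium 4-class value)
    u32 eax      = 0x00000F29;
    u32 stepping =  eax        & 0xf;   // bits  3:0 -> 9
    u32 model    = (eax >>  4) & 0xf;   // bits  7:4 -> 2
    u32 family   = (eax >>  8) & 0xf;   // bits 11:8 -> 0xF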
bool ia32_cap(CpuCap cap)
{
@ -1520,6 +1555,7 @@ static void measure_cpu_freq()
int get_cur_processor_id()
{
int apic_id;
#if HAVE_MS_ASM
__asm {
push 1
pop eax
@ -1527,6 +1563,13 @@ int get_cur_processor_id()
shr ebx, 24
mov [apic_id], ebx ; ebx[31:24]
}
#elif HAVE_GNU_ASM
asm("cpuid; "
"shr $24, %%ebx"
: "=b" (apic_id)
: "a" (1)
: "ecx", "edx");
#endif
return apic_id;
}
@ -1563,6 +1606,7 @@ static void check_smp()
// get number of logical CPUs per package
// (the same for all packages on this system)
int log_cpus_per_package;
#if HAVE_MS_ASM
__asm {
push 1
pop eax
@ -1571,11 +1615,19 @@ static void check_smp()
and ebx, 0xff
mov log_cpus_per_package, ebx ; ebx[23:16]
}
#elif HAVE_GNU_ASM
asm("cpuid; "
"shrl $16, %%ebx; "
"andl $0xff, %%ebx"
: "=b" (log_cpus_per_package)
: "a" (1)
: "ecx", "edx");
#endif
// logical CPUs are initialized after one another =>
// they have the same physical ID.
const int id = get_cur_processor_id();
const int phys_shift = log2(log_cpus_per_package);
const int phys_shift = ilog2(log_cpus_per_package);
const int phys_id = id >> phys_shift;
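// (e.g. with HyperThreading, log_cpus_per_package = 2 => phys_shift = 1,
// so APIC IDs 0 and 1 both map to physical package 0)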
// more than 1 physical CPU found
@ -1617,7 +1669,10 @@ void ia32_get_cpu_info()
get_cpu_type();
check_speedstep();
// linux doesn't have CPU affinity API:s (that we've found...)
#if OS_WIN
on_each_cpu(check_smp);
#endif
measure_cpu_freq();

View File

@ -19,7 +19,7 @@
#define IA32_H
#if !CPU_IA32
#error "including ia32.h without CPU_IA32=1
#error "including ia32.h without CPU_IA32=1"
#endif
#include "lib/types.h"

View File

@ -4,7 +4,7 @@
#include <string>
#include "lib.h"
#include "res/file.h"
#include "res/file/file.h"
#include <fam.h>

View File

@ -3,6 +3,7 @@
#include "lib.h"
#include "timer.h"
#include "sysdep/sysdep.h"
#include "debug.h"
#include <stdarg.h>
#include <sys/types.h>
@ -102,7 +103,7 @@ void udbg_launch_debugger()
}
}
void* debug_get_nth_caller(uint n)
void* debug_get_nth_caller(uint n, void *context)
{
// bt[0] == debug_get_nth_caller
// bt[1] == caller of get_nth_caller
@ -390,7 +391,7 @@ int debug_write_crashlog(const char* file, wchar_t* header, void* context)
abort();
}
int debug_is_bogus_pointer(const void* p)
int debug_is_pointer_bogus(const void* p)
{
return false;
}

View File

@ -90,7 +90,7 @@ ErrorReaction display_error_impl(const wchar_t* text, int flags)
// loading) OpenGL cursor.
int sys_cursor_create(int UNUSED(w), int UNUSED(h), void* UNUSED(img),
int UNUSED(hx), UNUSED(int hy), void** cursor)
int UNUSED(hx), int UNUSED(hy), void** cursor)
{
*cursor = 0;
return 0;

View File

@ -1194,7 +1194,7 @@ int CRenderer::LoadAlphaMaps()
m_AlphaMapCoords[i].v1=1.0f;
}
for (i=0;i<NumAlphaMaps;i++)
for (int i=0;i<NumAlphaMaps;i++)
ogl_tex_free(textures[i]);
// upload the composite texture

View File

@ -1,4 +1,5 @@
#include <list>
#include <map>
namespace AtlasMessage
{
@ -57,9 +58,9 @@ struct DataCommand : public Command // so commands can optionally override (De|C
#define END_COMMAND(t) \
}; \
namespace CAT2(hndlr_, __LINE__) { struct init { init() { \
bool notAlreadyRegisted = GetCmdHandlers().insert(std::pair<std::string, cmdHandler>("c"#t, &c##t##::Create)).second; \
namespace CAT2(t, __LINE__) { struct init { init() { \
bool notAlreadyRegisted = GetCmdHandlers().insert(std::pair<std::string, cmdHandler>("c"#t, &c##t ::Create)).second; \
assert(notAlreadyRegisted); \
} } init; };
}
}

View File

@ -4,7 +4,7 @@
#include "MessagePasserImpl.h"
#include "Messages.h"
#include "handlers/MessageHandler.h"
#include "Handlers/MessageHandler.h"
#include "InputProcessor.h"
@ -13,10 +13,15 @@
#include "lib/timer.h"
#include "ps/CLogger.h"
#include <assert.h>
using namespace AtlasMessage;
extern void Render_();
#define __declspec(spec_)
#define __stdcall
extern "C" { __declspec(dllimport) int __stdcall SwapBuffers(void*); }
// HACK (and not exactly portable)
//
@ -159,7 +164,9 @@ bool BeginAtlas(int argc, char* argv[], void* dll)
{
Render_();
glFinish();
#if OS_WIN
SwapBuffers((void*)state.currentDC);
#endif
}
// Be nice to the processor if we're not doing anything useful, but

View File

@ -3,6 +3,8 @@
#include "MessageHandler.h"
#include "../GameLoop.h"
#include <assert.h>
namespace AtlasMessage {
@ -21,4 +23,4 @@ void fScrollConstant(IMessage* msg)
}
REGISTER(ScrollConstant);
}
}

View File

@ -24,10 +24,12 @@ void fCommandString_init(IMessage*)
oglInit();
Init_(g_GameLoop->argc, g_GameLoop->argv, false);
#if OS_WIN
// HACK (to stop things looking very ugly when scrolling) - should
// use proper config system.
if(oglHaveExtension("WGL_EXT_swap_control"))
wglSwapIntervalEXT(1);
#endif
// Set attributes for the game:
// Start without a map
@ -80,9 +82,10 @@ REGISTER(CommandString_render_disable);
void fSetContext(IMessage* msg)
{
mSetContext* cmd = static_cast<mSetContext*>(msg);
// TODO: portability
#if OS_WIN
wglMakeCurrent((HDC)cmd->hdc, (HGLRC)cmd->hglrc);
g_GameLoop->currentDC = cmd->hdc;
#endif
}
REGISTER(SetContext);

View File

@ -1,5 +1,7 @@
#include "../Messages.h"
#include <map>
namespace AtlasMessage
{

View File

@ -5,6 +5,8 @@
#include "ps/Game.h"
#include "graphics/Camera.h"
#include <assert.h>
bool InputProcessor::ProcessInput(GameLoopState* state)
{
if (! g_Game)
@ -24,9 +26,9 @@ bool InputProcessor::ProcessInput(GameLoopState* state)
float l;
l = forwards.GetLength();
assert(abs(l - 1.f) < 0.0001f);
assert(fabsf(l - 1.f) < 0.0001f);
l = leftwards.GetLength();
assert(abs(l - 1.f) < 0.0001f);
assert(fabsf(l - 1.f) < 0.0001f);
bool moved = false;