From 94dca529b1bdacc75437e57d276200a5d9eac49b Mon Sep 17 00:00:00 2001
From: Ykkrosh
Date: Tue, 23 Nov 2010 19:20:03 +0000
Subject: [PATCH] Fix OS X build errors

This was SVN commit r8684.
---
 build/premake/premake.lua            |  6 ++++++
 source/lib/sysdep/arch/ia32/ia32.cpp | 29 ++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/build/premake/premake.lua b/build/premake/premake.lua
index e2bf9309ef..7502865a9d 100644
--- a/build/premake/premake.lua
+++ b/build/premake/premake.lua
@@ -216,6 +216,12 @@ function package_set_build_flags()
 		if arch == "x86" then
 			tinsert(package.buildoptions, "-march=i686")
 		end
+
+		-- We don't want to require SSE2 everywhere yet, but OS X headers do
+		-- require it (and Intel Macs always have it) so enable it here
+		if OS == "macosx" then
+			tinsert(package.buildoptions, "-msse2")
+		end
 	end

 	tinsert(package.buildoptions, {
diff --git a/source/lib/sysdep/arch/ia32/ia32.cpp b/source/lib/sysdep/arch/ia32/ia32.cpp
index f6390b9c09..a5d864ed06 100644
--- a/source/lib/sysdep/arch/ia32/ia32.cpp
+++ b/source/lib/sysdep/arch/ia32/ia32.cpp
@@ -174,6 +174,35 @@ intptr_t cpu_AtomicAdd(volatile intptr_t* location, intptr_t increment)
 	return _InterlockedExchangeAdd((P32)location, increment);
 }

+#elif OS_MACOSX
+
+#include <libkern/OSAtomic.h>
+
+#if ARCH_IA32
+intptr_t cpu_AtomicAdd(volatile intptr_t* location, intptr_t increment)
+{
+	cassert(sizeof(intptr_t) == sizeof(int32_t));
+	return OSAtomicAdd32Barrier(increment, (volatile int32_t*)location);
+}
+#else
+intptr_t cpu_AtomicAdd(volatile intptr_t* location, intptr_t increment)
+{
+	cassert(sizeof(intptr_t) == sizeof(int64_t));
+	return OSAtomicAdd64Barrier(increment, (volatile int64_t*)location);
+}
+#endif
+
+bool cpu_CAS(volatile intptr_t* location, intptr_t expected, intptr_t newValue)
+{
+	cassert(sizeof(intptr_t) == sizeof(void*));
+	return OSAtomicCompareAndSwapPtrBarrier((void*)expected, (void*)newValue, (void* volatile*)location);
+}
+
+bool cpu_CAS64(volatile i64* location, i64 expected, i64 newValue)
+{
+	return OSAtomicCompareAndSwap64Barrier(expected, newValue, location);
+}
+
 #elif GCC_VERSION

 intptr_t cpu_AtomicAdd(volatile intptr_t* location, intptr_t increment)