author     blacksirius <blacksirius@54d463be-8e91-2dee-dedb-b68131a5f0ec>   2012-06-11 23:31:19 +0000
committer  blacksirius <blacksirius@54d463be-8e91-2dee-dedb-b68131a5f0ec>   2012-06-11 23:31:19 +0000
commit     0598cc569db02ee93d7fc0470defecb64e995f5c (patch)
tree       6b011ade431b1186329e5c2a468abd6c77fe5e65 /src/common/atomic.h
parent     b43920212e20b2169254b567266037f15485a02a (diff)
Fixed bugreport:5990 OSX compile errors / warnings
Bug in Detail:
- Misplaced LDFLAGS usage in some Makefile templates (they were passed to the compiler instead of the linker).
- Wrong usage of volatile (a function's return value cannot be declared volatile :D).
- Unsupported inline ASM using the lock xchg operation on llvm-gcc (apple loves this ..).
Fixed by:
- Removed the unnecessary LDFLAGS.
- Removed the unnecessary volatile return declarators.
- Replaced the inline-assembly InterlockedExchange and InterlockedExchange64 with GCC atomic intrinsics (a minimal sketch of the replacement intrinsic follows below).
git-svn-id: https://rathena.svn.sourceforge.net/svnroot/rathena/trunk@16270 54d463be-8e91-2dee-dedb-b68131a5f0ec
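For illustration, here is a minimal standalone sketch (not part of the commit; the file name and main() harness are invented for the example) of the __sync_lock_test_and_set builtin that now backs InterlockedExchange(). It atomically stores the new value and returns the previous one, which is the same xchg semantic the removed assembly provided. One caveat: the builtin is documented as an acquire barrier only, while lock xchg on x86 is a full barrier; on x86 targets GCC emits an xchg instruction anyway, so the observable behavior matches there.

/* sketch.c -- illustrative example, not repository code.
 * Build: gcc sketch.c && ./a.out  (needs gcc >= 4.1 or llvm-gcc) */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	volatile int32_t target = 5;

	/* Atomically write 9 into target and return the value it held
	 * before -- the exchange semantic of InterlockedExchange(). */
	int32_t old = __sync_lock_test_and_set(&target, 9);

	printf("old=%d, new=%d\n", (int)old, (int)target); /* old=5, new=9 */
	return 0;
}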
Diffstat (limited to 'src/common/atomic.h')
-rw-r--r--  src/common/atomic.h  44
1 file changed, 13 insertions(+), 31 deletions(-)
diff --git a/src/common/atomic.h b/src/common/atomic.h
index c09d8d386..b1a4bda92 100644
--- a/src/common/atomic.h
+++ b/src/common/atomic.h
@@ -86,71 +86,53 @@ forceinline volatile int64 InterlockedExchange64(volatile int64 *target, int64 v
 #error Your Target Platfrom is not supported
 #endif
 
-static forceinline volatile int64 InterlockedExchangeAdd64(volatile int64 *addend, int64 increment){
+static forceinline int64 InterlockedExchangeAdd64(volatile int64 *addend, int64 increment){
 	return __sync_fetch_and_add(addend, increment);
 }//end: InterlockedExchangeAdd64()
 
-static forceinline volatile int32 InterlockedExchangeAdd(volatile int32 *addend, int32 increment){
+static forceinline int32 InterlockedExchangeAdd(volatile int32 *addend, int32 increment){
 	return __sync_fetch_and_add(addend, increment);
 }//end: InterlockedExchangeAdd()
 
-static forceinline volatile int64 InterlockedIncrement64(volatile int64 *addend){
+static forceinline int64 InterlockedIncrement64(volatile int64 *addend){
 	return __sync_add_and_fetch(addend, 1);
 }//end: InterlockedIncrement64()
 
-static forceinline volatile int32 InterlockedIncrement(volatile int32 *addend){
+static forceinline int32 InterlockedIncrement(volatile int32 *addend){
 	return __sync_add_and_fetch(addend, 1);
 }//end: InterlockedIncrement()
 
-static forceinline volatile int64 InterlockedDecrement64(volatile int64 *addend){
+static forceinline int64 InterlockedDecrement64(volatile int64 *addend){
 	return __sync_sub_and_fetch(addend, 1);
 }//end: InterlockedDecrement64()
 
-static forceinline volatile int32 InterlockedDecrement(volatile int32 *addend){
+static forceinline int32 InterlockedDecrement(volatile int32 *addend){
 	return __sync_sub_and_fetch(addend, 1);
 }//end: InterlockedDecrement()
 
-static forceinline volatile int64 InterlockedCompareExchange64(volatile int64 *dest, int64 exch, int64 cmp){
+static forceinline int64 InterlockedCompareExchange64(volatile int64 *dest, int64 exch, int64 cmp){
 	return __sync_val_compare_and_swap(dest, cmp, exch);
 }//end: InterlockedCompareExchange64()
 
-static forceinline volatile int32 InterlockedCompareExchange(volatile int32 *dest, int32 exch, int32 cmp){
-	return __sync_val_compare_and_swap(dest, cmp, exch);
+static forceinline int32 InterlockedCompareExchange(volatile int32 *dest, int32 exch, int32 cmp){
+	return __sync_val_compare_and_swap(dest, cmp, exch);
 }//end: InterlockedCompareExchnage()
 
-static forceinline volatile int64 InterlockedExchange64(volatile int64 *target, int64 val){
-	int ret;
-
-	__asm__ __volatile__(
-		"lock xchg %2, (%1)"
-		:"=r" (ret)
-		:"r" (target), "0" (val)
-		:"memory"
-	);
-
-	return ret;
+static forceinline int64 InterlockedExchange64(volatile int64 *target, int64 val){
+	return __sync_lock_test_and_set(target, val);
 }//end: InterlockedExchange64()
 
-static forceinline volatile int32 InterlockedExchange(volatile int32 *target, int32 val){
-	int ret;
-
-	__asm__ __volatile__(
-		"lock xchgl %2, (%1)"
-		:"=r" (ret)
-		:"r" (target), "0" (val)
-		:"memory"
-	);
-
-	return ret;
+static forceinline int32 InterlockedExchange(volatile int32 *target, int32 val){
+	return __sync_lock_test_and_set(target, val);
 }//end: InterlockedExchange()
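As a usage sketch of the patched wrappers (illustrative only: the refcount scenario is an assumption, and "static inline" stands in for the header's forceinline macro so the snippet compiles standalone; the wrapper bodies themselves are copied from the diff above):

/* usage.c -- illustrative only, not repository code.
 * Build: gcc usage.c && ./a.out */
#include <stdio.h>
#include <stdint.h>
typedef int32_t int32;

static inline int32 InterlockedIncrement(volatile int32 *addend){
	return __sync_add_and_fetch(addend, 1);  /* ++*addend, atomically */
}

static inline int32 InterlockedCompareExchange(volatile int32 *dest, int32 exch, int32 cmp){
	/* Store exch into *dest only if *dest == cmp; return the prior value. */
	return __sync_val_compare_and_swap(dest, cmp, exch);
}

int main(void)
{
	volatile int32 refcount = 0;

	InterlockedIncrement(&refcount);                            /* refcount = 1 */
	int32 prev = InterlockedCompareExchange(&refcount, 5, 1);   /* 1 == 1, so refcount = 5 */
	printf("prev=%d, refcount=%d\n", (int)prev, (int)refcount); /* prev=1, refcount=5 */
	return 0;
}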