diff options
author | blacksirius <blacksirius@54d463be-8e91-2dee-dedb-b68131a5f0ec> | 2012-06-03 18:53:02 +0000 |
---|---|---|
committer | blacksirius <blacksirius@54d463be-8e91-2dee-dedb-b68131a5f0ec> | 2012-06-03 18:53:02 +0000 |
commit | 4feeab8c61334ec73172fa01cda951dafde2505f (patch) | |
tree | 078521eec3b26bd7fcfd42c9d1615d35be6b3ec9 /src/common/atomic.h | |
parent | 40ede8fd21bdb39c01665c90aa420a03a712c96c (diff) | |
download | hercules-4feeab8c61334ec73172fa01cda951dafde2505f.tar.gz hercules-4feeab8c61334ec73172fa01cda951dafde2505f.tar.bz2 hercules-4feeab8c61334ec73172fa01cda951dafde2505f.tar.xz hercules-4feeab8c61334ec73172fa01cda951dafde2505f.zip |
feature merge bs-coreoptimize->trunk: Atomic Operations, Threading, Spinlock implementation. [commit 1/2, windows will follow up]
- Added Abstractions for Atomic Operations (lock instructions.. windows guys may know this as Interlocked* stuff ..)
- Added Threading api abstraction for Pthread based OS's and Windows
- Added Spinlock Implementation (uses CAS / if you need more information - just read the source - it's simple.)
- Due to Interlocked(Compare)Exchange64 .. we now require at least i686 (Pentium Pro) for 32-bit builds :)
You may also notice some performance improvements when using 32-bit builds, due to the "newer" minimal arch
the compiler is now able to use CMOVs ....
================================================================
= Important Warning:
================================================================
Don't use threading at the moment: Athena is not thread-safe!
You'll mess up everything when accessing data from other threads - no synchronization is provided.
A way to process tasks asynchronously will come up after / with the new socket system.
git-svn-id: https://rathena.svn.sourceforge.net/svnroot/rathena/trunk@16221 54d463be-8e91-2dee-dedb-b68131a5f0ec
Diffstat (limited to 'src/common/atomic.h')
-rw-r--r-- | src/common/atomic.h | 157 |
1 files changed, 157 insertions, 0 deletions
diff --git a/src/common/atomic.h b/src/common/atomic.h new file mode 100644 index 000000000..7a9e8c4cc --- /dev/null +++ b/src/common/atomic.h @@ -0,0 +1,157 @@ +#ifndef _rA_ATOMIC_H_ +#define _rA_ATOMIC_H_ + +// Atomic Operations +// (Interlocked CompareExchange, Add .. and so on ..) +// +// Implementation varies / depends on: +// - Architecture +// - Compiler +// - Operating System +// +// our Abstraction is fully API-Compatible to Microsofts implementation @ NT5.0+ +// +#include "../common/cbasetypes.h" + +#if defined(_MSC_VER) +#include "../common/winapi.h" + +#if !defined(_M_X64) +// When compiling for windows 32bit, the 8byte interlocked operations are not provided by microsoft +// (because they need at least i586 so its not generic enough.. ... ) +forceinline int64 InterlockedCompareExchange64(volatile int64 *dest, int64 exch, int64 _cmp){ + _asm{ + lea esi,_cmp; + lea edi,exch; + + mov eax,[esi]; + mov edx,4[esi]; + mov ebx,[edi]; + mov ecx,4[edi]; + mov esi,dest; + + lock CMPXCHG8B [esi]; + } +} + + +forceinline volatile int64 InterlockedIncrement64(volatile int64 *addend){ + __int64 old; + do{ + old = *addend; + }while(InterlockedCompareExchange64(addend, (old+1), old) != old); + + return (old + 1); +} + + + +forceinline volatile int64 InterlockedDecrement64(volatile int64 *addend){ + __int64 old; + + do{ + old = *addend; + }while(InterlockedCompareExchange64(addend, (old-1), old) != old); + + return (old - 1); +} + +forceinline volatile int64 InterlockedExchangeAdd64(volatile int64 *addend, int64 increment){ + __int64 old; + + do{ + old = *addend; + }while(InterlockedCompareExchange64(addend, (old + increment), old) != old); + + return old; +} + +forceinline volatile int64 InterlockedExchange64(volatile int64 *target, int64 val){ + __int64 old; + do{ + old = *target; + }while(InterlockedCompareExchange64(target, val, old) != old); + + return old; +} + +#endif //endif 32bit windows + +#elif defined(__GNUC__) + +#if !defined(__x86_64__) && 
!defined(__i386__) +#error Your Target Platfrom is not supported +#endif + +static forceinline volatile int64 InterlockedExchangeAdd64(volatile int64 *addend, int64 increment){ + return __sync_fetch_and_add(addend, increment); +}//end: InterlockedExchangeAdd64() + + +static forceinline volatile int32 InterlockedExchangeAdd(volatile int32 *addend, int32 increment){ + return __sync_fetch_and_add(addend, increment); +}//end: InterlockedExchangeAdd() + + +static forceinline volatile int64 InterlockedIncrement64(volatile int64 *addend){ + return __sync_add_and_fetch(addend, 1); +}//end: InterlockedIncrement64() + + +static forceinline volatile int32 InterlockedIncrement(volatile int32 *addend){ + return __sync_add_and_fetch(addend, 1); +}//end: InterlockedIncrement() + + +static forceinline volatile int64 InterlockedDecrement64(volatile int64 *addend){ + return __sync_sub_and_fetch(addend, 1); +}//end: InterlockedDecrement64() + + +static forceinline volatile int32 InterlockedDecrement(volatile int32 *addend){ + return __sync_sub_and_fetch(addend, 1); +}//end: InterlockedDecrement() + + +static forceinline volatile int64 InterlockedCompareExchange64(volatile int64 *dest, int64 exch, int64 cmp){ + return __sync_val_compare_and_swap(dest, cmp, exch); +}//end: InterlockedCompareExchange64() + + +static forceinline volatile int32 InterlockedCompareExchange(volatile int32 *dest, int32 exch, int32 cmp){ + return __sync_val_compare_and_swap(dest, cmp, exch); +}//end: InterlockedCompareExchnage() + + +static forceinline volatile int64 InterlockedExchange64(volatile int64 *target, int64 val){ + int ret; + + __asm__ __volatile__( + "lock xchg %2, (%1)" + :"=r" (ret) + :"r" (target), "0" (val) + :"memory" + ); + + return ret; +}//end: InterlockedExchange64() + + +static forceinline volatile int32 InterlockedExchange(volatile int32 *target, int32 val){ + int ret; + + __asm__ __volatile__( + "lock xchgl %2, (%1)" + :"=r" (ret) + :"r" (target), "0" (val) + :"memory" + ); + + return 
ret; +}//end: InterlockedExchange() + + +#endif //endif compiler decission + + +#endif |