1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
|
#ifndef _COMMON_SPINLOCK_H_
#define _COMMON_SPINLOCK_H_
//
// CAS based Spinlock Implementation
//
// CamelCase names are chosen to be consistent with Microsoft's WinAPI
// which implements CriticalSection by this naming...
//
// Author: Florian Wilkemeyer <fw@f-ws.de>
//
// Copyright (c) rAthena Project (www.rathena.org) - Licensed under GNU GPL
// For more information, see LICENCE in the main folder
//
//
#include "../common/atomic.h"
#include "../common/cbasetypes.h"
#include "../common/thread.h"
#ifdef WIN32
#include "../common/winapi.h"
#endif
#ifdef WIN32
// Recursive (re-entrant) CAS spinlock state.
// Aligned to 64 bytes — a typical cache-line size — so two locks placed in
// adjacent memory do not share a cache line (avoids false sharing).
typedef struct __declspec( align(64) ) SPIN_LOCK{
	volatile LONG lock;      // 0 = unlocked, otherwise tid of the owning thread
	volatile LONG nest;      // recursion depth held by the current owner
	volatile LONG sync_lock; // internal guard serializing the owner/nest checks
} SPIN_LOCK;
#else
// Non-Windows variant: same layout/semantics, GCC alignment attribute,
// int32 instead of the WinAPI LONG type.
typedef struct SPIN_LOCK{
	volatile int32 lock;      // 0 = unlocked, otherwise tid of the owning thread
	volatile int32 nest;      // nesting level (recursion depth of the owner)
	volatile int32 sync_lock; // internal guard serializing the owner/nest checks
} __attribute__((aligned(64))) SPIN_LOCK;
#endif
/// Initializes a spinlock to the released state.
/// Must be called before the lock is used by any thread; all fields are
/// reset so no thread is considered an owner.
static forceinline void InitializeSpinLock(SPIN_LOCK *lck){
	lck->sync_lock = 0;
	lck->nest      = 0;
	lck->lock      = 0;
}
/// Finalizes (destroys) a spinlock.
/// This implementation holds no external resources, so nothing needs to be
/// released; the function exists to keep the Initialize/Finalize API
/// symmetric for callers and future implementations.
static forceinline void FinalizeSpinLock(SPIN_LOCK *lck){
	(void)lck; // intentionally unused; silences -Wunused-parameter
	return;
}
// Acquire the internal sync lock pointed to by l: spin on a CAS (0 -> 1),
// yielding the CPU between failed attempts; the loop exits only once the
// exchange succeeds. Internal helper for Enter/LeaveSpinLock.
#define getsynclock(l) do { if(InterlockedCompareExchange((l), 1, 0) == 0) break; rathread_yield(); } while(/*always*/1)
// Release the internal sync lock by atomically storing 0.
#define dropsynclock(l) do { InterlockedExchange((l), 0); } while(0)
/// Acquires the spinlock for the calling thread, busy-waiting (with yields)
/// until it becomes available. Re-entrant: if the calling thread already
/// owns the lock, only the nesting counter is incremented.
static forceinline void EnterSpinLock(SPIN_LOCK *lck){
	int tid = rathread_get_tid();

	// Get Sync Lock && Check if the requester thread already owns the lock.
	// if it owns, increase nesting level
	getsynclock(&lck->sync_lock);
	if(InterlockedCompareExchange(&lck->lock, tid, tid) == tid){
		// CAS(lock, tid, tid) compares without modifying: equal means we
		// already own the lock, so just bump the recursion depth.
		InterlockedIncrement(&lck->nest);
		dropsynclock(&lck->sync_lock);
		return; // Got Lock
	}
	// drop sync lock
	dropsynclock(&lck->sync_lock);

	// Spin until we've got it !
	// NOTE(review): 0 doubles as the "unlocked" value, so this CAS assumes
	// rathread_get_tid() never returns 0 for a thread using this lock —
	// TODO confirm against rathread's tid allocation.
	while(1){
		if(InterlockedCompareExchange(&lck->lock, tid, 0) == 0){
			InterlockedIncrement(&lck->nest);
			return; // Got Lock
		}
		rathread_yield(); // Force ctxswitch to another thread.
	}
}
/// Releases one level of ownership of the spinlock held by the calling
/// thread. The lock is actually freed (lock -> 0) only when the nesting
/// counter drops to zero. If the calling thread does not own the lock,
/// the call is a silent no-op.
static forceinline void LeaveSpinLock(SPIN_LOCK *lck){
	int tid = rathread_get_tid();

	getsynclock(&lck->sync_lock);
	if(InterlockedCompareExchange(&lck->lock, tid, tid) == tid){ // this thread owns the lock.
		if(InterlockedDecrement(&lck->nest) == 0)
			InterlockedExchange(&lck->lock, 0); // Unlock!
	}
	dropsynclock(&lck->sync_lock);
}
#endif /* _COMMON_SPINLOCK_H_ */
|