/**
* This file is part of Hercules.
* http://herc.ws - http://github.com/HerculesWS/Hercules
*
* Copyright (C) 2012-2018 Hercules Dev Team
* Copyright (C) rAthena Project (www.rathena.org)
*
* Hercules is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef COMMON_SPINLOCK_H
#define COMMON_SPINLOCK_H
// CAS-based Spinlock Implementation
//
// CamelCase names are chosen for consistency with Microsoft's WinAPI,
// which uses this naming style for its CriticalSection functions.
//
// Author: Florian Wilkemeyer <fw@f-ws.de>
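//
// Overview of the implementation below:
// - The 'lock' word holds the owning thread id (0 = unlocked) and is
//   acquired with a compare-and-swap (InterlockedCompareExchange).
// - The lock is recursive: a thread that already owns it only increments
//   the 'nest' counter, and the lock word is cleared again once the
//   nesting level drops back to zero in LeaveSpinLock.
// - 'sync_lock' is a small inner CAS lock that serializes the ownership
//   check performed by EnterSpinLock and LeaveSpinLock.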
#include "common/atomic.h"
#include "common/cbasetypes.h"
#include "common/thread.h"
#ifdef WIN32
#include "common/winapi.h"
#endif
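
// The lock structure is aligned to a 64-byte boundary (a common cache-line
// size) so that separate locks do not end up sharing a cache line.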
#ifdef WIN32
struct __declspec(align(64)) spin_lock {
	volatile LONG lock;      // owning thread id, 0 while unlocked
	volatile LONG nest;      // nesting (recursion) level
	volatile LONG sync_lock; // inner lock guarding the ownership check
};
#else
struct spin_lock {
	volatile int32 lock;      // owning thread id, 0 while unlocked
	volatile int32 nest;      // nesting (recursion) level
	volatile int32 sync_lock; // inner lock guarding the ownership check
} __attribute__((aligned(64)));
#endif

#ifdef HERCULES_CORE
static forceinline void InitializeSpinLock(struct spin_lock *lck)
{
lck->lock = 0;
lck->nest = 0;
lck->sync_lock = 0;
}

static forceinline void FinalizeSpinLock(struct spin_lock *lck)
{
	// Nothing to release; kept for symmetry with InitializeSpinLock.
	return;
}

// Acquire/release the inner sync lock: spin (yielding between attempts)
// until the CAS from 0 to 1 succeeds; release it by storing 0 again.
#define getsynclock(l) do { if(InterlockedCompareExchange((l), 1, 0) == 0) break; thread->yield(); } while(/*always*/1)
#define dropsynclock(l) do { InterlockedExchange((l), 0); } while(0)

static forceinline void EnterSpinLock(struct spin_lock *lck)
{
int tid = thread->get_tid();
	// Acquire the sync lock and check whether the calling thread already
	// owns the spinlock; if it does, just increase the nesting level.
getsynclock(&lck->sync_lock);
if (InterlockedCompareExchange(&lck->lock, tid, tid) == tid) {
InterlockedIncrement(&lck->nest);
dropsynclock(&lck->sync_lock);
return; // Got Lock
}
	// Not the owner: release the sync lock.
	dropsynclock(&lck->sync_lock);

	// Spin until the lock has been acquired.
while (true) {
if (InterlockedCompareExchange(&lck->lock, tid, 0) == 0) {
InterlockedIncrement(&lck->nest);
return; // Got Lock
}
		thread->yield(); // Yield the CPU so another thread (ideally the owner) can run.
}
}
static forceinline void LeaveSpinLock(struct spin_lock *lck)
{
int tid = thread->get_tid();
getsynclock(&lck->sync_lock);
if (InterlockedCompareExchange(&lck->lock, tid, tid) == tid) { // this thread owns the lock.
if (InterlockedDecrement(&lck->nest) == 0)
InterlockedExchange(&lck->lock, 0); // Unlock!
}
dropsynclock(&lck->sync_lock);
}
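
/*
 * Usage sketch (illustrative only; 'counter_lock', 'counter' and the
 * wrapper functions are hypothetical names, not part of this header;
 * assumes the thread subsystem is initialized so thread->get_tid() works):
 *
 *     static struct spin_lock counter_lock;
 *     static int counter = 0;
 *
 *     void counters_init(void)
 *     {
 *         InitializeSpinLock(&counter_lock);
 *     }
 *
 *     void counter_increment(void)
 *     {
 *         EnterSpinLock(&counter_lock);   // recursive: safe to re-enter from the owner
 *         counter++;
 *         LeaveSpinLock(&counter_lock);   // released once nesting drops to zero
 *     }
 *
 *     void counters_final(void)
 *     {
 *         FinalizeSpinLock(&counter_lock); // currently a no-op
 *     }
 */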
#endif // HERCULES_CORE
#endif /* COMMON_SPINLOCK_H */