/*
 * ebr.cpp: epoch-based reclamation (EBR).
 * Forked from zhangnaifan/BzTree.
 */
#include "ebr.h"
#include "PMwCAS.h"
#include <assert.h>
#include <atomic>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#define ACTIVE_FLAG (0x80000000U)
typedef struct ebr_tls {
/*
* - A local epoch counter for each thread.
* - The epoch counter may have the "active" flag set.
* - Thread list entry (pointer).
*/
unsigned local_epoch;
struct ebr_tls * next;
} ebr_tls_t;
thread_local ebr_tls_t * local_ebr = nullptr;
struct ebr {
/*
* - There is a global epoch counter which can be 0, 1 or 2.
* - A list of the registered threads (their TLS entries).
*/
unsigned global_epoch;
ebr_tls_t * list;
};
ebr_t *
ebr_create(void)
{
ebr_t *ebr;
if ((ebr = (ebr_t *)calloc(1, sizeof(ebr_t))) == NULL) {
return NULL;
}
return ebr;
}
void
ebr_destroy(ebr_t *ebr)
{
free(ebr);
}
/*
* ebr_register: register the current worker (thread/process) for EBR.
*
* => Returns 0 on success and -1 on failure.
*/
int
ebr_register(ebr_t *ebr)
{
ebr_tls_t *head;
if (local_ebr)
return 0;
if ((local_ebr = (ebr_tls_t *)malloc(sizeof(ebr_tls_t))) == NULL) {
return -1;
}
//cout << "alloc addr " << hex << local_ebr << endl;
memset(local_ebr, 0, sizeof(ebr_tls_t));
uint64_t r;
do {
head = ebr->list;
//cout << "ini read " << hex << ebr->list << endl;
local_ebr->next = head;
r = __sync_val_compare_and_swap((uint64_t *)&ebr->list,
(uint64_t)head,
(uint64_t)local_ebr);
//cout << "r CAS " << hex << r << endl;
} while (r != (uint64_t)head);
// cout << "final " << hex << ebr->list << endl;
return 0;
}
/*
* ebr_enter: mark the entrance to the critical path.
*/
void
ebr_enter(ebr_t *ebr)
{
assert(local_ebr);
/*
* Set the "active" flag and set the local epoch to global
* epoch (i.e. observe the global epoch). Ensure that the
* epoch is observed before any loads in the critical path.
*/
local_ebr->local_epoch = ebr->global_epoch | ACTIVE_FLAG;
std::atomic_thread_fence(std::memory_order_seq_cst);
}
/*
* ebr_exit: mark the exit of the critical path.
*/
void
ebr_exit(ebr_t *ebr)
{
ebr_tls_t *t;
t = local_ebr;
assert(t != NULL);
/*
* Clear the "active" flag. Must ensure that any stores in
* the critical path reach global visibility before that.
*/
std::atomic_thread_fence(std::memory_order_seq_cst);
assert(t->local_epoch & ACTIVE_FLAG);
t->local_epoch = 0;
}
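/*
 * Reader-side usage sketch (illustrative only: `g_ebr`, `node_t` and
 * `find_node()` are hypothetical names, not part of this file or ebr.h):
 *
 *	static ebr_t *g_ebr;			// created once with ebr_create()
 *
 *	void *
 *	reader_lookup(uint64_t key)
 *	{
 *		ebr_register(g_ebr);		// no-op if this thread is already registered
 *		ebr_enter(g_ebr);		// observe the global epoch, set "active"
 *		node_t *n = find_node(key);	// lock-free reads of shared objects
 *		void *val = n ? n->value : NULL;
 *		ebr_exit(g_ebr);		// leave the critical path
 *		return val;
 *	}
 *
 * Objects unlinked by writers while a reader is between ebr_enter() and
 * ebr_exit() must not be freed until ebr_sync() has advanced the global
 * epoch past the epoch that reader observed.
 */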
/*
* ebr_sync: attempt to synchronise and announce a new epoch.
*
* => Synchronisation points must be serialised.
* => Return true if a new epoch was announced.
* => Return the epoch ready for reclamation.
*/
bool
ebr_sync(ebr_t *ebr, unsigned *gc_epoch)
{
unsigned epoch;
ebr_tls_t *t;
/*
* Ensure that any loads or stores on the writer side reach
* global visibility. We want to allow the callers to
* assume that the ebr_sync() call serves as a full barrier.
*/
epoch = ebr->global_epoch;
std::atomic_thread_fence(std::memory_order_seq_cst);
/*
* Check whether all active workers observed the global epoch.
*/
t = ebr->list;
while (t) {
const unsigned local_epoch = t->local_epoch; // plain load of the worker's epoch
const bool active = (local_epoch & ACTIVE_FLAG) != 0;
if (active && (local_epoch != (epoch | ACTIVE_FLAG))) {
/* No, not ready. */
*gc_epoch = ebr_gc_epoch(ebr);
return false;
}
t = t->next;
}
/* Yes: increment and announce a new global epoch. */
ebr->global_epoch = (epoch + 1) % 3;
/*
* Let the new global epoch be 'e'. At this point:
*
* => Active workers: might still be running in the critical path
* in the e-1 epoch or might be already entering a new critical
* path and observing the new epoch e.
*
* => Inactive workers: might become active by entering a critical
* path before or after the global epoch counter was incremented,
* observing either e-1 or e.
*
* => Note that the active workers cannot have a stale observation
* of the e-2 epoch at this point (there is no ABA problem using
* the clock arithmetic).
*
* => Therefore, there can be no workers still running the critical
* path in the e-2 epoch. This is the epoch ready for G/C.
*/
*gc_epoch = ebr_gc_epoch(ebr);
return true;
}
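/*
 * Worked example of the reasoning above (assuming the three-epoch scheme
 * used in this file): suppose the global epoch was 1 and this ebr_sync()
 * call advanced it to e = 2.  Active workers are either still in epoch 1
 * (e-1) or entering new critical paths and observing epoch 2 (e).  No
 * worker can still hold epoch 0 (e-2), because every active worker had
 * already observed epoch 1 before this announcement.  Hence objects staged
 * while the epoch was 0 are ready for reclamation, and ebr_gc_epoch()
 * returns (2 + 1) % 3 == 0.
 */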
/*
* ebr_staging_epoch: return the epoch where objects can be staged
* for reclamation.
*/
unsigned
ebr_staging_epoch(ebr_t *ebr)
{
/* The current epoch. */
return ebr->global_epoch;
}
/*
* ebr_gc_epoch: return the epoch where objects are ready to be
* reclaimed i.e. it is guaranteed to be safe to destroy them.
*/
unsigned
ebr_gc_epoch(ebr_t *ebr)
{
/*
* Since we use only 3 epochs, e-2 is just the next global
* epoch with clock arithmetic.
*/
return (ebr->global_epoch + 1) % 3;
}
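/*
 * Reclaimer-side usage sketch tying the two epochs together (illustrative
 * only: `obj_t`, `limbo`, `retire_obj()` and `free_list()` are hypothetical
 * helpers, not part of this file or ebr.h):
 *
 *	static obj_t *limbo[3];			// one retire list per epoch
 *
 *	void
 *	retire_obj(ebr_t *ebr, obj_t *obj)
 *	{
 *		// Stage the unlinked object in the current (staging) epoch.
 *		unsigned e = ebr_staging_epoch(ebr);
 *		obj->next = limbo[e];
 *		limbo[e] = obj;
 *	}
 *
 *	void
 *	reclaim(ebr_t *ebr)
 *	{
 *		unsigned gc_epoch;
 *		// ebr_sync() calls must be serialised by the caller
 *		// (e.g. performed under a single reclaimer lock).
 *		if (ebr_sync(ebr, &gc_epoch)) {
 *			// All workers have left gc_epoch; its objects are safe to free.
 *			free_list(&limbo[gc_epoch]);
 *		}
 *	}
 */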