2 * Copyright 2011-2015 Samy Al Bahra.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * The implementation here is inspired by the work described in:
29 * Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
30 * of Cambridge Computing Laboratory.
33 #include <ck_backoff.h>
38 #include <ck_stdbool.h>
39 #include <ck_string.h>
42 * Only three distinct values are used for reclamation, but reclamation occurs
43 * at e+2 rather than e+1. Any thread in a "critical section" would have
44 * acquired some snapshot (e) of the global epoch value (e_g) and set an active
45 * flag. Any hazardous references will only occur after a full memory barrier.
46 * For example, assume an initial e_g value of 1, e value of 0 and active value
54 * Any serialized reads may observe e = 0 or e = 1 with active = 0, or e = 0 or
55 * e = 1 with active = 1. The e_g value can only go from 1 to 2 if every thread
56 * has already observed the value of "1" (or the value we are incrementing
57 * from). This guarantees that for any given value e_g, any threads within
58 * critical sections (referred to as "active" threads from here on) would have
59 * an e value of e_g-1 or e_g. This also means that hazardous references may be
60 * shared in both e_g-1 and e_g even if they are logically deleted in e_g.
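 *
 * A hypothetical interleaving illustrating this invariant (a sketch added for
 * illustration, not from the original commentary), assuming e_g is initially 1:
 *
 *   reader: e = e_g                  (observes 1)
 *   reader: active = 1
 *   reader: memory_barrier()
 *   writer: e_g = e_g + 1            (epoch ticks from 1 to 2)
 *   writer: logically deletes an object at e_g = 2
 *   reader: may still hold a reference to that object, with e = 1 = e_g - 1
 *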
62 * For example, assume all threads have an e value of e_g. Another thread may
63 * increment e_g to e_g+1. Older threads may have a reference to an object
64 * which is only deleted in e_g+1. It could be that reader threads are
65 * executing some hash table look-ups, while some other writer thread (which
66 * causes an epoch counter tick) actually deletes the same items that reader
67 * threads are looking up (this writer thread having an e value of e_g+1).
68 * This is possible if the writer thread re-observes the epoch after the
71 * Pseudo-code for writer:
79 * Pseudo-code for reader:
82 * ck_pr_inc(&x->value);
85 * Of course, it is also possible for references logically deleted at e_g-1 to
86 * still be accessed at e_g as threads are "active" at the same time
87 * (real-world time) mutating shared objects.
89 * Now, if the epoch counter is ticked to e_g+1, then no new hazardous
90 * references could exist to objects logically deleted at e_g-1. The reason for
91 * this is that at e_g+1, all epoch read-side critical sections started at
92 * e_g-1 must have been completed. If any epoch read-side critical sections at
93 * e_g-1 were still active, then we would never increment to e_g+1 (active != 0
94 * && e != e_g). Additionally, e_g may still have hazardous references to
95 * objects logically deleted at e_g-1 which means objects logically deleted at
96 * e_g-1 cannot be deleted at e_g+1 unless all threads have observed e_g+1
97 * (since it is valid for active threads to be at e_g and threads at e_g still
98 * require safe memory accesses).
100 * However, at e_g+2, all active threads must be either at e_g+1 or e_g+2.
101 * Though e_g+2 may share hazardous references with e_g+1, and e_g+1 shares
102 * hazardous references with e_g, no active threads are at e_g or e_g-1. This
103 * means no hazardous references could exist to objects deleted at e_g-1 (at
106 * To summarize these important points,
107 * 1) Active threads will always have a value of e_g or e_g-1.
108 * 2) Items that are logically deleted at e_g or e_g-1 cannot be physically
110 * 3) Objects logically deleted at e_g-1 can be physically destroyed at e_g+2
111 * or at e_g+1 if no threads are at e_g.
113 * Last but not least, if we are at e_g+2, then no active thread is at e_g
114 * which means it is safe to apply modulo-3 arithmetic to the e_g value in
115 * order to re-use e_g to represent the e_g+3 state. This means it is
116 * sufficient to represent e_g using only the values 0, 1 or 2. Every time a
117 * thread re-visits an e_g (which can be determined with a non-empty deferral
118 * list), it can assume objects in the e_g deferral list involved at least
119 * three e_g transitions and are thus safe for physical deletion.
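 *
 * As a worked illustration (not part of the original commentary): objects
 * deferred while the global epoch was 5 land in the bucket for 5 mod 3 = 2.
 * The next epoch value mapping to bucket 2 is 8, and reaching it implies at
 * least three epoch transitions (5->6, 6->7, 7->8), so anything still sitting
 * in that deferral list is safe to physically destroy before new deferrals
 * are queued onto it.
 *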
121 * Blocking semantics for epoch reclamation have additional restrictions.
122 * Though we only require three deferral lists, reasonable blocking semantics
123 * must handle bursty write workloads more gracefully; these could easily
124 * cause e_g wrap-around if modulo-3 arithmetic is used, which allows for
125 * easy-to-trigger livelock situations. The workaround to this is to not
126 * apply modulo arithmetic to e_g but only to deferral list indexing.
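 *
 * For orientation, a rough sketch of how the public interface is typically
 * driven (an illustration only; ck_epoch.h has the exact prototypes, and the
 * record, section and object names below are hypothetical):
 *
 *   ck_epoch_init(&epoch);
 *   ck_epoch_register(&epoch, &record);        (one record per thread)
 *
 *   Read side:
 *     ck_epoch_begin(&record, &section);
 *     ... dereference shared objects ...
 *     ck_epoch_end(&record, &section);
 *
 *   Write side:
 *     ... unlink object from the shared structure ...
 *     ck_epoch_call(&record, &object->epoch_entry, object_destroy);
 *     ck_epoch_poll(&record);                  (or ck_epoch_barrier(&record))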
128 #define CK_EPOCH_GRACE 3U
131 CK_EPOCH_STATE_USED = 0,
132 CK_EPOCH_STATE_FREE = 1
135 CK_STACK_CONTAINER(struct ck_epoch_record, record_next,
136 ck_epoch_record_container)
137 CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry,
138 ck_epoch_entry_container)
140 #define CK_EPOCH_SENSE_MASK (CK_EPOCH_SENSE - 1)
143 _ck_epoch_delref(struct ck_epoch_record *record,
144 struct ck_epoch_section *section)
146 struct ck_epoch_ref *current, *other;
147 unsigned int i = section->bucket;
149 current = &record->local.bucket[i];
152 if (current->count > 0)
156 * If the current bucket no longer has any references, then
157 * determine whether we have already transitioned into a newer
158 * epoch. If so, then make sure to update our shared snapshot
159 * to allow for forward progress.
161 * If no other active bucket exists, then the record will go
162 * inactive in order to allow for forward progress.
164 other = &record->local.bucket[(i + 1) &
165 CK_EPOCH_SENSE_MASK];
166 if (other->count > 0 &&
167 ((int)(current->epoch - other->epoch) < 0)) {
169 * The other epoch value is actually the newest,
172 ck_pr_store_uint(&record->epoch, other->epoch);
179 _ck_epoch_addref(struct ck_epoch_record *record,
180 struct ck_epoch_section *section)
182 struct ck_epoch *global = record->global;
183 struct ck_epoch_ref *ref;
184 unsigned int epoch, i;
186 epoch = ck_pr_load_uint(&global->epoch);
187 i = epoch & CK_EPOCH_SENSE_MASK;
188 ref = &record->local.bucket[i];
190 if (ref->count++ == 0) {
192 struct ck_epoch_ref *previous;
195 * The system has already ticked. If another non-zero bucket
196 * exists, make sure to order our observations with respect
197 * to it. Otherwise, it is possible to acquire a reference
198 * from the previous epoch generation.
200 * On TSO architectures, the monotonicity of the global counter
201 * and load-{store, load} ordering are sufficient to guarantee
204 previous = &record->local.bucket[(i + 1) &
205 CK_EPOCH_SENSE_MASK];
206 if (previous->count > 0)
207 ck_pr_fence_acqrel();
208 #endif /* !CK_MD_TSO */
211 * If this is a new reference into the current
212 * bucket, then cache the associated epoch value.
222 ck_epoch_init(struct ck_epoch *global)
225 ck_stack_init(&global->records);
232 struct ck_epoch_record *
233 ck_epoch_recycle(struct ck_epoch *global)
235 struct ck_epoch_record *record;
236 ck_stack_entry_t *cursor;
239 if (ck_pr_load_uint(&global->n_free) == 0)
242 CK_STACK_FOREACH(&global->records, cursor) {
243 record = ck_epoch_record_container(cursor);
245 if (ck_pr_load_uint(&record->state) == CK_EPOCH_STATE_FREE) {
246 /* Serialize with respect to deferral list clean-up. */
248 state = ck_pr_fas_uint(&record->state,
249 CK_EPOCH_STATE_USED);
250 if (state == CK_EPOCH_STATE_FREE) {
251 ck_pr_dec_uint(&global->n_free);
261 ck_epoch_register(struct ck_epoch *global, struct ck_epoch_record *record)
265 record->global = global;
266 record->state = CK_EPOCH_STATE_USED;
269 record->n_dispatch = 0;
271 record->n_pending = 0;
272 memset(&record->local, 0, sizeof record->local);
274 for (i = 0; i < CK_EPOCH_LENGTH; i++)
275 ck_stack_init(&record->pending[i]);
278 ck_stack_push_upmc(&global->records, &record->record_next);
283 ck_epoch_unregister(struct ck_epoch_record *record)
285 struct ck_epoch *global = record->global;
290 record->n_dispatch = 0;
292 record->n_pending = 0;
293 memset(&record->local, 0, sizeof record->local);
295 for (i = 0; i < CK_EPOCH_LENGTH; i++)
296 ck_stack_init(&record->pending[i]);
299 ck_pr_store_uint(&record->state, CK_EPOCH_STATE_FREE);
300 ck_pr_inc_uint(&global->n_free);
304 static struct ck_epoch_record *
305 ck_epoch_scan(struct ck_epoch *global,
306 struct ck_epoch_record *cr,
310 ck_stack_entry_t *cursor;
313 cursor = CK_STACK_FIRST(&global->records);
316 cursor = &cr->record_next;
320 while (cursor != NULL) {
321 unsigned int state, active;
323 cr = ck_epoch_record_container(cursor);
325 state = ck_pr_load_uint(&cr->state);
326 if (state & CK_EPOCH_STATE_FREE) {
327 cursor = CK_STACK_NEXT(cursor);
331 active = ck_pr_load_uint(&cr->active);
334 if (active != 0 && ck_pr_load_uint(&cr->epoch) != epoch)
337 cursor = CK_STACK_NEXT(cursor);
344 ck_epoch_dispatch(struct ck_epoch_record *record, unsigned int e)
346 unsigned int epoch = e & (CK_EPOCH_LENGTH - 1);
347 ck_stack_entry_t *head, *next, *cursor;
350 head = CK_STACK_FIRST(&record->pending[epoch]);
351 ck_stack_init(&record->pending[epoch]);
353 for (cursor = head; cursor != NULL; cursor = next) {
354 struct ck_epoch_entry *entry =
355 ck_epoch_entry_container(cursor);
357 next = CK_STACK_NEXT(cursor);
358 entry->function(entry);
362 if (record->n_pending > record->n_peak)
363 record->n_peak = record->n_pending;
365 record->n_dispatch += i;
366 record->n_pending -= i;
371 * Reclaim all objects associated with a record.
374 ck_epoch_reclaim(struct ck_epoch_record *record)
378 for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
379 ck_epoch_dispatch(record, epoch);
385 * This function must not be called from within a read-side section.
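 *
 * A typical blocking reclamation pattern built on it might look as follows
 * (an illustrative sketch; "object" and its removal are hypothetical):
 *
 *   remove object from the shared structure;
 *   ck_epoch_synchronize(&record);
 *   free(object);
 *
 * ck_epoch_barrier additionally dispatches any deferred callbacks once the
 * grace period has been observed.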
388 ck_epoch_synchronize(struct ck_epoch_record *record)
390 struct ck_epoch *global = record->global;
391 struct ck_epoch_record *cr;
392 unsigned int delta, epoch, goal, i;
395 ck_pr_fence_memory();
398 * The observation of the global epoch must be ordered with respect to
399 * all prior operations. The re-ordering of loads is permitted given the
400 * monotonicity of the global epoch counter.
402 * If UINT_MAX concurrent mutations were to occur, then it is possible
403 * to encounter an ABA issue. If this is a concern, consider tuning
404 * write-side concurrency.
406 delta = epoch = ck_pr_load_uint(&global->epoch);
407 goal = epoch + CK_EPOCH_GRACE;
409 for (i = 0, cr = NULL; i < CK_EPOCH_GRACE - 1; cr = NULL, i++) {
413 * Determine whether all threads have observed the current
414 * epoch with respect to the updates on invocation.
416 while (cr = ck_epoch_scan(global, cr, delta, &active),
423 * Another writer may have already observed a grace
426 e_d = ck_pr_load_uint(&global->epoch);
434 * If we have observed all threads as inactive, then we assume
435 * we are at a grace period.
441 * Increment current epoch. CAS semantics are used to eliminate
442 * increment operations for synchronization that occurs for the
443 * same global epoch value snapshot.
445 * If we can guarantee there will only be one active barrier or
446 * epoch tick at a given time, then it is sufficient to use an
447 * increment operation. In a multi-barrier workload, however,
448 * it is possible to overflow the epoch value if we apply
449 * modulo-3 arithmetic.
451 r = ck_pr_cas_uint_value(&global->epoch, delta, delta + 1,
454 /* Order subsequent thread active checks. */
455 ck_pr_fence_atomic_load();
458 * If the CAS has succeeded, then set delta to the latest snapshot.
459 * Otherwise, we have just acquired the latest snapshot.
465 if ((goal > epoch) & (delta >= goal)) {
467 * Right now, epoch overflow is handled as an edge
468 * case. If we have already observed an epoch
469 * generation, then we can be sure no hazardous
470 * references exist to objects from this generation. We
471 * can actually avoid an additional scan step at this
479 * A majority of use-cases will not require full barrier semantics.
480 * However, if non-temporal instructions are used, full barrier
481 * semantics are necessary.
483 ck_pr_fence_memory();
484 record->epoch = delta;
489 ck_epoch_barrier(struct ck_epoch_record *record)
492 ck_epoch_synchronize(record);
493 ck_epoch_reclaim(record);
498 * It may be worth it to actually apply these deferral semantics to an epoch
499 * that was observed at ck_epoch_call time. The problem is that the latter
500 * would require a full fence.
502 * ck_epoch_call will dispatch to the latest epoch snapshot that was observed.
503 * There are cases where it will fail to reclaim as early as it could. If this
504 * becomes a problem, we could actually use a heap for epoch buckets but that
505 * is far from ideal too.
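 *
 * A non-blocking writer might pair this with ck_epoch_call, deferring
 * destruction and opportunistically draining whatever has become safe (a
 * sketch; "object", its epoch_entry member and object_destroy are
 * hypothetical):
 *
 *   ck_epoch_call(&record, &object->epoch_entry, object_destroy);
 *   (void)ck_epoch_poll(&record);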
508 ck_epoch_poll(struct ck_epoch_record *record)
512 unsigned int snapshot;
513 struct ck_epoch_record *cr = NULL;
514 struct ck_epoch *global = record->global;
516 epoch = ck_pr_load_uint(&global->epoch);
518 /* Serialize epoch snapshots with respect to global epoch. */
519 ck_pr_fence_memory();
520 cr = ck_epoch_scan(global, cr, epoch, &active);
522 record->epoch = epoch;
526 /* We are at a grace period if all threads are inactive. */
527 if (active == false) {
528 record->epoch = epoch;
529 for (epoch = 0; epoch < CK_EPOCH_LENGTH; epoch++)
530 ck_epoch_dispatch(record, epoch);
535 /* If an active thread exists, rely on epoch observation. */
536 if (ck_pr_cas_uint_value(&global->epoch, epoch, epoch + 1,
537 &snapshot) == false) {
538 record->epoch = snapshot;
540 record->epoch = epoch + 1;
543 ck_epoch_dispatch(record, epoch + 1);