/*
 * Copyright 2011-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_EPOCH_H
#define CK_EPOCH_H

/*
 * The implementation here is inspired by the work described in:
 *   Fraser, K. 2004. Practical Lock-Freedom. PhD Thesis, University
 *   of Cambridge Computing Laboratory.
 */

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stack.h>
#include <ck_stdbool.h>

#ifndef CK_EPOCH_LENGTH
#define CK_EPOCH_LENGTH 4
#endif

/*
 * This is used for sense detection with respect to concurrent
 * epoch sections.
 */
#define CK_EPOCH_SENSE (2)

struct ck_epoch_entry;
typedef struct ck_epoch_entry ck_epoch_entry_t;
typedef void ck_epoch_cb_t(ck_epoch_entry_t *);

/*
 * This should be embedded into objects you wish to be the target of
 * ck_epoch_cb_t functions (with ck_epoch_call).
 */
struct ck_epoch_entry {
	ck_epoch_cb_t *function;
	ck_stack_entry_t stack_entry;
};

/*
 * A section object may be passed to every begin-end pair to allow for
 * forward progress guarantees within prolonged active sections.
 */
struct ck_epoch_section {
	unsigned int bucket;
};
typedef struct ck_epoch_section ck_epoch_section_t;

/*
 * Return pointer to ck_epoch_entry container object.
 */
#define CK_EPOCH_CONTAINER(T, M, N) \
	CK_CC_CONTAINER(struct ck_epoch_entry, T, M, N)
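
/*
 * Example (an illustrative sketch, not part of this header): an object type
 * that embeds a ck_epoch_entry so it may be passed to ck_epoch_call, together
 * with a destructor generated via CK_EPOCH_CONTAINER. The names "struct node",
 * "node_container" and "node_destroy" are hypothetical, and the object is
 * assumed to be heap-allocated.
 *
 *	struct node {
 *		int value;
 *		ck_epoch_entry_t epoch_entry;
 *	};
 *	CK_EPOCH_CONTAINER(struct node, epoch_entry, node_container)
 *
 *	static void
 *	node_destroy(ck_epoch_entry_t *e)
 *	{
 *		free(node_container(e));
 *	}
 */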

struct ck_epoch_ref {
	unsigned int epoch;
	unsigned int count;
};

struct ck_epoch_record {
	ck_stack_entry_t record_next;
	struct ck_epoch *global;
	unsigned int state;
	unsigned int epoch;
	unsigned int active;
	struct {
		struct ck_epoch_ref bucket[CK_EPOCH_SENSE];
	} local CK_CC_CACHELINE;
	unsigned int n_pending;
	unsigned int n_peak;
	unsigned int n_dispatch;
	void *ct;
	ck_stack_t pending[CK_EPOCH_LENGTH];
} CK_CC_CACHELINE;
typedef struct ck_epoch_record ck_epoch_record_t;

struct ck_epoch {
	unsigned int epoch;
	unsigned int n_free;
	ck_stack_t records;
};
typedef struct ck_epoch ck_epoch_t;

/*
 * Internal functions.
 */
void _ck_epoch_addref(ck_epoch_record_t *, ck_epoch_section_t *);
bool _ck_epoch_delref(ck_epoch_record_t *, ck_epoch_section_t *);

CK_CC_FORCE_INLINE static void *
ck_epoch_record_ct(const ck_epoch_record_t *record)
{

	return ck_pr_load_ptr(&record->ct);
}

/*
 * Marks the beginning of an epoch-protected section.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_begin(ck_epoch_record_t *record, ck_epoch_section_t *section)
{
	struct ck_epoch *epoch = record->global;

	/*
	 * Only observe a new epoch if the thread is not recursing into a
	 * read section.
	 */
	if (record->active == 0) {
		unsigned int g_epoch;

		/*
		 * It is possible for loads to be re-ordered before the store
		 * is committed into the caller's epoch and active fields.
		 * For this reason, store-to-load serialization is necessary.
		 */
#if defined(CK_MD_TSO)
		ck_pr_fas_uint(&record->active, 1);
		ck_pr_fence_atomic_load();
#else
		ck_pr_store_uint(&record->active, 1);
		ck_pr_fence_memory();
#endif

		/*
		 * This load is allowed to be re-ordered prior to the setting
		 * of the active flag due to the monotonic nature of the
		 * global epoch. However, stale values lead to measurable
		 * performance degradation in some torture tests, so we
		 * disallow early loads of the global epoch.
		 */
		g_epoch = ck_pr_load_uint(&epoch->epoch);
		ck_pr_store_uint(&record->epoch, g_epoch);
	} else {
		ck_pr_store_uint(&record->active, record->active + 1);
	}

	if (section != NULL)
		_ck_epoch_addref(record, section);

	return;
}

/*
 * Marks the end of an epoch-protected section. Returns true if no more
 * sections exist for the caller.
 */
CK_CC_FORCE_INLINE static bool
ck_epoch_end(ck_epoch_record_t *record, ck_epoch_section_t *section)
{

	ck_pr_fence_release();
	ck_pr_store_uint(&record->active, record->active - 1);

	if (section != NULL)
		return _ck_epoch_delref(record, section);

	return record->active == 0;
}
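
/*
 * Illustrative read-side usage (a sketch; "record" is the caller's registered
 * record, "head" is a hypothetical shared pointer and "struct node" is the
 * hypothetical type from the example above): all loads of epoch-protected
 * objects are performed between ck_epoch_begin and ck_epoch_end. A NULL
 * section may be passed when no forward-progress guarantee is required.
 *
 *	struct node *n;
 *	int value = 0;
 *
 *	ck_epoch_begin(record, NULL);
 *	n = ck_pr_load_ptr(&head);
 *	if (n != NULL)
 *		value = n->value;
 *	ck_epoch_end(record, NULL);
 */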

/*
 * Defers the execution of the function pointed to by the "function"
 * argument until an epoch counter loop (a grace period) has completed.
 * This allows for non-blocking deferral.
 *
 * We can get away without a fence here due to the monotonic nature
 * of the epoch counter. Worst case, this will result in some delays
 * before object destruction.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_call(ck_epoch_record_t *record,
	      ck_epoch_entry_t *entry,
	      ck_epoch_cb_t *function)
{
	struct ck_epoch *epoch = record->global;
	unsigned int e = ck_pr_load_uint(&epoch->epoch);
	unsigned int offset = e & (CK_EPOCH_LENGTH - 1);

	record->n_pending++;
	entry->function = function;
	ck_stack_push_spnc(&record->pending[offset], &entry->stack_entry);
	return;
}
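
/*
 * Illustrative deferral (a sketch, reusing the hypothetical struct node and
 * node_destroy from the example above): once a writer has unlinked an object
 * so that no new reader can find it, destruction is deferred rather than
 * performed immediately. ck_epoch_poll may then be used to opportunistically
 * dispatch callbacks whose grace period has already elapsed.
 *
 *	ck_epoch_call(record, &n->epoch_entry, node_destroy);
 *	(void)ck_epoch_poll(record);
 */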

/*
 * Same as ck_epoch_call, but allows for records to be shared and is reentrant.
 */
CK_CC_FORCE_INLINE static void
ck_epoch_call_strict(ck_epoch_record_t *record,
	      ck_epoch_entry_t *entry,
	      ck_epoch_cb_t *function)
{
	struct ck_epoch *epoch = record->global;
	unsigned int e = ck_pr_load_uint(&epoch->epoch);
	unsigned int offset = e & (CK_EPOCH_LENGTH - 1);

	ck_pr_inc_uint(&record->n_pending);
	entry->function = function;

	/* Store fence is implied by push operation. */
	ck_stack_push_upmc(&record->pending[offset], &entry->stack_entry);
	return;
}
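
/*
 * A sketch of the distinction (assumption: "shared_record" is a single record
 * registered once and deliberately shared between threads): because the
 * pending counter is updated atomically and the push is multi-producer safe,
 * several threads may defer work through the same record concurrently.
 *
 *	ck_epoch_call_strict(shared_record, &n->epoch_entry, node_destroy);
 */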

/*
 * This callback is used for synchronize_wait to allow for custom blocking
 * behavior.
 */
typedef void ck_epoch_wait_cb_t(ck_epoch_t *, ck_epoch_record_t *,
    void *);
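
/*
 * Example wait callback (an illustrative sketch; sched_yield from <sched.h>
 * stands in for whatever blocking policy the caller prefers, and
 * "global_epoch" is a hypothetical ck_epoch_t): the callback may be invoked
 * while the synchronize operation waits on a record that has not yet observed
 * a newer epoch, with the user pointer passed through unchanged.
 *
 *	static void
 *	epoch_wait(ck_epoch_t *e, ck_epoch_record_t *other, void *arg)
 *	{
 *
 *		(void)e;
 *		(void)other;
 *		(void)arg;
 *		sched_yield();
 *	}
 *
 *	ck_epoch_synchronize_wait(&global_epoch, epoch_wait, NULL);
 */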

/*
 * Return latest epoch value. This operation provides load ordering.
 */
CK_CC_FORCE_INLINE static unsigned int
ck_epoch_value(const ck_epoch_t *ep)
{

	ck_pr_fence_load();
	return ck_pr_load_uint(&ep->epoch);
}

void ck_epoch_init(ck_epoch_t *);

/*
 * Attempts to recycle an unused epoch record. If one is successfully
 * allocated, the record context pointer is also updated.
 */
ck_epoch_record_t *ck_epoch_recycle(ck_epoch_t *, void *);

/*
 * Registers an epoch record. An optional context pointer may be passed that
 * is retrievable with ck_epoch_record_ct.
 */
void ck_epoch_register(ck_epoch_t *, ck_epoch_record_t *, void *);

/*
 * Marks a record as available for re-use by a subsequent recycle operation.
 * Note that the record cannot be physically destroyed.
 */
void ck_epoch_unregister(ck_epoch_record_t *);
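
/*
 * Typical per-thread setup (an illustrative sketch; "global_epoch" is a
 * hypothetical ck_epoch_t initialized once at startup, and allocation failure
 * handling is omitted): try to recycle an unregistered record before
 * allocating and registering a fresh one, and unregister it when the thread
 * is done with epoch-protected operations.
 *
 *	record = ck_epoch_recycle(&global_epoch, NULL);
 *	if (record == NULL) {
 *		record = malloc(sizeof *record);
 *		ck_epoch_register(&global_epoch, record, NULL);
 *	}
 *
 *	...
 *
 *	ck_epoch_unregister(record);
 */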

bool ck_epoch_poll(ck_epoch_record_t *);
bool ck_epoch_poll_deferred(struct ck_epoch_record *record, ck_stack_t *deferred);
void ck_epoch_synchronize(ck_epoch_record_t *);
void ck_epoch_synchronize_wait(ck_epoch_t *, ck_epoch_wait_cb_t *, void *);
void ck_epoch_barrier(ck_epoch_record_t *);
void ck_epoch_barrier_wait(ck_epoch_record_t *, ck_epoch_wait_cb_t *, void *);

/*
 * Reclaim entries associated with a record. This is safe to call only on
 * the caller's record or records that are using call_strict.
 */
void ck_epoch_reclaim(ck_epoch_record_t *);
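
/*
 * Illustrative shutdown flush (a sketch): before retiring its record, a
 * thread may wait out a grace period and then dispatch every callback still
 * queued on that record.
 *
 *	ck_epoch_synchronize(record);
 *	ck_epoch_reclaim(record);
 *	ck_epoch_unregister(record);
 */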

#endif /* CK_EPOCH_H */