/*
 * Copyright 2010-2015 Samy Al Bahra.
 * Copyright 2011 David Joseph.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_FIFO_H
#define CK_FIFO_H

#include <ck_cc.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_spinlock.h>
#include <ck_stddef.h>

#ifndef CK_F_FIFO_SPSC
#define CK_F_FIFO_SPSC
struct ck_fifo_spsc_entry {
        void *value;
        struct ck_fifo_spsc_entry *next;
};
typedef struct ck_fifo_spsc_entry ck_fifo_spsc_entry_t;

struct ck_fifo_spsc {
        ck_spinlock_t m_head;
        struct ck_fifo_spsc_entry *head;
        char pad[CK_MD_CACHELINE - sizeof(struct ck_fifo_spsc_entry *) - sizeof(ck_spinlock_t)];
        ck_spinlock_t m_tail;
        struct ck_fifo_spsc_entry *tail;
        struct ck_fifo_spsc_entry *head_snapshot;
        struct ck_fifo_spsc_entry *garbage;
};
typedef struct ck_fifo_spsc ck_fifo_spsc_t;

CK_CC_INLINE static bool
ck_fifo_spsc_enqueue_trylock(struct ck_fifo_spsc *fifo)
{
        return ck_spinlock_trylock(&fifo->m_tail);
}

CK_CC_INLINE static void
ck_fifo_spsc_enqueue_lock(struct ck_fifo_spsc *fifo)
{
        ck_spinlock_lock(&fifo->m_tail);
}

CK_CC_INLINE static void
ck_fifo_spsc_enqueue_unlock(struct ck_fifo_spsc *fifo)
{
        ck_spinlock_unlock(&fifo->m_tail);
}

CK_CC_INLINE static bool
ck_fifo_spsc_dequeue_trylock(struct ck_fifo_spsc *fifo)
{
        return ck_spinlock_trylock(&fifo->m_head);
}

CK_CC_INLINE static void
ck_fifo_spsc_dequeue_lock(struct ck_fifo_spsc *fifo)
{
        ck_spinlock_lock(&fifo->m_head);
}

CK_CC_INLINE static void
ck_fifo_spsc_dequeue_unlock(struct ck_fifo_spsc *fifo)
{
        ck_spinlock_unlock(&fifo->m_head);
}
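
/*
 * Usage sketch (illustrative, not part of the interface proper): the
 * queue itself supports only one concurrent producer and one concurrent
 * consumer, so additional producers can serialize through the enqueue
 * lock above. The names q, node and value are placeholders.
 *
 *      ck_fifo_spsc_enqueue_lock(q);
 *      ck_fifo_spsc_enqueue(q, node, value);
 *      ck_fifo_spsc_enqueue_unlock(q);
 */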

CK_CC_INLINE static void
ck_fifo_spsc_init(struct ck_fifo_spsc *fifo, struct ck_fifo_spsc_entry *stub)
{
        ck_spinlock_init(&fifo->m_head);
        ck_spinlock_init(&fifo->m_tail);

        /* The stub must have no successor before it is published. */
        stub->next = NULL;
        fifo->head = fifo->tail = fifo->head_snapshot = fifo->garbage = stub;
}

CK_CC_INLINE static void
ck_fifo_spsc_deinit(struct ck_fifo_spsc *fifo, struct ck_fifo_spsc_entry **garbage)
{
        *garbage = fifo->head;
        fifo->head = fifo->tail = NULL;
}

CK_CC_INLINE static void
ck_fifo_spsc_enqueue(struct ck_fifo_spsc *fifo,
    struct ck_fifo_spsc_entry *entry,
    void *value)
{
        entry->value = value;
        entry->next = NULL;

        /* If stub->next is visible, guarantee that entry is consistent. */
        ck_pr_fence_store();
        ck_pr_store_ptr(&fifo->tail->next, entry);
        fifo->tail = entry;
}

CK_CC_INLINE static bool
ck_fifo_spsc_dequeue(struct ck_fifo_spsc *fifo, void *value)
{
        struct ck_fifo_spsc_entry *entry;

        /*
         * The head pointer is guaranteed to always point to a stub entry.
         * If the stub entry does not point to an entry, then the queue is
         * empty.
         */
        entry = ck_pr_load_ptr(&fifo->head->next);
        if (entry == NULL)
                return false;

        /* If entry is visible, guarantee store to value is visible. */
        ck_pr_store_ptr_unsafe(value, entry->value);
        ck_pr_fence_store();
        ck_pr_store_ptr(&fifo->head, entry);
        return true;
}

/*
 * Recycle a node. This technique for recycling nodes is based on
 * Dmitriy Vyukov's work.
 */
CK_CC_INLINE static struct ck_fifo_spsc_entry *
ck_fifo_spsc_recycle(struct ck_fifo_spsc *fifo)
{
        struct ck_fifo_spsc_entry *garbage;

        if (fifo->head_snapshot == fifo->garbage) {
                fifo->head_snapshot = ck_pr_load_ptr(&fifo->head);
                if (fifo->head_snapshot == fifo->garbage)
                        return NULL;
        }

        garbage = fifo->garbage;
        fifo->garbage = garbage->next;
        return garbage;
}
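
/*
 * A minimal producer-side sketch (illustrative only): attempt to re-use
 * a consumed node before falling back to the allocator. The name fifo
 * is a placeholder.
 *
 *      struct ck_fifo_spsc_entry *node;
 *
 *      node = ck_fifo_spsc_recycle(fifo);
 *      if (node == NULL)
 *              node = malloc(sizeof(*node));
 */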

CK_CC_INLINE static bool
ck_fifo_spsc_isempty(struct ck_fifo_spsc *fifo)
{
        struct ck_fifo_spsc_entry *head = ck_pr_load_ptr(&fifo->head);

        return ck_pr_load_ptr(&head->next) == NULL;
}

#define CK_FIFO_SPSC_ISEMPTY(f) ((f)->head->next == NULL)
#define CK_FIFO_SPSC_FIRST(f)   ((f)->head->next)
#define CK_FIFO_SPSC_NEXT(m)    ((m)->next)
#define CK_FIFO_SPSC_SPARE(f)   ((f)->head)
#define CK_FIFO_SPSC_FOREACH(fifo, entry)                       \
        for ((entry) = CK_FIFO_SPSC_FIRST(fifo);                \
             (entry) != NULL;                                   \
             (entry) = CK_FIFO_SPSC_NEXT(entry))
#define CK_FIFO_SPSC_FOREACH_SAFE(fifo, entry, T)               \
        for ((entry) = CK_FIFO_SPSC_FIRST(fifo);                \
             (entry) != NULL && ((T) = (entry)->next, 1);       \
             (entry) = (T))
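
/*
 * End-to-end single-producer/single-consumer sketch (illustrative only;
 * allocation failures are not handled). A stub node must be supplied at
 * initialization time, and ownership of all remaining nodes is returned
 * through ck_fifo_spsc_deinit.
 *
 *      ck_fifo_spsc_t fifo;
 *      ck_fifo_spsc_entry_t *stub = malloc(sizeof(*stub));
 *      ck_fifo_spsc_entry_t *entry = malloc(sizeof(*entry));
 *      void *value;
 *
 *      ck_fifo_spsc_init(&fifo, stub);
 *
 *      Producer thread:
 *      ck_fifo_spsc_enqueue(&fifo, entry, (void *)"message");
 *
 *      Consumer thread:
 *      if (ck_fifo_spsc_dequeue(&fifo, &value) == true)
 *              value now holds the enqueued pointer.
 */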

#endif /* CK_F_FIFO_SPSC */

#ifdef CK_F_PR_CAS_PTR_2
#ifndef CK_F_FIFO_MPMC
#define CK_F_FIFO_MPMC
struct ck_fifo_mpmc_entry;
struct ck_fifo_mpmc_pointer {
        struct ck_fifo_mpmc_entry *pointer;
        char *generation CK_CC_PACKED;
} CK_CC_ALIGN(16);

struct ck_fifo_mpmc_entry {
        void *value;
        struct ck_fifo_mpmc_pointer next;
};
typedef struct ck_fifo_mpmc_entry ck_fifo_mpmc_entry_t;

struct ck_fifo_mpmc {
        struct ck_fifo_mpmc_pointer head;
        char pad[CK_MD_CACHELINE - sizeof(struct ck_fifo_mpmc_pointer)];
        struct ck_fifo_mpmc_pointer tail;
};
typedef struct ck_fifo_mpmc ck_fifo_mpmc_t;

CK_CC_INLINE static void
ck_fifo_mpmc_init(struct ck_fifo_mpmc *fifo, struct ck_fifo_mpmc_entry *stub)
{
        stub->next.pointer = NULL;
        stub->next.generation = NULL;
        fifo->head.pointer = fifo->tail.pointer = stub;
        fifo->head.generation = fifo->tail.generation = NULL;
}

CK_CC_INLINE static void
ck_fifo_mpmc_deinit(struct ck_fifo_mpmc *fifo, struct ck_fifo_mpmc_entry **garbage)
{
        *garbage = fifo->head.pointer;
        fifo->head.pointer = fifo->tail.pointer = NULL;
}

CK_CC_INLINE static void
ck_fifo_mpmc_enqueue(struct ck_fifo_mpmc *fifo,
    struct ck_fifo_mpmc_entry *entry,
    void *value)
{
        struct ck_fifo_mpmc_pointer tail, next, update;

        /*
         * Prepare the upcoming node and make sure to commit the updates
         * before publishing.
         */
        entry->value = value;
        entry->next.pointer = NULL;
        entry->next.generation = 0;
        ck_pr_fence_store_atomic();

        for (;;) {
                tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
                ck_pr_fence_load();
                tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
                next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
                ck_pr_fence_load();
                next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);

                if (ck_pr_load_ptr(&fifo->tail.generation) != tail.generation)
                        continue;

                if (next.pointer != NULL) {
                        /*
                         * If the tail pointer has an entry following it then
                         * it needs to be forwarded to the next entry. This
                         * helps us guarantee we are always operating on the
                         * last entry.
                         */
                        update.pointer = next.pointer;
                        update.generation = tail.generation + 1;
                        ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
                } else {
                        /*
                         * Attempt to commit the new entry to the end of the
                         * current tail.
                         */
                        update.pointer = entry;
                        update.generation = next.generation + 1;
                        if (ck_pr_cas_ptr_2(&tail.pointer->next, &next, &update) == true)
                                break;
                }
        }

        ck_pr_fence_atomic();

        /* After a successful insert, forward the tail to the new entry. */
        update.generation = tail.generation + 1;
        ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
}

CK_CC_INLINE static bool
ck_fifo_mpmc_tryenqueue(struct ck_fifo_mpmc *fifo,
    struct ck_fifo_mpmc_entry *entry,
    void *value)
{
        struct ck_fifo_mpmc_pointer tail, next, update;

        entry->value = value;
        entry->next.pointer = NULL;
        entry->next.generation = 0;

        ck_pr_fence_store_atomic();

        tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
        ck_pr_fence_load();
        tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
        next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
        ck_pr_fence_load();
        next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);

        if (ck_pr_load_ptr(&fifo->tail.generation) != tail.generation)
                return false;

        if (next.pointer != NULL) {
                /*
                 * If the tail pointer has an entry following it then
                 * it needs to be forwarded to the next entry. This
                 * helps us guarantee we are always operating on the
                 * last entry.
                 */
                update.pointer = next.pointer;
                update.generation = tail.generation + 1;
                ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
                return false;
        } else {
                /*
                 * Attempt to commit the new entry to the end of the
                 * current tail.
                 */
                update.pointer = entry;
                update.generation = next.generation + 1;
                if (ck_pr_cas_ptr_2(&tail.pointer->next, &next, &update) == false)
                        return false;
        }

        ck_pr_fence_atomic();

        /* After a successful insert, forward the tail to the new entry. */
        update.generation = tail.generation + 1;
        ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
        return true;
}

CK_CC_INLINE static bool
ck_fifo_mpmc_dequeue(struct ck_fifo_mpmc *fifo,
    void *value,
    struct ck_fifo_mpmc_entry **garbage)
{
        struct ck_fifo_mpmc_pointer head, tail, next, update;

        for (;;) {
                head.generation = ck_pr_load_ptr(&fifo->head.generation);
                ck_pr_fence_load();
                head.pointer = ck_pr_load_ptr(&fifo->head.pointer);
                tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
                ck_pr_fence_load();
                tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);

                next.generation = ck_pr_load_ptr(&head.pointer->next.generation);
                ck_pr_fence_load();
                next.pointer = ck_pr_load_ptr(&head.pointer->next.pointer);

                update.pointer = next.pointer;
                if (head.pointer == tail.pointer) {
                        /*
                         * The head is guaranteed to always point at a stub
                         * entry. If the stub entry has no references then the
                         * queue is empty.
                         */
                        if (next.pointer == NULL)
                                return false;

                        /* Forward the tail pointer if necessary. */
                        update.generation = tail.generation + 1;
                        ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
                } else {
                        /*
                         * It is possible for the head snapshot to have been
                         * re-used. Avoid dereferencing during enqueue
                         * re-use.
                         */
                        if (next.pointer == NULL)
                                continue;

                        /* Save value before commit. */
                        *(void **)value = ck_pr_load_ptr(&next.pointer->value);

                        /* Forward the head pointer to the next entry. */
                        update.generation = head.generation + 1;
                        if (ck_pr_cas_ptr_2(&fifo->head, &head, &update) == true)
                                break;
                }
        }

        *garbage = head.pointer;
        return true;
}

CK_CC_INLINE static bool
ck_fifo_mpmc_trydequeue(struct ck_fifo_mpmc *fifo,
    void *value,
    struct ck_fifo_mpmc_entry **garbage)
{
        struct ck_fifo_mpmc_pointer head, tail, next, update;

        head.generation = ck_pr_load_ptr(&fifo->head.generation);
        ck_pr_fence_load();
        head.pointer = ck_pr_load_ptr(&fifo->head.pointer);

        tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
        ck_pr_fence_load();
        tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);

        next.generation = ck_pr_load_ptr(&head.pointer->next.generation);
        ck_pr_fence_load();
        next.pointer = ck_pr_load_ptr(&head.pointer->next.pointer);

        update.pointer = next.pointer;
        if (head.pointer == tail.pointer) {
                /*
                 * The head is guaranteed to always point at a stub
                 * entry. If the stub entry has no references then the
                 * queue is empty.
                 */
                if (next.pointer == NULL)
                        return false;

                /* Forward the tail pointer if necessary. */
                update.generation = tail.generation + 1;
                ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
                return false;
        } else {
                /*
                 * It is possible for the head snapshot to have been
                 * re-used. Avoid dereferencing during enqueue.
                 */
                if (next.pointer == NULL)
                        return false;

                /* Save value before commit. */
                *(void **)value = ck_pr_load_ptr(&next.pointer->value);

                /* Forward the head pointer to the next entry. */
                update.generation = head.generation + 1;
                if (ck_pr_cas_ptr_2(&fifo->head, &head, &update) == false)
                        return false;
        }

        *garbage = head.pointer;
        return true;
}

#define CK_FIFO_MPMC_ISEMPTY(f) ((f)->head.pointer->next.pointer == NULL)
#define CK_FIFO_MPMC_FIRST(f)   ((f)->head.pointer->next.pointer)
#define CK_FIFO_MPMC_NEXT(m)    ((m)->next.pointer)
#define CK_FIFO_MPMC_FOREACH(fifo, entry)                               \
        for ((entry) = CK_FIFO_MPMC_FIRST(fifo);                        \
             (entry) != NULL;                                           \
             (entry) = CK_FIFO_MPMC_NEXT(entry))
#define CK_FIFO_MPMC_FOREACH_SAFE(fifo, entry, T)                       \
        for ((entry) = CK_FIFO_MPMC_FIRST(fifo);                        \
             (entry) != NULL && ((T) = (entry)->next.pointer, 1);       \
             (entry) = (T))
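
/*
 * Multi-producer/multi-consumer sketch (illustrative only). Nodes must
 * satisfy the alignment required by ck_pr_cas_ptr_2, and the garbage
 * node handed back by a successful dequeue may only be freed once a
 * safe memory reclamation scheme (for example, epoch reclamation)
 * guarantees that no other thread still references it.
 *
 *      ck_fifo_mpmc_t fifo;
 *      ck_fifo_mpmc_entry_t *stub = malloc(sizeof(*stub));
 *      ck_fifo_mpmc_entry_t *entry = malloc(sizeof(*entry));
 *      ck_fifo_mpmc_entry_t *garbage;
 *      void *value;
 *
 *      ck_fifo_mpmc_init(&fifo, stub);
 *      ck_fifo_mpmc_enqueue(&fifo, entry, (void *)"message");
 *      if (ck_fifo_mpmc_dequeue(&fifo, &value, &garbage) == true)
 *              garbage is the retired node; defer freeing it.
 */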

#endif /* CK_F_FIFO_MPMC */
#endif /* CK_F_PR_CAS_PTR_2 */

#endif /* CK_FIFO_H */