]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/contrib/ck/include/ck_fifo.h
Merge bmake-20180512
[FreeBSD/FreeBSD.git] / sys / contrib / ck / include / ck_fifo.h
1 /*
2  * Copyright 2010-2015 Samy Al Bahra.
3  * Copyright 2011 David Joseph.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #ifndef CK_FIFO_H
29 #define CK_FIFO_H
30
31 #include <ck_cc.h>
32 #include <ck_md.h>
33 #include <ck_pr.h>
34 #include <ck_spinlock.h>
35 #include <ck_stddef.h>
36
37 #ifndef CK_F_FIFO_SPSC
38 #define CK_F_FIFO_SPSC
/*
 * Linked-list node for the SPSC FIFO. The queue always retains at least one
 * node: the node currently pointed to by fifo->head acts as a stub whose
 * value slot is never read by the consumer (see ck_fifo_spsc_dequeue).
 */
struct ck_fifo_spsc_entry {
	void *value;				/* User payload. */
	struct ck_fifo_spsc_entry *next;	/* Next (newer) entry, or NULL at the tail. */
};
typedef struct ck_fifo_spsc_entry ck_fifo_spsc_entry_t;
44
/*
 * Single-producer/single-consumer FIFO. Consumer-side state (m_head, head)
 * and producer-side state (m_tail, tail, head_snapshot, garbage) are split
 * across a cache line by the pad member to avoid false sharing. The two
 * spinlocks are only touched by the *_lock/*_trylock/*_unlock helpers below,
 * for callers that multiplex several threads onto one endpoint.
 */
struct ck_fifo_spsc {
	ck_spinlock_t m_head;			/* Used by the dequeue-side lock helpers. */
	struct ck_fifo_spsc_entry *head;	/* Current stub; owned by the consumer. */
	char pad[CK_MD_CACHELINE - sizeof(struct ck_fifo_spsc_entry *) - sizeof(ck_spinlock_t)];
	ck_spinlock_t m_tail;			/* Used by the enqueue-side lock helpers. */
	struct ck_fifo_spsc_entry *tail;	/* Last entry; owned by the producer. */
	struct ck_fifo_spsc_entry *head_snapshot; /* Producer's cached view of head, for recycling. */
	struct ck_fifo_spsc_entry *garbage;	/* Oldest node that may be eligible for re-use. */
};
typedef struct ck_fifo_spsc ck_fifo_spsc_t;
55
56 CK_CC_INLINE static bool
57 ck_fifo_spsc_enqueue_trylock(struct ck_fifo_spsc *fifo)
58 {
59
60         return ck_spinlock_trylock(&fifo->m_tail);
61 }
62
63 CK_CC_INLINE static void
64 ck_fifo_spsc_enqueue_lock(struct ck_fifo_spsc *fifo)
65 {
66
67         ck_spinlock_lock(&fifo->m_tail);
68         return;
69 }
70
71 CK_CC_INLINE static void
72 ck_fifo_spsc_enqueue_unlock(struct ck_fifo_spsc *fifo)
73 {
74
75         ck_spinlock_unlock(&fifo->m_tail);
76         return;
77 }
78
79 CK_CC_INLINE static bool
80 ck_fifo_spsc_dequeue_trylock(struct ck_fifo_spsc *fifo)
81 {
82
83         return ck_spinlock_trylock(&fifo->m_head);
84 }
85
86 CK_CC_INLINE static void
87 ck_fifo_spsc_dequeue_lock(struct ck_fifo_spsc *fifo)
88 {
89
90         ck_spinlock_lock(&fifo->m_head);
91         return;
92 }
93
94 CK_CC_INLINE static void
95 ck_fifo_spsc_dequeue_unlock(struct ck_fifo_spsc *fifo)
96 {
97
98         ck_spinlock_unlock(&fifo->m_head);
99         return;
100 }
101
102 CK_CC_INLINE static void
103 ck_fifo_spsc_init(struct ck_fifo_spsc *fifo, struct ck_fifo_spsc_entry *stub)
104 {
105
106         ck_spinlock_init(&fifo->m_head);
107         ck_spinlock_init(&fifo->m_tail);
108
109         stub->next = NULL;
110         fifo->head = fifo->tail = fifo->head_snapshot = fifo->garbage = stub;
111         return;
112 }
113
114 CK_CC_INLINE static void
115 ck_fifo_spsc_deinit(struct ck_fifo_spsc *fifo, struct ck_fifo_spsc_entry **garbage)
116 {
117
118         *garbage = fifo->head;
119         fifo->head = fifo->tail = NULL;
120         return;
121 }
122
/*
 * Producer-side enqueue of value, using the caller-supplied node entry.
 * The node is published with a single atomic pointer store to tail->next;
 * the preceding store fence guarantees that entry's fields are globally
 * visible before the consumer can observe the link. fifo->tail is private
 * to the producer, so it is updated with a plain store.
 */
CK_CC_INLINE static void
ck_fifo_spsc_enqueue(struct ck_fifo_spsc *fifo,
		     struct ck_fifo_spsc_entry *entry,
		     void *value)
{

	entry->value = value;
	entry->next = NULL;

	/* If stub->next is visible, guarantee that entry is consistent. */
	ck_pr_fence_store();
	ck_pr_store_ptr(&fifo->tail->next, entry);
	fifo->tail = entry;
	return;
}
138
/*
 * Consumer-side dequeue. value must point to a void * slot; on success the
 * dequeued payload is written through it and true is returned. Returns
 * false when the queue is empty. The node that was the stub becomes
 * reclaimable by the producer (via ck_fifo_spsc_recycle) and the dequeued
 * node becomes the new stub.
 */
CK_CC_INLINE static bool
ck_fifo_spsc_dequeue(struct ck_fifo_spsc *fifo, void *value)
{
	struct ck_fifo_spsc_entry *entry;

	/*
	 * The head pointer is guaranteed to always point to a stub entry.
	 * If the stub entry does not point to an entry, then the queue is
	 * empty.
	 */
	entry = ck_pr_load_ptr(&fifo->head->next);
	if (entry == NULL)
		return false;

	/* If entry is visible, guarantee store to value is visible. */
	ck_pr_store_ptr_unsafe(value, entry->value);
	ck_pr_fence_store();
	ck_pr_store_ptr(&fifo->head, entry);
	return true;
}
159
/*
 * Recycle a node. This technique for recycling nodes is based on
 * Dmitriy Vyukov's work. Producer-side only: returns a node that the
 * consumer has already moved past (and whose contents may therefore be
 * re-used for a future enqueue), or NULL if no such node is available.
 * The producer keeps a cached snapshot of fifo->head so the shared head
 * pointer is only re-read when the cache is exhausted.
 */
CK_CC_INLINE static struct ck_fifo_spsc_entry *
ck_fifo_spsc_recycle(struct ck_fifo_spsc *fifo)
{
	struct ck_fifo_spsc_entry *garbage;

	/* Refresh the head snapshot only when the garbage list looks empty. */
	if (fifo->head_snapshot == fifo->garbage) {
		fifo->head_snapshot = ck_pr_load_ptr(&fifo->head);
		if (fifo->head_snapshot == fifo->garbage)
			return NULL;
	}

	garbage = fifo->garbage;
	fifo->garbage = garbage->next;
	return garbage;
}
179
/*
 * Returns true if the FIFO currently appears empty: the stub at head has
 * no successor. With concurrent producers the result is only a snapshot.
 */
CK_CC_INLINE static bool
ck_fifo_spsc_isempty(struct ck_fifo_spsc *fifo)
{
	struct ck_fifo_spsc_entry *head = ck_pr_load_ptr(&fifo->head);
	return ck_pr_load_ptr(&head->next) == NULL;
}
186
/*
 * Unsynchronized accessors: these perform plain (non-atomic, unfenced)
 * loads, so callers must supply their own serialization with respect to
 * concurrent enqueue/dequeue operations.
 */
#define CK_FIFO_SPSC_ISEMPTY(f) ((f)->head->next == NULL)
#define CK_FIFO_SPSC_FIRST(f)   ((f)->head->next)
#define CK_FIFO_SPSC_NEXT(m)    ((m)->next)
/* The stub node at head holds no live payload and may be treated as spare. */
#define CK_FIFO_SPSC_SPARE(f)   ((f)->head)
/* Iterate over live entries; the _SAFE variant caches next in T so the
 * current entry may be modified/unlinked during iteration. */
#define CK_FIFO_SPSC_FOREACH(fifo, entry)                       \
        for ((entry) = CK_FIFO_SPSC_FIRST(fifo);                \
             (entry) != NULL;                                   \
             (entry) = CK_FIFO_SPSC_NEXT(entry))
#define CK_FIFO_SPSC_FOREACH_SAFE(fifo, entry, T)               \
        for ((entry) = CK_FIFO_SPSC_FIRST(fifo);                \
             (entry) != NULL && ((T) = (entry)->next, 1);       \
             (entry) = (T))
199
200 #endif /* CK_F_FIFO_SPSC */
201
202 #ifdef CK_F_PR_CAS_PTR_2
203 #ifndef CK_F_FIFO_MPMC
204 #define CK_F_FIFO_MPMC
struct ck_fifo_mpmc_entry;
/*
 * (pointer, generation) pair updated as a unit with ck_pr_cas_ptr_2.
 * The generation tag is bumped on every successful update so a re-used
 * node cannot be mistaken for its earlier incarnation (ABA avoidance).
 * CK_CC_ALIGN(16) provides the alignment the double-wide CAS requires.
 */
struct ck_fifo_mpmc_pointer {
	struct ck_fifo_mpmc_entry *pointer;
	char *generation CK_CC_PACKED;	/* Tag only; char * so +1 advances by one. */
} CK_CC_ALIGN(16);
210
/*
 * Linked-list node for the MPMC FIFO. next is a tagged pointer so it can
 * be the target of a double-wide CAS.
 */
struct ck_fifo_mpmc_entry {
	void *value;				/* User payload. */
	struct ck_fifo_mpmc_pointer next;	/* Tagged link to the next (newer) entry. */
};
typedef struct ck_fifo_mpmc_entry ck_fifo_mpmc_entry_t;
216
/*
 * Multi-producer/multi-consumer FIFO. head always refers to a stub entry;
 * head and tail are padded onto separate cache lines to avoid false
 * sharing between dequeuers and enqueuers.
 */
struct ck_fifo_mpmc {
	struct ck_fifo_mpmc_pointer head;
	char pad[CK_MD_CACHELINE - sizeof(struct ck_fifo_mpmc_pointer)];
	struct ck_fifo_mpmc_pointer tail;
};
typedef struct ck_fifo_mpmc ck_fifo_mpmc_t;
223
224 CK_CC_INLINE static void
225 ck_fifo_mpmc_init(struct ck_fifo_mpmc *fifo, struct ck_fifo_mpmc_entry *stub)
226 {
227
228         stub->next.pointer = NULL;
229         stub->next.generation = NULL;
230         fifo->head.pointer = fifo->tail.pointer = stub;
231         fifo->head.generation = fifo->tail.generation = NULL;
232         return;
233 }
234
235 CK_CC_INLINE static void
236 ck_fifo_mpmc_deinit(struct ck_fifo_mpmc *fifo, struct ck_fifo_mpmc_entry **garbage)
237 {
238
239         *garbage = fifo->head.pointer;
240         fifo->head.pointer = fifo->tail.pointer = NULL;
241         return;
242 }
243
/*
 * Lock-free enqueue of value using caller-supplied node entry; safe
 * against concurrent enqueuers and dequeuers. Loops until the new entry
 * is linked after the current last node via double-wide CAS, helping
 * forward a lagging tail pointer along the way. Generation tags on every
 * CAS target guard against ABA on re-used nodes.
 */
CK_CC_INLINE static void
ck_fifo_mpmc_enqueue(struct ck_fifo_mpmc *fifo,
		     struct ck_fifo_mpmc_entry *entry,
		     void *value)
{
	struct ck_fifo_mpmc_pointer tail, next, update;

	/*
	 * Prepare the upcoming node and make sure to commit the updates
	 * before publishing.
	 */
	entry->value = value;
	entry->next.pointer = NULL;
	entry->next.generation = 0;
	ck_pr_fence_store_atomic();

	for (;;) {
		/*
		 * Snapshot tail and tail->next, loading each generation
		 * before its pointer; the re-check below rejects torn
		 * snapshots.
		 */
		tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
		ck_pr_fence_load();
		tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
		next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
		ck_pr_fence_load();
		next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);

		/* Retry if tail moved while we were reading it. */
		if (ck_pr_load_ptr(&fifo->tail.generation) != tail.generation)
			continue;

		if (next.pointer != NULL) {
			/*
			 * If the tail pointer has an entry following it then
			 * it needs to be forwarded to the next entry. This
			 * helps us guarantee we are always operating on the
			 * last entry.
			 */
			update.pointer = next.pointer;
			update.generation = tail.generation + 1;
			ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
		} else {
			/*
			 * Attempt to commit new entry to the end of the
			 * current tail.
			 */
			update.pointer = entry;
			update.generation = next.generation + 1;
			if (ck_pr_cas_ptr_2(&tail.pointer->next, &next, &update) == true)
				break;
		}
	}

	ck_pr_fence_atomic();

	/* After a successful insert, forward the tail to the new entry. */
	/* A failed CAS here is benign: another thread already advanced tail. */
	update.generation = tail.generation + 1;
	ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
	return;
}
300
/*
 * Single-shot variant of ck_fifo_mpmc_enqueue: performs at most one CAS
 * attempt to link entry after the current last node. Returns true on
 * success; returns false (without looping) on any contention — a moved
 * tail, a tail in need of forwarding (which it helps forward first), or
 * a lost CAS race. The caller may retry.
 */
CK_CC_INLINE static bool
ck_fifo_mpmc_tryenqueue(struct ck_fifo_mpmc *fifo,
			struct ck_fifo_mpmc_entry *entry,
			void *value)
{
	struct ck_fifo_mpmc_pointer tail, next, update;

	/* Commit the node's contents before it can be published. */
	entry->value = value;
	entry->next.pointer = NULL;
	entry->next.generation = 0;

	ck_pr_fence_store_atomic();

	/* Snapshot tail and tail->next, generation before pointer. */
	tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
	ck_pr_fence_load();
	tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);
	next.generation = ck_pr_load_ptr(&tail.pointer->next.generation);
	ck_pr_fence_load();
	next.pointer = ck_pr_load_ptr(&tail.pointer->next.pointer);

	/* Bail out if tail moved while we were reading it. */
	if (ck_pr_load_ptr(&fifo->tail.generation) != tail.generation)
		return false;

	if (next.pointer != NULL) {
		/*
		 * If the tail pointer has an entry following it then
		 * it needs to be forwarded to the next entry. This
		 * helps us guarantee we are always operating on the
		 * last entry.
		 */
		update.pointer = next.pointer;
		update.generation = tail.generation + 1;
		ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
		return false;
	} else {
		/*
		 * Attempt to commit new entry to the end of the
		 * current tail.
		 */
		update.pointer = entry;
		update.generation = next.generation + 1;
		if (ck_pr_cas_ptr_2(&tail.pointer->next, &next, &update) == false)
			return false;
	}

	ck_pr_fence_atomic();

	/* After a successful insert, forward the tail to the new entry. */
	/* A failed CAS here is benign: another thread already advanced tail. */
	update.generation = tail.generation + 1;
	ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
	return true;
}
353
/*
 * Lock-free dequeue; safe against concurrent enqueuers and dequeuers.
 * value must point to a void * slot; on success the dequeued payload is
 * written through it, the detached stub node is returned via *garbage
 * for caller-managed reclamation, and true is returned. Returns false
 * only when the queue is empty. NOTE: *garbage may still be referenced
 * transiently by concurrent readers; re-use must be mediated by a safe
 * memory reclamation scheme chosen by the caller.
 */
CK_CC_INLINE static bool
ck_fifo_mpmc_dequeue(struct ck_fifo_mpmc *fifo,
		     void *value,
		     struct ck_fifo_mpmc_entry **garbage)
{
	struct ck_fifo_mpmc_pointer head, tail, next, update;

	for (;;) {
		/* Snapshot head, tail and head->next, generation before pointer. */
		head.generation = ck_pr_load_ptr(&fifo->head.generation);
		ck_pr_fence_load();
		head.pointer = ck_pr_load_ptr(&fifo->head.pointer);
		tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
		ck_pr_fence_load();
		tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);

		next.generation = ck_pr_load_ptr(&head.pointer->next.generation);
		ck_pr_fence_load();
		next.pointer = ck_pr_load_ptr(&head.pointer->next.pointer);

		update.pointer = next.pointer;
		if (head.pointer == tail.pointer) {
			/*
			 * The head is guaranteed to always point at a stub
			 * entry. If the stub entry has no references then the
			 * queue is empty.
			 */
			if (next.pointer == NULL)
				return false;

			/* Forward the tail pointer if necessary. */
			update.generation = tail.generation + 1;
			ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
		} else {
			/*
			 * It is possible for head snapshot to have been
			 * re-used. Avoid dereferencing during enqueue
			 * re-use.
			 */
			if (next.pointer == NULL)
				continue;

			/* Save value before commit. */
			*(void **)value = ck_pr_load_ptr(&next.pointer->value);

			/* Forward the head pointer to the next entry. */
			update.generation = head.generation + 1;
			if (ck_pr_cas_ptr_2(&fifo->head, &head, &update) == true)
				break;
		}
	}

	/* The old stub is now exclusively ours to hand back for reclamation. */
	*garbage = head.pointer;
	return true;
}
408
/*
 * Single-shot variant of ck_fifo_mpmc_dequeue: performs at most one CAS
 * attempt to advance head. Returns true on success, with the payload
 * written through value (a void * slot) and the detached stub returned
 * via *garbage. Returns false both when the queue is empty and on any
 * contention (lagging tail — which it helps forward — a re-used snapshot,
 * or a lost CAS race); the caller may retry.
 */
CK_CC_INLINE static bool
ck_fifo_mpmc_trydequeue(struct ck_fifo_mpmc *fifo,
			void *value,
			struct ck_fifo_mpmc_entry **garbage)
{
	struct ck_fifo_mpmc_pointer head, tail, next, update;

	/* Snapshot head, tail and head->next, generation before pointer. */
	head.generation = ck_pr_load_ptr(&fifo->head.generation);
	ck_pr_fence_load();
	head.pointer = ck_pr_load_ptr(&fifo->head.pointer);

	tail.generation = ck_pr_load_ptr(&fifo->tail.generation);
	ck_pr_fence_load();
	tail.pointer = ck_pr_load_ptr(&fifo->tail.pointer);

	next.generation = ck_pr_load_ptr(&head.pointer->next.generation);
	ck_pr_fence_load();
	next.pointer = ck_pr_load_ptr(&head.pointer->next.pointer);

	update.pointer = next.pointer;
	if (head.pointer == tail.pointer) {
		/*
		 * The head is guaranteed to always point at a stub
		 * entry. If the stub entry has no references then the
		 * queue is empty.
		 */
		if (next.pointer == NULL)
			return false;

		/* Forward the tail pointer if necessary. */
		update.generation = tail.generation + 1;
		ck_pr_cas_ptr_2(&fifo->tail, &tail, &update);
		return false;
	} else {
		/*
		 * It is possible for head snapshot to have been
		 * re-used. Avoid dereferencing during enqueue.
		 */
		if (next.pointer == NULL)
			return false;

		/* Save value before commit. */
		*(void **)value = ck_pr_load_ptr(&next.pointer->value);

		/* Forward the head pointer to the next entry. */
		update.generation = head.generation + 1;
		if (ck_pr_cas_ptr_2(&fifo->head, &head, &update) == false)
			return false;
	}

	/* The old stub is now exclusively ours to hand back for reclamation. */
	*garbage = head.pointer;
	return true;
}
462
/*
 * Unsynchronized accessors: these perform plain (non-atomic, unfenced)
 * loads, so callers must supply their own serialization with respect to
 * concurrent enqueue/dequeue operations.
 */
#define CK_FIFO_MPMC_ISEMPTY(f) ((f)->head.pointer->next.pointer == NULL)
#define CK_FIFO_MPMC_FIRST(f)   ((f)->head.pointer->next.pointer)
#define CK_FIFO_MPMC_NEXT(m)    ((m)->next.pointer)
/* Iterate over live entries; the _SAFE variant caches next in T so the
 * current entry may be modified/unlinked during iteration. */
#define CK_FIFO_MPMC_FOREACH(fifo, entry)                               \
        for ((entry) = CK_FIFO_MPMC_FIRST(fifo);                        \
             (entry) != NULL;                                           \
             (entry) = CK_FIFO_MPMC_NEXT(entry))
#define CK_FIFO_MPMC_FOREACH_SAFE(fifo, entry, T)                       \
        for ((entry) = CK_FIFO_MPMC_FIRST(fifo);                        \
             (entry) != NULL && ((T) = (entry)->next.pointer, 1);       \
             (entry) = (T))
474
475 #endif /* CK_F_FIFO_MPMC */
476 #endif /* CK_F_PR_CAS_PTR_2 */
477
478 #endif /* CK_FIFO_H */