/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 * $FreeBSD: release/9.0.0/sys/sys/queue.h 221843 2011-05-13 15:49:23Z mdf $
 */
/*
 * This file defines three types of data structures: singly-linked lists,
 * singly-linked tail queues and lists.
 *
 * A singly-linked list is headed by a single forward pointer. The elements
 * are singly linked for minimum space and pointer manipulation overhead at
 * the expense of O(n) removal for arbitrary elements. New elements can be
 * added to the list after an existing element or at the head of the list.
 * Elements being removed from the head of the list should use the explicit
 * macro for this purpose for optimum efficiency. A singly-linked list may
 * only be traversed in the forward direction. Singly-linked lists are ideal
 * for applications with large datasets and few or no removals or for
 * implementing a LIFO queue.
 *
 * A singly-linked tail queue is headed by a pair of pointers, one to the
 * head of the list and the other to the tail of the list. The elements are
 * singly linked for minimum space and pointer manipulation overhead at the
 * expense of O(n) removal for arbitrary elements. New elements can be added
 * to the list after an existing element, at the head of the list, or at the
 * end of the list. Elements being removed from the head of the tail queue
 * should use the explicit macro for this purpose for optimum efficiency.
 * A singly-linked tail queue may only be traversed in the forward direction.
 * Singly-linked tail queues are ideal for applications with large datasets
 * and few or no removals or for implementing a FIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header). The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before
 * or after an existing element or at the head of the list. A list
 * may only be traversed in the forward direction.
 *
 * It is safe to use _FOREACH/_FOREACH_SAFE in the presence of concurrent
 * modifications to the list. Writers to these lists must, on the other hand,
 * implement writer-side synchronization. The _SWAP operations are not atomic.
 * This facility is currently unsupported on architectures such as the Alpha
 * which require load-depend memory fences.
 *
 *				CK_SLIST	CK_LIST	CK_STAILQ
 * _HEAD			+		+	+
 * _HEAD_INITIALIZER		+		+	+
 * _ENTRY			+		+	+
 * _INIT			+		+	+
 * _EMPTY			+		+	+
 * _FIRST			+		+	+
 * _NEXT			+		+	+
 * _FOREACH			+		+	+
 * _FOREACH_SAFE		+		+	+
 * _INSERT_HEAD			+		+	+
 * _INSERT_BEFORE		-		+	-
 * _INSERT_AFTER		+		+	+
 * _INSERT_TAIL			-		-	+
 * _REMOVE_AFTER		+		-	+
 * _REMOVE_HEAD			+		-	+
 * _REMOVE			+		+	+
 * _SWAP			+		+	+
 * _MOVE			+		+	+
 */
/*
 * Singly-linked List declarations.
 */
/* Declare an SLIST head structure named `name' holding elements of `type'. */
#define	CK_SLIST_HEAD(name, type)						\
	struct name {								\
		struct type *cslh_first;	/* first element */		\
	}

/* Static initializer for an empty SLIST head. */
#define	CK_SLIST_HEAD_INITIALIZER(head)						\
	{ NULL }

/* Declare the per-element linkage embedded in each `type'. */
#define	CK_SLIST_ENTRY(type)							\
	struct {								\
		struct type *csle_next;	/* next element */			\
	}
/*
 * Singly-linked List functions.
 */
/* True when the list has no elements; uses an atomic load of the head. */
#define	CK_SLIST_EMPTY(head)							\
	(ck_pr_load_ptr(&(head)->cslh_first) == NULL)

/* Atomically load the first element (NULL if empty). */
#define	CK_SLIST_FIRST(head)							\
	(ck_pr_load_ptr(&(head)->cslh_first))

/* Atomically load the successor of `elm'. */
#define	CK_SLIST_NEXT(elm, field)						\
	ck_pr_load_ptr(&((elm)->field.csle_next))

/* Forward traversal; safe against concurrent writers per the file header. */
#define	CK_SLIST_FOREACH(var, head, field)					\
	for ((var) = CK_SLIST_FIRST((head));					\
	    (var);								\
	    (var) = CK_SLIST_NEXT((var), field))

/* As CK_SLIST_FOREACH, but resumes from `var' when it is non-NULL. */
#define	CK_SLIST_FOREACH_FROM(var, head, field)					\
	for ((var) = ((var) != NULL ? (var) : CK_SLIST_FIRST((head)));		\
	    (var);								\
	    (var) = CK_SLIST_NEXT((var), field))

/* Traversal that caches the successor in `tvar' so `var' may be removed. */
#define	CK_SLIST_FOREACH_SAFE(var, head, field, tvar)				\
	for ((var) = CK_SLIST_FIRST(head);					\
	    (var) && ((tvar) = CK_SLIST_NEXT(var, field), 1);			\
	    (var) = (tvar))

/* Traversal that also tracks the address of the previous next pointer. */
#define	CK_SLIST_FOREACH_PREVPTR(var, varp, head, field)			\
	for ((varp) = &(head)->cslh_first;					\
	    ((var) = ck_pr_load_ptr(varp)) != NULL;				\
	    (varp) = &(var)->field.csle_next)
/* Initialize the head to empty; the fence publishes the store to readers. */
#define	CK_SLIST_INIT(head) do {						\
	ck_pr_store_ptr(&(head)->cslh_first, NULL);				\
	ck_pr_fence_store();							\
} while (0)

/*
 * Insert b after a.  b's next pointer is fully initialized before the
 * fenced, atomic store that links it in, so concurrent readers never
 * observe a half-built element.
 */
#define	CK_SLIST_INSERT_AFTER(a, b, field) do {					\
	(b)->field.csle_next = (a)->field.csle_next;				\
	ck_pr_fence_store();							\
	ck_pr_store_ptr(&(a)->field.csle_next, b);				\
} while (0)

/* Insert elm at the head; same publish-after-fence discipline as above. */
#define	CK_SLIST_INSERT_HEAD(head, elm, field) do {				\
	(elm)->field.csle_next = (head)->cslh_first;				\
	ck_pr_fence_store();							\
	ck_pr_store_ptr(&(head)->cslh_first, elm);				\
} while (0)

/* Insert elm before slistelm through the known previous next pointer. */
#define	CK_SLIST_INSERT_PREVPTR(prevp, slistelm, elm, field) do {		\
	(elm)->field.csle_next = (slistelm);					\
	ck_pr_fence_store();							\
	ck_pr_store_ptr(prevp, elm);						\
} while (0)

/* Unlink the element following elm with a single atomic store. */
#define	CK_SLIST_REMOVE_AFTER(elm, field) do {					\
	ck_pr_store_ptr(&(elm)->field.csle_next,				\
	    (elm)->field.csle_next->field.csle_next);				\
} while (0)
199 #define CK_SLIST_REMOVE(head, elm, type, field) do { \
200 if ((head)->cslh_first == (elm)) { \
201 CK_SLIST_REMOVE_HEAD((head), field); \
203 struct type *curelm = (head)->cslh_first; \
204 while (curelm->field.csle_next != (elm)) \
205 curelm = curelm->field.csle_next; \
206 CK_SLIST_REMOVE_AFTER(curelm, field); \
210 #define CK_SLIST_REMOVE_HEAD(head, field) do { \
211 ck_pr_store_ptr(&(head)->cslh_first, \
212 (head)->cslh_first->field.csle_next); \
215 #define CK_SLIST_REMOVE_PREVPTR(prevp, elm, field) do { \
216 ck_pr_store_ptr(prevptr, (elm)->field.csle_next); \
219 #define CK_SLIST_MOVE(head1, head2, field) do { \
220 ck_pr_store_ptr(&(head1)->cslh_first, (head2)->cslh_first); \
/*
 * This operation is not applied atomically.
 */
/*
 * Exchange the contents of two list heads.  Plain (non-atomic) stores:
 * callers must exclude concurrent readers and writers for the duration.
 */
#define	CK_SLIST_SWAP(a, b, type) do {						\
	struct type *swap_first = (a)->cslh_first;				\
	(a)->cslh_first = (b)->cslh_first;					\
	(b)->cslh_first = swap_first;						\
} while (0)
/*
 * Singly-linked Tail queue declarations.
 */
/* Declare an STAILQ head: first-element pointer plus tail next-pointer addr. */
#define	CK_STAILQ_HEAD(name, type)						\
	struct name {								\
		struct type *cstqh_first;/* first element */			\
		struct type **cstqh_last;/* addr of last next element */	\
	}

/* Static initializer for an empty STAILQ: tail points back at the head. */
#define	CK_STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).cstqh_first }

/* Declare the per-element linkage embedded in each `type'. */
#define	CK_STAILQ_ENTRY(type)							\
	struct {								\
		struct type *cstqe_next;	/* next element */		\
	}
/*
 * Singly-linked Tail queue functions.
 */
/* Append head2's chain to head1 and reset head2 to empty. */
#define	CK_STAILQ_CONCAT(head1, head2) do {					\
	if ((head2)->cstqh_first != NULL) {					\
		ck_pr_store_ptr((head1)->cstqh_last, (head2)->cstqh_first);	\
		ck_pr_fence_store();						\
		(head1)->cstqh_last = (head2)->cstqh_last;			\
		CK_STAILQ_INIT((head2));					\
	}									\
} while (0)

/* True when the queue has no elements; atomic load of the head. */
#define	CK_STAILQ_EMPTY(head)	(ck_pr_load_ptr(&(head)->cstqh_first) == NULL)

/* Atomically load the first element (NULL if empty). */
#define	CK_STAILQ_FIRST(head)	(ck_pr_load_ptr(&(head)->cstqh_first))

/* Forward traversal; safe against concurrent writers per the file header. */
#define	CK_STAILQ_FOREACH(var, head, field)					\
	for ((var) = CK_STAILQ_FIRST((head));					\
	    (var);								\
	    (var) = CK_STAILQ_NEXT((var), field))

/* As CK_STAILQ_FOREACH, but resumes from `var' when it is non-NULL. */
#define	CK_STAILQ_FOREACH_FROM(var, head, field)				\
	for ((var) = ((var) != NULL ? (var) : CK_STAILQ_FIRST((head)));		\
	    (var);								\
	    (var) = CK_STAILQ_NEXT((var), field))

/* Traversal that caches the successor in `tvar' so `var' may be removed. */
#define	CK_STAILQ_FOREACH_SAFE(var, head, field, tvar)				\
	for ((var) = CK_STAILQ_FIRST((head));					\
	    (var) && ((tvar) = CK_STAILQ_NEXT((var), field), 1);		\
	    (var) = (tvar))

/* Initialize the queue to empty; the fence publishes the head store. */
#define	CK_STAILQ_INIT(head) do {						\
	ck_pr_store_ptr(&(head)->cstqh_first, NULL);				\
	ck_pr_fence_store();							\
	(head)->cstqh_last = &(head)->cstqh_first;				\
} while (0)
/*
 * Insert elm after tqelm.  elm's next pointer is initialized before the
 * fenced, atomic store that links it in; the tail pointer is updated when
 * elm becomes the last element.
 */
#define	CK_STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {			\
	(elm)->field.cstqe_next = (tqelm)->field.cstqe_next;			\
	ck_pr_fence_store();							\
	ck_pr_store_ptr(&(tqelm)->field.cstqe_next, elm);			\
	if ((elm)->field.cstqe_next == NULL)					\
		(head)->cstqh_last = &(elm)->field.cstqe_next;			\
} while (0)

/* Insert elm at the head; updates the tail pointer if the queue was empty. */
#define	CK_STAILQ_INSERT_HEAD(head, elm, field) do {				\
	(elm)->field.cstqe_next = (head)->cstqh_first;				\
	ck_pr_fence_store();							\
	ck_pr_store_ptr(&(head)->cstqh_first, elm);				\
	if ((elm)->field.cstqe_next == NULL)					\
		(head)->cstqh_last = &(elm)->field.cstqe_next;			\
} while (0)

/* Insert elm at the tail through the stored last-next-pointer address. */
#define	CK_STAILQ_INSERT_TAIL(head, elm, field) do {				\
	(elm)->field.cstqe_next = NULL;						\
	ck_pr_fence_store();							\
	ck_pr_store_ptr((head)->cstqh_last, (elm));				\
	(head)->cstqh_last = &(elm)->field.cstqe_next;				\
} while (0)

/* Atomically load the successor of elm. */
#define	CK_STAILQ_NEXT(elm, field)						\
	(ck_pr_load_ptr(&(elm)->field.cstqe_next))
313 #define CK_STAILQ_REMOVE(head, elm, type, field) do { \
314 if ((head)->cstqh_first == (elm)) { \
315 CK_STAILQ_REMOVE_HEAD((head), field); \
317 struct type *curelm = (head)->cstqh_first; \
318 while (curelm->field.cstqe_next != (elm)) \
319 curelm = curelm->field.cstqe_next; \
320 CK_STAILQ_REMOVE_AFTER(head, curelm, field); \
324 #define CK_STAILQ_REMOVE_AFTER(head, elm, field) do { \
325 ck_pr_store_ptr(&(elm)->field.cstqe_next, \
326 (elm)->field.cstqe_next->field.cstqe_next); \
327 if ((elm)->field.cstqe_next == NULL) \
328 (head)->cstqh_last = &(elm)->field.cstqe_next; \
331 #define CK_STAILQ_REMOVE_HEAD(head, field) do { \
332 ck_pr_store_ptr(&(head)->cstqh_first, \
333 (head)->cstqh_first->field.cstqe_next); \
334 if ((head)->cstqh_first == NULL) \
335 (head)->cstqh_last = &(head)->cstqh_first; \
338 #define CK_STAILQ_MOVE(head1, head2, field) do { \
339 ck_pr_store_ptr(&(head1)->cstqh_first, (head2)->cstqh_first); \
340 (head1)->cstqh_last = (head2)->cstqh_last; \
341 if ((head2)->cstqh_last == &(head2)->cstqh_first) \
342 (head1)->cstqh_last = &(head1)->cstqh_first; \
/*
 * This operation is not applied atomically.
 */
/*
 * Exchange the contents of two tail-queue heads, repairing the tail
 * pointer of whichever side becomes empty.  Not atomic: callers must
 * exclude concurrent readers and writers for the duration.
 */
#define	CK_STAILQ_SWAP(head1, head2, type) do {					\
	struct type *swap_first = CK_STAILQ_FIRST(head1);			\
	struct type **swap_last = (head1)->cstqh_last;				\
	CK_STAILQ_FIRST(head1) = CK_STAILQ_FIRST(head2);			\
	(head1)->cstqh_last = (head2)->cstqh_last;				\
	CK_STAILQ_FIRST(head2) = swap_first;					\
	(head2)->cstqh_last = swap_last;					\
	if (CK_STAILQ_EMPTY(head1))						\
		(head1)->cstqh_last = &(head1)->cstqh_first;			\
	if (CK_STAILQ_EMPTY(head2))						\
		(head2)->cstqh_last = &(head2)->cstqh_first;			\
} while (0)
/* Declare a doubly-linked LIST head named `name'. */
#define	CK_LIST_HEAD(name, type)						\
	struct name {								\
		struct type *clh_first;	/* first element */			\
	}

/* Static initializer for an empty LIST head. */
#define	CK_LIST_HEAD_INITIALIZER(head)						\
	{ NULL }

/* Per-element linkage: forward pointer plus address of previous next. */
#define	CK_LIST_ENTRY(type)							\
	struct {								\
		struct type *cle_next;	/* next element */			\
		struct type **cle_prev;	/* address of previous next element */	\
	}

#define	CK_LIST_FIRST(head)		ck_pr_load_ptr(&(head)->clh_first)
#define	CK_LIST_EMPTY(head)		(CK_LIST_FIRST(head) == NULL)
#define	CK_LIST_NEXT(elm, field)	ck_pr_load_ptr(&(elm)->field.cle_next)
/* Forward traversal; safe against concurrent writers per the file header. */
#define	CK_LIST_FOREACH(var, head, field)					\
	for ((var) = CK_LIST_FIRST((head));					\
	    (var);								\
	    (var) = CK_LIST_NEXT((var), field))

/* As CK_LIST_FOREACH, but resumes from `var' when it is non-NULL. */
#define	CK_LIST_FOREACH_FROM(var, head, field)					\
	for ((var) = ((var) != NULL ? (var) : CK_LIST_FIRST((head)));		\
	    (var);								\
	    (var) = CK_LIST_NEXT((var), field))

/* Traversal that caches the successor in `tvar' so `var' may be removed. */
#define	CK_LIST_FOREACH_SAFE(var, head, field, tvar)				\
	for ((var) = CK_LIST_FIRST((head));					\
	    (var) && ((tvar) = CK_LIST_NEXT((var), field), 1);			\
	    (var) = (tvar))

/* Initialize the head to empty; the fence publishes the store to readers. */
#define	CK_LIST_INIT(head) do {							\
	ck_pr_store_ptr(&(head)->clh_first, NULL);				\
	ck_pr_fence_store();							\
} while (0)
/*
 * Insert elm after listelm.  Both of elm's link fields are initialized
 * before the fenced, atomic store that makes it reachable, so concurrent
 * readers never observe a half-built element.
 */
#define	CK_LIST_INSERT_AFTER(listelm, elm, field) do {				\
	(elm)->field.cle_next = (listelm)->field.cle_next;			\
	(elm)->field.cle_prev = &(listelm)->field.cle_next;			\
	ck_pr_fence_store();							\
	if ((listelm)->field.cle_next != NULL)					\
		(listelm)->field.cle_next->field.cle_prev = &(elm)->field.cle_next;\
	ck_pr_store_ptr(&(listelm)->field.cle_next, elm);			\
} while (0)

/* Insert elm before listelm through listelm's previous next pointer. */
#define	CK_LIST_INSERT_BEFORE(listelm, elm, field) do {				\
	(elm)->field.cle_prev = (listelm)->field.cle_prev;			\
	(elm)->field.cle_next = (listelm);					\
	ck_pr_fence_store();							\
	ck_pr_store_ptr((listelm)->field.cle_prev, (elm));			\
	(listelm)->field.cle_prev = &(elm)->field.cle_next;			\
} while (0)

/* Insert elm at the head of the list. */
#define	CK_LIST_INSERT_HEAD(head, elm, field) do {				\
	(elm)->field.cle_next = (head)->clh_first;				\
	ck_pr_fence_store();							\
	if ((elm)->field.cle_next != NULL)					\
		(head)->clh_first->field.cle_prev = &(elm)->field.cle_next;	\
	ck_pr_store_ptr(&(head)->clh_first, elm);				\
	(elm)->field.cle_prev = &(head)->clh_first;				\
} while (0)
/*
 * Unlink elm in O(1) via its back pointer; fixes the successor's back
 * pointer when one exists.
 */
#define	CK_LIST_REMOVE(elm, field) do {						\
	ck_pr_store_ptr((elm)->field.cle_prev, (elm)->field.cle_next);		\
	if ((elm)->field.cle_next != NULL)					\
		(elm)->field.cle_next->field.cle_prev = (elm)->field.cle_prev;	\
} while (0)

/* Transfer head2's chain onto head1, repointing the first back pointer. */
#define	CK_LIST_MOVE(head1, head2, field) do {					\
	ck_pr_store_ptr(&(head1)->clh_first, (head2)->clh_first);		\
	if ((head1)->clh_first != NULL)						\
		(head1)->clh_first->field.cle_prev = &(head1)->clh_first;	\
} while (0)
/*
 * This operation is not applied atomically.
 */
/*
 * Exchange the contents of two list heads, repointing each new first
 * element's back pointer at its new head.  Plain stores: callers must
 * exclude concurrent readers and writers for the duration.
 */
#define	CK_LIST_SWAP(head1, head2, type, field) do {				\
	struct type *swap_tmp = (head1)->clh_first;				\
	(head1)->clh_first = (head2)->clh_first;				\
	(head2)->clh_first = swap_tmp;						\
	if ((swap_tmp = (head1)->clh_first) != NULL)				\
		swap_tmp->field.cle_prev = &(head1)->clh_first;			\
	if ((swap_tmp = (head2)->clh_first) != NULL)				\
		swap_tmp->field.cle_prev = &(head2)->clh_first;			\
} while (0)
453 #endif /* CK_QUEUE_H */