/*-
 * Copyright (c) 2007-2009 Kip Macy <kmacy@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef	_SYS_BUF_RING_H_
#define	_SYS_BUF_RING_H_

#include <machine/cpu.h>

#if defined(INVARIANTS) && !defined(DEBUG_BUFRING)
#define	DEBUG_BUFRING 1
#endif

#ifdef DEBUG_BUFRING
#include <sys/lock.h>
#include <sys/mutex.h>
#endif

struct buf_ring {
	volatile uint32_t	br_prod_head;
	volatile uint32_t	br_prod_tail;
	int			br_prod_size;
	int			br_prod_mask;
	uint64_t		br_drops;
	volatile uint32_t	br_cons_head __aligned(CACHE_LINE_SIZE);
	volatile uint32_t	br_cons_tail;
	int			br_cons_size;
	int			br_cons_mask;
#ifdef DEBUG_BUFRING
	struct mtx		*br_lock;
#endif
	void			*br_ring[0] __aligned(CACHE_LINE_SIZE);
};

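/*
 * Illustrative sketch (not part of this header): the ring size is a power
 * of two, so br_prod_mask/br_cons_mask are size - 1 and indices wrap by
 * masking rather than by a modulo operation.  The example_index_wrap()
 * name below is hypothetical and used only for illustration.
 */
#if 0
static uint32_t
example_index_wrap(void)
{
	uint32_t size = 8;		/* power-of-two ring size (br_prod_size) */
	uint32_t mask = size - 1;	/* corresponds to br_prod_mask/br_cons_mask */
	uint32_t idx = 7;

	idx = (idx + 1) & mask;		/* wraps from 7 back to 0 */
	return (idx);
}
#endif
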
/*
 * multi-producer safe lock-free ring buffer enqueue
 *
 */
static __inline int
buf_ring_enqueue(struct buf_ring *br, void *buf)
{
	uint32_t prod_head, prod_next, cons_tail;
#ifdef DEBUG_BUFRING
	int i;

	for (i = br->br_cons_head; i != br->br_prod_head;
	     i = ((i + 1) & br->br_cons_mask))
		if (br->br_ring[i] == buf)
			panic("buf=%p already enqueue at %d prod=%d cons=%d",
			    buf, i, br->br_prod_tail, br->br_cons_tail);
#endif
	critical_enter();
	do {
		prod_head = br->br_prod_head;
		prod_next = (prod_head + 1) & br->br_prod_mask;
		cons_tail = br->br_cons_tail;

		if (prod_next == cons_tail) {
			rmb();
			if (prod_head == br->br_prod_head &&
			    cons_tail == br->br_cons_tail) {
				br->br_drops++;
				critical_exit();
				return (ENOBUFS);
			}
			continue;
		}
	} while (!atomic_cmpset_acq_int(&br->br_prod_head, prod_head, prod_next));
#ifdef DEBUG_BUFRING
	if (br->br_ring[prod_head] != NULL)
		panic("dangling value in enqueue");
#endif

	br->br_ring[prod_head] = buf;

	/*
	 * If there are other enqueues in progress
	 * that preceded us, we need to wait for them
	 * to complete
	 */
	while (br->br_prod_tail != prod_head)
		cpu_spinwait();
	atomic_store_rel_int(&br->br_prod_tail, prod_next);
	critical_exit();
	return (0);
}

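/*
 * Illustrative sketch (not part of this header): a producer, e.g. a driver
 * transmit path, enqueues a buffer and handles the ENOBUFS return when the
 * ring is full.  The example_produce() wrapper is hypothetical; how a full
 * ring is handled (drop, requeue, backpressure) is up to the caller.
 */
#if 0
static int
example_produce(struct buf_ring *br, struct mbuf *m)
{
	int error;

	error = buf_ring_enqueue(br, m);
	if (error == ENOBUFS) {
		/* Ring was full; this sketch simply drops the buffer. */
		m_freem(m);
	}
	return (error);
}
#endif
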
/*
 * multi-consumer safe dequeue
 *
 */
static __inline void *
buf_ring_dequeue_mc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	void *buf;

	critical_enter();
	do {
		cons_head = br->br_cons_head;
		cons_next = (cons_head + 1) & br->br_cons_mask;

		if (cons_head == br->br_prod_tail) {
			critical_exit();
			return (NULL);
		}
	} while (!atomic_cmpset_acq_int(&br->br_cons_head, cons_head, cons_next));

	buf = br->br_ring[cons_head];
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	/*
	 * If there are other dequeues in progress
	 * that preceded us, we need to wait for them
	 * to complete
	 */
	while (br->br_cons_tail != cons_head)
		cpu_spinwait();

	atomic_store_rel_int(&br->br_cons_tail, cons_next);
	critical_exit();

	return (buf);
}

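/*
 * Illustrative sketch (not part of this header): any number of consumers
 * may drain the ring concurrently with buf_ring_dequeue_mc(); each call
 * returns one buffer, or NULL when the ring is empty.  The
 * example_drain_mc() name is hypothetical.
 */
#if 0
static void
example_drain_mc(struct buf_ring *br)
{
	void *buf;

	while ((buf = buf_ring_dequeue_mc(br)) != NULL) {
		/* Process buf; this loop is safe to run on several CPUs at once. */
	}
}
#endif
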
/*
 * single-consumer dequeue
 * use where dequeue is protected by a lock
 * e.g. a network driver's tx queue lock
 */
static __inline void *
buf_ring_dequeue_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
#ifdef PREFETCH_DEFINED
	uint32_t cons_next_next;
#endif
	uint32_t prod_tail;
	void *buf;

	/*
	 * This is a workaround to allow using buf_ring on ARM and ARM64.
	 * ARM64TODO: Fix buf_ring in a generic way.
	 * REMARKS: It is suspected that br_cons_head does not require a
	 *   load_acq operation, but this change was extensively tested
	 *   and confirmed to be working. To be reviewed once again in
	 *   FreeBSD-12.
	 *
	 * This prevents the following situation:
	 *
	 * Core(0) - buf_ring_enqueue()                  Core(1) - buf_ring_dequeue_sc()
	 * --------------------------------------------  ----------------------------------------------
	 *
	 *                                               cons_head = br->br_cons_head;
	 * atomic_cmpset_acq_32(&br->br_prod_head, ...);
	 *                                               buf = br->br_ring[cons_head];     <see <1>>
	 * br->br_ring[prod_head] = buf;
	 * atomic_store_rel_32(&br->br_prod_tail, ...);
	 *                                               prod_tail = br->br_prod_tail;
	 *                                               if (cons_head == prod_tail)
	 *                                                       return (NULL);
	 *                                               <condition is false and code uses invalid(old) buf>
	 *
	 * <1> The load (on core 1) from br->br_ring[cons_head] can be reordered
	 *     (speculatively read) by the CPU.
	 */
#if defined(__arm__) || defined(__aarch64__)
	cons_head = atomic_load_acq_32(&br->br_cons_head);
#else
	cons_head = br->br_cons_head;
#endif
	prod_tail = atomic_load_acq_32(&br->br_prod_tail);

	cons_next = (cons_head + 1) & br->br_cons_mask;
#ifdef PREFETCH_DEFINED
	cons_next_next = (cons_head + 2) & br->br_cons_mask;
#endif

	if (cons_head == prod_tail)
		return (NULL);

#ifdef PREFETCH_DEFINED
	if (cons_next != prod_tail) {
		prefetch(br->br_ring[cons_next]);
		if (cons_next_next != prod_tail)
			prefetch(br->br_ring[cons_next_next]);
	}
#endif

	br->br_cons_head = cons_next;
	buf = br->br_ring[cons_head];

#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
	if (!mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
	if (br->br_cons_tail != cons_head)
		panic("inconsistent list cons_tail=%d cons_head=%d",
		    br->br_cons_tail, cons_head);
#endif
	br->br_cons_tail = cons_next;
	return (buf);
}

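/*
 * Illustrative sketch (not part of this header): the single-consumer
 * dequeue must be serialized by the caller, typically with the same mutex
 * that is passed to buf_ring_alloc() (e.g. a driver's tx queue lock).  The
 * example_drain_sc() name and the txq_lock parameter are hypothetical.
 */
#if 0
static void
example_drain_sc(struct buf_ring *br, struct mtx *txq_lock)
{
	void *buf;

	mtx_lock(txq_lock);
	while ((buf = buf_ring_dequeue_sc(br)) != NULL) {
		/* Transmit or otherwise consume buf. */
	}
	mtx_unlock(txq_lock);
}
#endif
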
/*
 * single-consumer advance after a peek
 * use where it is protected by a lock
 * e.g. a network driver's tx queue lock
 */
static __inline void
buf_ring_advance_sc(struct buf_ring *br)
{
	uint32_t cons_head, cons_next;
	uint32_t prod_tail;

	cons_head = br->br_cons_head;
	prod_tail = br->br_prod_tail;

	cons_next = (cons_head + 1) & br->br_cons_mask;
	if (cons_head == prod_tail)
		return;
	br->br_cons_head = cons_next;
#ifdef DEBUG_BUFRING
	br->br_ring[cons_head] = NULL;
#endif
	br->br_cons_tail = cons_next;
}

/*
 * Used to return a buffer (most likely already there)
 * to the top of the ring. The caller should *not*
 * have used any dequeue to pull it out of the ring
 * but instead should have used the peek() function.
 * This is normally used where the transmit queue
 * of a driver is full, and an mbuf must be returned.
 * Most likely what is in the ring buffer is what
 * is being put back (since it was not removed), but
 * sometimes the lower transmit function may have
 * done a pullup or other function that will have
 * changed it. As an optimization we always put it
 * back (since jhb says the store is probably cheaper);
 * if we have to do a multi-queue version we will need
 * the compare and an atomic.
 */
static __inline void
buf_ring_putback_sc(struct buf_ring *br, void *new)
{
	KASSERT(br->br_cons_head != br->br_prod_tail,
	    ("Buf-Ring has none in putback"));
	br->br_ring[br->br_cons_head] = new;
}

/*
 * return a pointer to the first entry in the ring
 * without modifying it, or NULL if the ring is empty
 * race-prone if not protected by a lock
 */
static __inline void *
buf_ring_peek(struct buf_ring *br)
{

#ifdef DEBUG_BUFRING
	if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif
	/*
	 * I believe it is safe to not have a memory barrier here because we
	 * control cons, and tail is at worst a lagging indicator, so in the
	 * worst case we might return NULL immediately after a buffer has
	 * been enqueued
	 */
	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

	return (br->br_ring[br->br_cons_head]);
}

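/*
 * Illustrative sketch (not part of this header): the usual single-consumer
 * pattern is to peek at the head of the ring, try to hand the buffer to
 * the hardware, and then either advance past it on success or put it back
 * (possibly modified) on failure.  The example_peek_transmit() and
 * example_hw_transmit() names are hypothetical; the caller is assumed to
 * hold the consumer lock.
 */
#if 0
static void
example_peek_transmit(struct buf_ring *br)
{
	struct mbuf *m;

	while ((m = buf_ring_peek(br)) != NULL) {
		if (example_hw_transmit(m) != 0) {
			/* Hardware busy: return the (possibly changed) buffer. */
			buf_ring_putback_sc(br, m);
			break;
		}
		/* Consumed: advance past the entry we peeked at. */
		buf_ring_advance_sc(br);
	}
}
#endif
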
static __inline void *
buf_ring_peek_clear_sc(struct buf_ring *br)
{
#ifdef DEBUG_BUFRING
	void *ret;

	if (!mtx_owned(br->br_lock))
		panic("lock not held on single consumer dequeue");
#endif
	/*
	 * I believe it is safe to not have a memory barrier here because we
	 * control cons, and tail is at worst a lagging indicator, so in the
	 * worst case we might return NULL immediately after a buffer has
	 * been enqueued
	 */
	if (br->br_cons_head == br->br_prod_tail)
		return (NULL);

#ifdef DEBUG_BUFRING
	/*
	 * Single consumer, i.e. cons_head will not move while we are
	 * running, so atomic_swap_ptr() is not necessary here.
	 */
	ret = br->br_ring[br->br_cons_head];
	br->br_ring[br->br_cons_head] = NULL;
	return (ret);
#else
	return (br->br_ring[br->br_cons_head]);
#endif
}

static __inline int
buf_ring_full(struct buf_ring *br)
{

	return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
}

static __inline int
buf_ring_empty(struct buf_ring *br)
{

	return (br->br_cons_head == br->br_prod_tail);
}

static __inline int
buf_ring_count(struct buf_ring *br)
{

	return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
	    & br->br_prod_mask);
}

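/*
 * Illustrative sketch (not part of this header): buf_ring_alloc() and
 * buf_ring_free(), prototyped below, create and destroy a ring.  The count
 * must be a power of two, and the mutex (used for the DEBUG_BUFRING
 * single-consumer assertions) may be the consumer's queue lock.  The
 * example_* names are hypothetical; M_DEVBUF is used only as an example
 * malloc type.
 */
#if 0
static struct buf_ring *
example_create_ring(struct mtx *txq_lock)
{
	/* 256-entry ring, sleepable allocation. */
	return (buf_ring_alloc(256, M_DEVBUF, M_WAITOK, txq_lock));
}

static void
example_destroy_ring(struct buf_ring *br)
{
	buf_ring_free(br, M_DEVBUF);
}
#endif
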
struct buf_ring *buf_ring_alloc(int count, struct malloc_type *type, int flags,
    struct mtx *);
void buf_ring_free(struct buf_ring *br, struct malloc_type *type);

#endif /* _SYS_BUF_RING_H_ */