/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <machine/cpu.h>
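
/*
 * Fall back to the fully ordered atomic_cmpset_64 on platforms that provide
 * the 64-bit op but not its acquire/release variants.
 */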
#if defined(__i386__)
#define atomic_cmpset_acq_64 atomic_cmpset_64
#define atomic_cmpset_rel_64 atomic_cmpset_64
#endif

#include <net/mp_ring.h>

union ring_state {
	struct {
		uint16_t pidx_head;	/* producer index (head of in-progress enqueues) */
		uint16_t pidx_tail;	/* producer index (items up to here are visible) */
		uint16_t cidx;		/* consumer index */
		uint16_t flags;		/* IDLE, BUSY, STALLED, ABDICATED */
	};
	uint64_t state;			/* the four fields above as one 64b value */
};

enum {
	IDLE = 0,	/* consumer ran to completion, nothing more to do. */
	BUSY,		/* consumer is running already, or will be shortly. */
	STALLED,	/* consumer stopped due to lack of resources. */
	ABDICATED,	/* consumer stopped even though there was work to be
			   done because it wants another thread to take over. */
};
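
/*
 * Typical flag transitions (a sketch, not exhaustive): an enqueue on an IDLE
 * ring moves it to BUSY (or ABDICATED if the producer abdicates).  The drain
 * loop then returns the ring to IDLE when it empties it, parks it at STALLED
 * when r->drain() makes no progress, or leaves it ABDICATED when it gives up
 * its turn with work still pending.  STALLED and ABDICATED rings are
 * restarted by ifmp_ring_check_drainage().
 */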

static inline uint16_t
space_available(struct ifmp_ring *r, union ring_state s)
{
	uint16_t x = r->size - 1;

	if (s.cidx == s.pidx_head)
		return (x);
	else if (s.cidx > s.pidx_head)
		return (s.cidx - s.pidx_head - 1);
	else
		return (x - s.pidx_head + s.cidx);
}
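
/*
 * Worked example (illustrative values): with r->size == 8, cidx == 2, and
 * pidx_head == 6, the ring holds items at slots 2..5, so space_available()
 * returns 7 - 6 + 2 == 3.  One slot is always kept empty so that a full ring
 * (pidx_head one behind cidx) is distinguishable from an empty one.
 */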

static inline uint16_t
increment_idx(struct ifmp_ring *r, uint16_t idx, uint16_t n)
{
	int x = r->size - idx;

	MPASS(x > 0);
	return (x > n ? idx + n : n - x);
}
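
/*
 * Worked example (illustrative values): with r->size == 8, advancing idx == 6
 * by n == 4 wraps around: x == 2, x > n is false, so the result is n - x == 2,
 * i.e. (6 + 4) mod 8.
 */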

/* Consumer is about to update the ring's state to s */
static inline uint16_t
state_to_flags(union ring_state s, int abdicate)
{

	if (s.cidx == s.pidx_tail)
		return (IDLE);
	else if (abdicate && s.pidx_tail != s.pidx_head)
		return (ABDICATED);

	return (BUSY);
}
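
/*
 * For example, a consumer that has caught up with every visible item
 * (cidx == pidx_tail) goes IDLE even if producers still hold unfinished
 * reservations (pidx_head ahead of pidx_tail); those producers flip the ring
 * back to BUSY or ABDICATED when they publish their items.
 */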

#ifdef MP_RING_NO_64BIT_ATOMICS
static void
drain_ring_locked(struct ifmp_ring *r, union ring_state os, uint16_t prev, int budget)
{
	union ring_state ns;
	int n, pending, total;
	uint16_t cidx = os.cidx;
	uint16_t pidx = os.pidx_tail;

	MPASS(os.flags == BUSY);
	MPASS(cidx != pidx);

	if (prev == IDLE)
		counter_u64_add(r->starts, 1);
	pending = 0;
	total = 0;

	while (cidx != pidx) {

		/* Items from cidx to pidx are available for consumption. */
		n = r->drain(r, cidx, pidx);
		if (n == 0) {
			os.state = ns.state = r->state;
			ns.cidx = cidx;
			ns.flags = STALLED;
			r->state = ns.state;
			if (prev != STALLED)
				counter_u64_add(r->stalls, 1);
			else if (total > 0) {
				counter_u64_add(r->restarts, 1);
				counter_u64_add(r->stalls, 1);
			}
			break;
		}
		cidx = increment_idx(r, cidx, n);
		pending += n;
		total += n;

		/*
		 * We update the cidx only if we've caught up with the pidx, the
		 * real cidx is getting too far ahead of the one visible to
		 * everyone else, or we have exceeded our budget.
		 */
		if (cidx != pidx && pending < 64 && total < budget)
			continue;

		os.state = ns.state = r->state;
		ns.cidx = cidx;
		ns.flags = state_to_flags(ns, total >= budget);
		r->state = ns.state;

		if (ns.flags == ABDICATED)
			counter_u64_add(r->abdications, 1);
		if (ns.flags != BUSY) {
			/* Wrong loop exit if we're going to stall. */
			MPASS(ns.flags != STALLED);
			if (prev == STALLED) {
				MPASS(total > 0);
				counter_u64_add(r->restarts, 1);
			}
			break;
		}

		/*
		 * The ring lock guarantees visibility of items associated
		 * with any pidx change that we notice here.
		 */
		pidx = ns.pidx_tail;
		pending = 0;
	}
}
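
/*
 * The locked variant above exists for platforms without native 64-bit
 * atomics (MP_RING_NO_64BIT_ATOMICS); r->lock serializes all updates of
 * r->state, so plain loads and stores of it are safe there.
 */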
#else
/*
 * Caller passes in a state, with a guarantee that there is work to do and that
 * all items up to the pidx_tail in the state are visible.
 */
static void
drain_ring_lockless(struct ifmp_ring *r, union ring_state os, uint16_t prev, int budget)
{
	union ring_state ns;
	int n, pending, total;
	uint16_t cidx = os.cidx;
	uint16_t pidx = os.pidx_tail;

	MPASS(os.flags == BUSY);
	MPASS(cidx != pidx);

	if (prev == IDLE)
		counter_u64_add(r->starts, 1);
	pending = 0;
	total = 0;

	while (cidx != pidx) {

		/* Items from cidx to pidx are available for consumption. */
		n = r->drain(r, cidx, pidx);
		if (n == 0) {
			critical_enter();
			do {
				os.state = ns.state = r->state;
				ns.cidx = cidx;
				ns.flags = STALLED;
			} while (atomic_cmpset_64(&r->state, os.state,
			    ns.state) == 0);
			critical_exit();
			if (prev != STALLED)
				counter_u64_add(r->stalls, 1);
			else if (total > 0) {
				counter_u64_add(r->restarts, 1);
				counter_u64_add(r->stalls, 1);
			}
			break;
		}
		cidx = increment_idx(r, cidx, n);
		pending += n;
		total += n;

		/*
		 * We update the cidx only if we've caught up with the pidx, the
		 * real cidx is getting too far ahead of the one visible to
		 * everyone else, or we have exceeded our budget.
		 */
		if (cidx != pidx && pending < 64 && total < budget)
			continue;

		critical_enter();
		do {
			os.state = ns.state = r->state;
			ns.cidx = cidx;
			ns.flags = state_to_flags(ns, total >= budget);
		} while (atomic_cmpset_acq_64(&r->state, os.state, ns.state) == 0);
		critical_exit();

		if (ns.flags == ABDICATED)
			counter_u64_add(r->abdications, 1);
		if (ns.flags != BUSY) {
			/* Wrong loop exit if we're going to stall. */
			MPASS(ns.flags != STALLED);
			if (prev == STALLED) {
				MPASS(total > 0);
				counter_u64_add(r->restarts, 1);
			}
			break;
		}

		/*
		 * The acquire style atomic above guarantees visibility of items
		 * associated with any pidx change that we notice here.
		 */
		pidx = ns.pidx_tail;
		pending = 0;
	}
}
#endif
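
/*
 * Memory-ordering sketch: ifmp_ring_enqueue() publishes items with a release
 * style update of pidx_tail (atomic_cmpset_rel_64), and the drain loop above
 * observes new pidx_tail values with an acquire style update
 * (atomic_cmpset_acq_64).  The rel/acq pair is what makes the items written
 * into r->items[] visible to the consumer without any lock.
 */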

int
ifmp_ring_alloc(struct ifmp_ring **pr, int size, void *cookie, mp_ring_drain_t drain,
    mp_ring_can_drain_t can_drain, struct malloc_type *mt, int flags)
{
	struct ifmp_ring *r;

	/* All idx are 16b so size can be 65536 at most */
	if (pr == NULL || size < 2 || size > 65536 || drain == NULL ||
	    can_drain == NULL)
		return (EINVAL);
	*pr = NULL;
	flags &= M_NOWAIT | M_WAITOK;
	MPASS(flags != 0);

	r = malloc(__offsetof(struct ifmp_ring, items[size]), mt, flags | M_ZERO);
	if (r == NULL)
		return (ENOMEM);
	r->size = size;
	r->cookie = cookie;
	r->mt = mt;
	r->drain = drain;
	r->can_drain = can_drain;
	r->enqueues = counter_u64_alloc(flags);
	r->drops = counter_u64_alloc(flags);
	r->starts = counter_u64_alloc(flags);
	r->stalls = counter_u64_alloc(flags);
	r->restarts = counter_u64_alloc(flags);
	r->abdications = counter_u64_alloc(flags);
	if (r->enqueues == NULL || r->drops == NULL || r->starts == NULL ||
	    r->stalls == NULL || r->restarts == NULL ||
	    r->abdications == NULL) {
		ifmp_ring_free(r);
		return (ENOMEM);
	}

	*pr = r;
#ifdef MP_RING_NO_64BIT_ATOMICS
	mtx_init(&r->lock, "mp_ring lock", NULL, MTX_DEF);
#endif
	return (0);
}
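
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * supplies its own drain/can_drain callbacks and keeps the returned ring in
 * its softc, e.g.
 *
 *	struct ifmp_ring *txr;
 *	error = ifmp_ring_alloc(&txr, 1024, sc, my_drain, my_can_drain,
 *	    M_DEVBUF, M_WAITOK);
 *
 * where my_drain(r, cidx, pidx) consumes items r->items[cidx..pidx) and
 * returns how many it handled, and my_can_drain(r) reports whether the
 * resources needed to drain (e.g. TX descriptors) are currently available.
 */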

void
ifmp_ring_free(struct ifmp_ring *r)
{

	if (r == NULL)
		return;

	if (r->enqueues != NULL)
		counter_u64_free(r->enqueues);
	if (r->drops != NULL)
		counter_u64_free(r->drops);
	if (r->starts != NULL)
		counter_u64_free(r->starts);
	if (r->stalls != NULL)
		counter_u64_free(r->stalls);
	if (r->restarts != NULL)
		counter_u64_free(r->restarts);
	if (r->abdications != NULL)
		counter_u64_free(r->abdications);

	free(r, r->mt);
}

/*
 * Enqueue n items and maybe drain the ring for some time.
 *
 * Returns an errno.
 */
#ifdef MP_RING_NO_64BIT_ATOMICS
int
ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget, int abdicate)
{
	union ring_state os, ns;
	uint16_t pidx_start, pidx_stop;
	int i;

	MPASS(items != NULL);
	MPASS(n > 0);

	mtx_lock(&r->lock);
	/*
	 * Reserve room for the new items.  Our reservation, if successful, is
	 * from 'pidx_start' to 'pidx_stop'.
	 */
	os.state = r->state;
	if (n >= space_available(r, os)) {
		counter_u64_add(r->drops, n);
		MPASS(os.flags != IDLE);
		mtx_unlock(&r->lock);
		if (os.flags == STALLED)
			ifmp_ring_check_drainage(r, 0);
		return (ENOBUFS);
	}
	ns.state = os.state;
	ns.pidx_head = increment_idx(r, os.pidx_head, n);
	r->state = ns.state;
	pidx_start = os.pidx_head;
	pidx_stop = ns.pidx_head;

	/*
	 * Wait for other producers who got in ahead of us to enqueue their
	 * items, one producer at a time.  It is our turn when the ring's
	 * pidx_tail reaches the beginning of our reservation (pidx_start).
	 */
	while (ns.pidx_tail != pidx_start) {
		cpu_spinwait();
		ns.state = r->state;
	}

	/* Now it is our turn to fill up the area we reserved earlier. */
	i = pidx_start;
	do {
		r->items[i] = *items++;
		if (__predict_false(++i == r->size))
			i = 0;
	} while (i != pidx_stop);

	/*
	 * Update the ring's pidx_tail.  The ring lock guarantees that the
	 * items are visible to any thread that sees the updated pidx.
	 */
	os.state = ns.state = r->state;
	ns.pidx_tail = pidx_stop;
	if (abdicate) {
		if (os.flags == IDLE)
			ns.flags = ABDICATED;
	} else
		ns.flags = BUSY;
	r->state = ns.state;
	counter_u64_add(r->enqueues, n);

	if (!abdicate) {
		/*
		 * Turn into a consumer if some other thread isn't active as a
		 * consumer already.
		 */
		if (os.flags != BUSY)
			drain_ring_locked(r, ns, os.flags, budget);
	}

	mtx_unlock(&r->lock);
	return (0);
}
#else
int
ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget, int abdicate)
{
	union ring_state os, ns;
	uint16_t pidx_start, pidx_stop;
	int i;

	MPASS(items != NULL);
	MPASS(n > 0);

	/*
	 * Reserve room for the new items.  Our reservation, if successful, is
	 * from 'pidx_start' to 'pidx_stop'.
	 */
	for (;;) {
		os.state = r->state;
		if (n >= space_available(r, os)) {
			counter_u64_add(r->drops, n);
			MPASS(os.flags != IDLE);
			if (os.flags == STALLED)
				ifmp_ring_check_drainage(r, 0);
			return (ENOBUFS);
		}
		ns.state = os.state;
		ns.pidx_head = increment_idx(r, os.pidx_head, n);
		critical_enter();
		if (atomic_cmpset_64(&r->state, os.state, ns.state))
			break;
		critical_exit();
		cpu_spinwait();
	}
	pidx_start = os.pidx_head;
	pidx_stop = ns.pidx_head;

	/*
	 * Wait for other producers who got in ahead of us to enqueue their
	 * items, one producer at a time.  It is our turn when the ring's
	 * pidx_tail reaches the beginning of our reservation (pidx_start).
	 */
	while (ns.pidx_tail != pidx_start) {
		cpu_spinwait();
		ns.state = r->state;
	}

	/* Now it is our turn to fill up the area we reserved earlier. */
	i = pidx_start;
	do {
		r->items[i] = *items++;
		if (__predict_false(++i == r->size))
			i = 0;
	} while (i != pidx_stop);

	/*
	 * Update the ring's pidx_tail.  The release style atomic guarantees
	 * that the items are visible to any thread that sees the updated pidx.
	 */
	do {
		os.state = ns.state = r->state;
		ns.pidx_tail = pidx_stop;
		if (abdicate) {
			if (os.flags == IDLE)
				ns.flags = ABDICATED;
		} else
			ns.flags = BUSY;
	} while (atomic_cmpset_rel_64(&r->state, os.state, ns.state) == 0);
	critical_exit();
	counter_u64_add(r->enqueues, n);

	if (!abdicate) {
		/*
		 * Turn into a consumer if some other thread isn't active as a
		 * consumer already.
		 */
		if (os.flags != BUSY)
			drain_ring_lockless(r, ns, os.flags, budget);
	}

	return (0);
}
#endif
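
/*
 * Producer-side sketch (hypothetical caller, not part of this file): a
 * transmit path typically enqueues a batch and lets the current consumer
 * keep draining, then restarts a stalled ring once resources free up:
 *
 *	error = ifmp_ring_enqueue(txr, (void **)&m, 1, budget, 0);
 *	... later, from the tx-completion / reclaim path:
 *	ifmp_ring_check_drainage(txr, budget);
 */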

void
ifmp_ring_check_drainage(struct ifmp_ring *r, int budget)
{
	union ring_state os, ns;

	os.state = r->state;
	if ((os.flags != STALLED && os.flags != ABDICATED) ||	// Only continue in STALLED and ABDICATED
	    os.pidx_head != os.pidx_tail ||			// Require work to be available
	    (os.flags != ABDICATED && r->can_drain(r) == 0))	// Can either drain, or everyone left
		return;

	MPASS(os.cidx != os.pidx_tail);	/* implied by STALLED */
	ns.state = os.state;
	ns.flags = BUSY;

#ifdef MP_RING_NO_64BIT_ATOMICS
	mtx_lock(&r->lock);
	if (r->state != os.state) {
		mtx_unlock(&r->lock);
		return;
	}
	r->state = ns.state;
	drain_ring_locked(r, ns, os.flags, budget);
	mtx_unlock(&r->lock);
#else
	/*
	 * The acquire style atomic guarantees visibility of items associated
	 * with the pidx that we read here.
	 */
	if (!atomic_cmpset_acq_64(&r->state, os.state, ns.state))
		return;

	drain_ring_lockless(r, ns, os.flags, budget);
#endif
}

void
ifmp_ring_reset_stats(struct ifmp_ring *r)
{

	counter_u64_zero(r->enqueues);
	counter_u64_zero(r->drops);
	counter_u64_zero(r->starts);
	counter_u64_zero(r->stalls);
	counter_u64_zero(r->restarts);
	counter_u64_zero(r->abdications);
}

int
ifmp_ring_is_idle(struct ifmp_ring *r)
{
	union ring_state s;

	s.state = r->state;
	if (s.pidx_head == s.pidx_tail && s.pidx_tail == s.cidx &&
	    s.flags == IDLE)
		return (1);

	return (0);
}

int
ifmp_ring_is_stalled(struct ifmp_ring *r)
{
	union ring_state s;

	s.state = r->state;
	if (s.pidx_head == s.pidx_tail && s.flags == STALLED)
		return (1);

	return (0);
}