/*-
 * Copyright (c) 2014 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <machine/cpu.h>

#include "t4_mp_ring.h"

#if defined(__i386__)
#define atomic_cmpset_acq_64 atomic_cmpset_64
#define atomic_cmpset_rel_64 atomic_cmpset_64
#endif
union ring_state {
        struct {
                uint16_t pidx_head;
                uint16_t pidx_tail;
                uint16_t cidx;
                uint16_t flags;
        };
        uint64_t state;
};

enum {
        IDLE = 0,       /* consumer ran to completion, nothing more to do. */
        BUSY,           /* consumer is running already, or will be shortly. */
        STALLED,        /* consumer stopped due to lack of resources. */
        ABDICATED,      /* consumer stopped even though there was work to be
                           done because it wants another thread to take over. */
};
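
/*
 * Summary of the consumer life cycle implemented below: a producer moves an
 * IDLE, STALLED, or ABDICATED ring to BUSY when it publishes its items and
 * becomes the consumer itself if no other thread was BUSY already.  The
 * consumer flags the ring STALLED when the drain callback can't make
 * progress, ABDICATED when it runs out of budget while another producer is
 * mid-enqueue, and IDLE when it catches up with pidx_tail.
 * mp_ring_check_drainage() is what restarts a STALLED ring.
 */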
static inline uint16_t
space_available(struct mp_ring *r, union ring_state s)
{
        uint16_t x = r->size - 1;

        if (s.cidx == s.pidx_head)
                return (x);
        else if (s.cidx > s.pidx_head)
                return (s.cidx - s.pidx_head - 1);
        else
                return (x - s.pidx_head + s.cidx);
}
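
/*
 * Worked example: with size = 8, cidx = 2, and pidx_head = 6 the ring wraps,
 * so space_available returns 7 - 6 + 2 = 3 (slots 6, 7, and 0).  One slot is
 * always kept free so that a full ring can be told apart from an empty one,
 * which is why the empty case returns size - 1 rather than size.
 */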
static inline uint16_t
increment_idx(struct mp_ring *r, uint16_t idx, uint16_t n)
{
        int x = r->size - idx;

        MPASS(x > 0);
        return (x > n ? idx + n : n - x);
}
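
/*
 * Worked example: with size = 8, idx = 6, and n = 3, x = 2 and the result is
 * n - x = 1, i.e. (6 + 3) % 8 without a division.  Callers must pass
 * n <= size so that the index wraps at most once.
 */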
/* Consumer is about to update the ring's state to s */
static inline uint16_t
state_to_flags(union ring_state s, int abdicate)
{

        if (s.cidx == s.pidx_tail)
                return (IDLE);
        else if (abdicate && s.pidx_tail != s.pidx_head)
                return (ABDICATED);

        return (BUSY);
}
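
/*
 * Put differently: everything consumed means IDLE; out of budget while
 * another producer holds an unpublished reservation (pidx_tail != pidx_head)
 * means ABDICATED, because that producer will see the ring isn't BUSY when
 * it publishes and will take over as consumer; anything else stays BUSY.
 */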
/*
 * Caller passes in a state, with a guarantee that there is work to do and that
 * all items up to the pidx_tail in the state are visible.
 */
static void
drain_ring(struct mp_ring *r, union ring_state os, uint16_t prev, int budget)
{
        union ring_state ns;
        int n, pending, total;
        uint16_t cidx = os.cidx;
        uint16_t pidx = os.pidx_tail;

        MPASS(os.flags == BUSY);
        MPASS(cidx != pidx);

        if (prev == IDLE)
                counter_u64_add(r->starts, 1);
        pending = 0;
        total = 0;

        while (cidx != pidx) {

                /* Items from cidx to pidx are available for consumption. */
                n = r->drain(r, cidx, pidx);
                if (n == 0) {
                        /* No progress: record our position and stall. */
                        do {
                                os.state = ns.state = r->state;
                                ns.cidx = cidx;
                                ns.flags = STALLED;
                        } while (atomic_cmpset_64(&r->state, os.state,
                            ns.state) == 0);
                        if (prev != STALLED)
                                counter_u64_add(r->stalls, 1);
                        else if (total > 0) {
                                counter_u64_add(r->restarts, 1);
                                counter_u64_add(r->stalls, 1);
                        }
                        break;
                }
                cidx = increment_idx(r, cidx, n);
                pending += n;
                total += n;

                /*
                 * We update the cidx only if we've caught up with the pidx, the
                 * real cidx is getting too far ahead of the one visible to
                 * everyone else, or we have exceeded our budget.
                 */
                if (cidx != pidx && pending < 64 && total < budget)
                        continue;

                do {
                        os.state = ns.state = r->state;
                        ns.cidx = cidx;
                        ns.flags = state_to_flags(ns, total >= budget);
                } while (atomic_cmpset_acq_64(&r->state, os.state, ns.state) == 0);

                if (ns.flags == ABDICATED)
                        counter_u64_add(r->abdications, 1);
                if (ns.flags != BUSY) {
                        /* Wrong loop exit if we're going to stall. */
                        MPASS(ns.flags != STALLED);
                        if (prev == STALLED) {
                                MPASS(total > 0);
                                counter_u64_add(r->restarts, 1);
                        }
                        break;
                }

                /*
                 * The acquire style atomic above guarantees visibility of items
                 * associated with any pidx change that we notice here.
                 */
                pidx = ns.pidx_tail;
                pending = 0;
        }
}
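
/*
 * For illustration, a hedged sketch of a drain callback that satisfies the
 * contract drain_ring() relies on: items r->items[cidx] through
 * r->items[pidx - 1] (modulo r->size) are ready for consumption, the
 * callback returns how many it consumed, and returning 0 stalls the ring
 * until mp_ring_check_drainage() runs.  The txq type and the two hardware
 * helpers are hypothetical, not part of this driver.
 *
 *      static u_int
 *      example_drain(struct mp_ring *r, u_int cidx, u_int pidx)
 *      {
 *              struct example_txq *txq = r->cookie;
 *              u_int n = 0;
 *
 *              while (cidx != pidx) {
 *                      if (!example_hw_has_room(txq))
 *                              break;
 *                      example_hw_send(txq, r->items[cidx]);
 *                      if (++cidx == r->size)
 *                              cidx = 0;
 *                      n++;
 *              }
 *              return (n);
 *      }
 */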
int
mp_ring_alloc(struct mp_ring **pr, int size, void *cookie, ring_drain_t drain,
    ring_can_drain_t can_drain, struct malloc_type *mt, int flags)
{
        struct mp_ring *r;

        /* All idx are 16b so size can be 65536 at most */
        if (pr == NULL || size < 2 || size > 65536 || drain == NULL ||
            can_drain == NULL)
                return (EINVAL);
        *pr = NULL;
        flags &= M_NOWAIT | M_WAITOK;
        MPASS(flags != 0);

        r = malloc(__offsetof(struct mp_ring, items[size]), mt, flags | M_ZERO);
        if (r == NULL)
                return (ENOMEM);
        r->size = size;
        r->cookie = cookie;
        r->mt = mt;
        r->drain = drain;
        r->can_drain = can_drain;
        r->enqueues = counter_u64_alloc(flags);
        r->drops = counter_u64_alloc(flags);
        r->starts = counter_u64_alloc(flags);
        r->stalls = counter_u64_alloc(flags);
        r->restarts = counter_u64_alloc(flags);
        r->abdications = counter_u64_alloc(flags);
        if (r->enqueues == NULL || r->drops == NULL || r->starts == NULL ||
            r->stalls == NULL || r->restarts == NULL ||
            r->abdications == NULL) {
                mp_ring_free(r);
                return (ENOMEM);
        }

        *pr = r;
        return (0);
}
void
mp_ring_free(struct mp_ring *r)
{

        if (r == NULL)
                return;

        if (r->enqueues != NULL)
                counter_u64_free(r->enqueues);
        if (r->drops != NULL)
                counter_u64_free(r->drops);
        if (r->starts != NULL)
                counter_u64_free(r->starts);
        if (r->stalls != NULL)
                counter_u64_free(r->stalls);
        if (r->restarts != NULL)
                counter_u64_free(r->restarts);
        if (r->abdications != NULL)
                counter_u64_free(r->abdications);

        free(r, r->mt);
}
/*
 * Enqueue n items and maybe drain the ring for some time.
 *
 * Returns an errno.
 */
int
mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
{
        union ring_state os, ns;
        uint16_t pidx_start, pidx_stop;
        int i;

        MPASS(items != NULL);
        MPASS(n > 0);

        /*
         * Reserve room for the new items.  Our reservation, if successful, is
         * from 'pidx_start' to 'pidx_stop'.
         */
        for (;;) {
                os.state = r->state;
                if (n >= space_available(r, os)) {
                        counter_u64_add(r->drops, n);
                        MPASS(os.flags != IDLE);
                        if (os.flags == STALLED)
                                mp_ring_check_drainage(r, 0);
                        return (ENOBUFS);
                }
                ns.state = os.state;
                ns.pidx_head = increment_idx(r, os.pidx_head, n);
                critical_enter();
                if (atomic_cmpset_64(&r->state, os.state, ns.state))
                        break;
                critical_exit();
                cpu_spinwait();
        }
        pidx_start = os.pidx_head;
        pidx_stop = ns.pidx_head;

        /*
         * Wait for other producers who got in ahead of us to enqueue their
         * items, one producer at a time.  It is our turn when the ring's
         * pidx_tail reaches the beginning of our reservation (pidx_start).
         */
        while (ns.pidx_tail != pidx_start) {
                cpu_spinwait();
                ns.state = r->state;
        }

        /* Now it is our turn to fill up the area we reserved earlier. */
        i = pidx_start;
        do {
                r->items[i] = *items++;
                if (__predict_false(++i == r->size))
                        i = 0;
        } while (i != pidx_stop);

        /*
         * Update the ring's pidx_tail.  The release style atomic guarantees
         * that the items are visible to any thread that sees the updated pidx.
         */
        do {
                os.state = ns.state = r->state;
                ns.pidx_tail = pidx_stop;
                ns.flags = BUSY;
        } while (atomic_cmpset_rel_64(&r->state, os.state, ns.state) == 0);
        critical_exit();
        counter_u64_add(r->enqueues, n);

        /*
         * Turn into a consumer if some other thread isn't active as a consumer
         * already.
         */
        if (os.flags != BUSY)
                drain_ring(r, ns, os.flags, budget);

        return (0);
}
void
mp_ring_check_drainage(struct mp_ring *r, int budget)
{
        union ring_state os, ns;

        os.state = r->state;
        if (os.flags != STALLED || os.pidx_head != os.pidx_tail ||
            r->can_drain(r) == 0)
                return;

        MPASS(os.cidx != os.pidx_tail); /* implied by STALLED */
        ns.state = os.state;
        ns.flags = BUSY;

        /*
         * The acquire style atomic guarantees visibility of items associated
         * with the pidx that we read here.
         */
        if (!atomic_cmpset_acq_64(&r->state, os.state, ns.state))
                return;

        drain_ring(r, ns, os.flags, budget);
}
void
mp_ring_reset_stats(struct mp_ring *r)
{

        counter_u64_zero(r->enqueues);
        counter_u64_zero(r->drops);
        counter_u64_zero(r->starts);
        counter_u64_zero(r->stalls);
        counter_u64_zero(r->restarts);
        counter_u64_zero(r->abdications);
}
int
mp_ring_is_idle(struct mp_ring *r)
{
        union ring_state s;

        s.state = r->state;
        if (s.pidx_head == s.pidx_tail && s.pidx_tail == s.cidx &&
            s.flags == IDLE)
                return (1);

        return (0);
}
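
/*
 * A hedged usage sketch (everything except the mp_ring_* API below is
 * hypothetical): allocate the ring once at attach time, enqueue from any
 * number of transmitting threads, and kick a stalled ring from the reclaim
 * path once resources free up.  The budget of 4096 is an arbitrary example.
 *
 *      struct mp_ring *txr;
 *      void *item;
 *      int error;
 *
 *      error = mp_ring_alloc(&txr, 1024, txq, example_drain,
 *          example_can_drain, M_DEVBUF, M_WAITOK);
 *
 *      item = m;
 *      error = mp_ring_enqueue(txr, &item, 1, 4096);
 *
 *      mp_ring_check_drainage(txr, 4096);
 */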