2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * From: @(#)if.h 8.1 (Berkeley) 6/10/93
37 #include <sys/mbuf.h> /* ifqueue only? */
38 #include <sys/buf_ring.h>
41 #include <sys/lock.h> /* XXX */
42 #include <sys/mutex.h> /* struct ifqueue */
44 #define IF_DUNIT_NONE -1
46 #include <altq/if_altq.h>
49 * Structure defining a queue for a network interface.
/*
 * NOTE(review): the "struct ifqueue {" opener, its closing brace, and the
 * remaining members this excerpt's macros rely on (ifq_len, ifq_maxlen,
 * ifq_drops, ifq_mtx) are missing here -- confirm against the full header.
 */
52 struct mbuf *ifq_head;	/* first packet in the m_nextpkt chain */
53 struct mbuf *ifq_tail;	/* last packet; NULL is treated as "queue empty" by the macros below */
62 * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_slowq)
63 * are queues of messages stored on ifqueue structures
64 * (defined above). Entries are added to and deleted from these structures
 * by these macros.
/*
 * Queue mutex helpers and unlocked state tests.  The leading-underscore
 * variants assume the caller already holds ifq_mtx (see IF_LOCK_ASSERT);
 * the plain IF_* wrappers take the lock themselves.
 */
67 #define IF_LOCK(ifq) mtx_lock(&(ifq)->ifq_mtx)
68 #define IF_UNLOCK(ifq) mtx_unlock(&(ifq)->ifq_mtx)
69 #define IF_LOCK_ASSERT(ifq) mtx_assert(&(ifq)->ifq_mtx, MA_OWNED)
70 #define _IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen)	/* at or over maxlen */
71 #define _IF_DROP(ifq) ((ifq)->ifq_drops++)	/* account one dropped packet */
72 #define _IF_QLEN(ifq) ((ifq)->ifq_len)
/*
 * Append mbuf m to the tail of the queue (caller holds ifq_mtx).
 * NOTE(review): this excerpt is missing the "} else" line between the
 * head and tail assignments, the ifq_len increment, and the closing
 * "} while (0)" -- verify against the complete header.
 */
74 #define _IF_ENQUEUE(ifq, m) do { \
75 (m)->m_nextpkt = NULL; \
76 if ((ifq)->ifq_tail == NULL) \
77 (ifq)->ifq_head = m; \
79 (ifq)->ifq_tail->m_nextpkt = m; \
80 (ifq)->ifq_tail = m; \
/* Locked wrapper: presumably brackets _IF_ENQUEUE with IF_LOCK/IF_UNLOCK
 * (those lines are missing from this excerpt -- confirm). */
84 #define IF_ENQUEUE(ifq, m) do { \
86 _IF_ENQUEUE(ifq, m); \
/*
 * Insert mbuf m at the head of the queue (caller holds ifq_mtx); if the
 * queue was empty (tail == NULL), m becomes the tail as well.
 * NOTE(review): the ifq_len increment and "} while (0)" terminator are
 * missing from this excerpt.
 */
90 #define _IF_PREPEND(ifq, m) do { \
91 (m)->m_nextpkt = (ifq)->ifq_head; \
92 if ((ifq)->ifq_tail == NULL) \
93 (ifq)->ifq_tail = (m); \
94 (ifq)->ifq_head = (m); \
/* Locked wrapper around _IF_PREPEND (lock/unlock lines missing here). */
98 #define IF_PREPEND(ifq, m) do { \
100 _IF_PREPEND(ifq, m); \
/*
 * Remove the head mbuf into m (caller holds ifq_mtx); clears the tail
 * pointer when the queue drains and severs m from the chain.
 * NOTE(review): the "if ((m) != NULL)" guard around the body, the ifq_len
 * decrement, and the "} while (0)" terminator appear to be missing from
 * this excerpt -- as shown, (m)->m_nextpkt would be read unconditionally.
 */
104 #define _IF_DEQUEUE(ifq, m) do { \
105 (m) = (ifq)->ifq_head; \
107 if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL) \
108 (ifq)->ifq_tail = NULL; \
109 (m)->m_nextpkt = NULL; \
/* Locked wrapper around _IF_DEQUEUE (lock/unlock lines missing here). */
114 #define IF_DEQUEUE(ifq, m) do { \
116 _IF_DEQUEUE(ifq, m); \
/*
 * Detach the entire packet chain into m and reset the queue to empty
 * (caller holds ifq_mtx).  Terminating "} while (0)" is missing from
 * this excerpt.
 */
120 #define _IF_DEQUEUE_ALL(ifq, m) do { \
121 (m) = (ifq)->ifq_head; \
122 (ifq)->ifq_head = (ifq)->ifq_tail = NULL; \
123 (ifq)->ifq_len = 0; \
/* Locked wrapper around _IF_DEQUEUE_ALL (lock/unlock lines missing here). */
126 #define IF_DEQUEUE_ALL(ifq, m) do { \
128 _IF_DEQUEUE_ALL(ifq, m); \
/* Non-destructive peek at the head mbuf; does not unlink it. */
132 #define _IF_POLL(ifq, m) ((m) = (ifq)->ifq_head)
133 #define IF_POLL(ifq, m) _IF_POLL(ifq, m)
/*
 * Free every mbuf on the queue.  NOTE(review): only fragments survive in
 * this excerpt -- the local mbuf declaration, the dequeue loop with its
 * NULL test and m_freem() call, and both terminators are missing.
 */
135 #define _IF_DRAIN(ifq) do { \
138 _IF_DEQUEUE(ifq, m); \
145 #define IF_DRAIN(ifq) do { \
/*
 * Hand an mbuf to an interface's send queue and kick the transmitter.
 * NOTE(review): the prototype's final parameter line is missing from this
 * excerpt; the IF_HANDOFF_ADJ call site passes a fourth "adj" argument
 * (byte-count adjustment), with IF_HANDOFF passing 0.
 */
151 int if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp,
153 #define IF_HANDOFF(ifq, m, ifp) \
154 if_handoff((struct ifqueue *)ifq, m, ifp, 0)
155 #define IF_HANDOFF_ADJ(ifq, m, ifp, adj) \
156 if_handoff((struct ifqueue *)ifq, m, ifp, adj)
158 void if_start(struct ifnet *);
/*
 * Enqueue on an ALTQ-aware queue: route through ALTQ_ENQUEUE when ALTQ
 * is enabled, otherwise drop (counting ifq_drops) when the classic queue
 * is full, else append via _IF_ENQUEUE.  NOTE(review): the lock/unlock
 * lines, the else/error-path braces, and the macro terminator are missing
 * from this excerpt -- the visible lines are the decision skeleton only.
 */
160 #define IFQ_ENQUEUE(ifq, m, err) \
163 if (ALTQ_IS_ENABLED(ifq)) \
164 ALTQ_ENQUEUE(ifq, m, NULL, err); \
166 if (_IF_QFULL(ifq)) { \
170 _IF_ENQUEUE(ifq, m); \
175 (ifq)->ifq_drops++; \
/*
 * Dequeue with the queue lock already held.  Priority order: token-bucket
 * regulator (TBR) first, then ALTQ discipline, otherwise the classic
 * _IF_DEQUEUE path.  NOTE(review): surrounding do/while braces are
 * missing from this excerpt.
 */
179 #define IFQ_DEQUEUE_NOLOCK(ifq, m) \
181 if (TBR_IS_ENABLED(ifq)) \
182 (m) = tbr_dequeue_ptr(ifq, ALTDQ_REMOVE); \
183 else if (ALTQ_IS_ENABLED(ifq)) \
184 ALTQ_DEQUEUE(ifq, m); \
186 _IF_DEQUEUE(ifq, m); \
/* Locking wrapper: presumably IFQ_LOCK / IFQ_DEQUEUE_NOLOCK / IFQ_UNLOCK
 * (lock lines missing from this excerpt -- confirm). */
189 #define IFQ_DEQUEUE(ifq, m) \
192 IFQ_DEQUEUE_NOLOCK(ifq, m); \
/*
 * Peek at the next packet without removing it (lock held).  Under TBR
 * this calls tbr_dequeue_ptr() with ALTDQ_POLL (non-destructive); the
 * ALTQ_POLL branch body and the classic _IF_POLL fallback are missing
 * from this excerpt.
 */
196 #define IFQ_POLL_NOLOCK(ifq, m) \
198 if (TBR_IS_ENABLED(ifq)) \
199 (m) = tbr_dequeue_ptr(ifq, ALTDQ_POLL); \
200 else if (ALTQ_IS_ENABLED(ifq)) \
/* Locking wrapper around IFQ_POLL_NOLOCK (lock lines missing here). */
206 #define IFQ_POLL(ifq, m) \
209 IFQ_POLL_NOLOCK(ifq, m); \
/* Discard all queued packets; the ALTQ_PURGE branch body and the classic
 * _IF_DRAIN fallback are missing from this excerpt. */
213 #define IFQ_PURGE_NOLOCK(ifq) \
215 if (ALTQ_IS_ENABLED(ifq)) { \
221 #define IFQ_PURGE(ifq) \
224 IFQ_PURGE_NOLOCK(ifq); \
/* Mark the queue ready for ALTQ use (sets ALTQF_READY in altq_flags). */
228 #define IFQ_SET_READY(ifq) \
229 do { ((ifq)->altq_flags |= ALTQF_READY); } while (0)
/*
 * Thin IFQ_* aliases over the IF_* lock macros, plus unlocked accessors
 * for length, drop accounting, and the maximum-length bound used by
 * _IF_QFULL.  Note IFQ_DEC_LEN pre-decrements (yields the new length)
 * while IFQ_INC_LEN post-increments.
 */
231 #define IFQ_LOCK(ifq) IF_LOCK(ifq)
232 #define IFQ_UNLOCK(ifq) IF_UNLOCK(ifq)
233 #define IFQ_LOCK_ASSERT(ifq) IF_LOCK_ASSERT(ifq)
234 #define IFQ_IS_EMPTY(ifq) ((ifq)->ifq_len == 0)
235 #define IFQ_INC_LEN(ifq) ((ifq)->ifq_len++)
236 #define IFQ_DEC_LEN(ifq) (--(ifq)->ifq_len)
237 #define IFQ_INC_DROPS(ifq) ((ifq)->ifq_drops++)
238 #define IFQ_SET_MAXLEN(ifq, len) ((ifq)->ifq_maxlen = (len))
241 * The IFF_DRV_OACTIVE test should really occur in the device driver, not in
242 * the handoff logic, as that flag is locked by the device driver.
/*
 * Enqueue m on ifp's send queue and, on success, credit the interface
 * statistics (if_obytes adjusted by "adj", if_omcasts for multicast) and
 * start transmission unless the driver is already active.  len/mflags are
 * cached before IFQ_ENQUEUE because the mbuf may be consumed by it.
 * NOTE(review): the local declarations, the error check after enqueue,
 * and the (*(ifp)->if_start)(ifp) call are missing from this excerpt.
 */
244 #define IFQ_HANDOFF_ADJ(ifp, m, adj, err) \
249 len = (m)->m_pkthdr.len; \
250 mflags = (m)->m_flags; \
251 IFQ_ENQUEUE(&(ifp)->if_snd, m, err); \
253 (ifp)->if_obytes += len + (adj); \
254 if (mflags & M_MCAST) \
255 (ifp)->if_omcasts++; \
256 if (((ifp)->if_drv_flags & IFF_DRV_OACTIVE) == 0) \
/* Common case: no byte-count adjustment. */
261 #define IFQ_HANDOFF(ifp, m, err) \
262 IFQ_HANDOFF_ADJ(ifp, m, 0, err)
/*
 * Driver-side dequeue: take from the lock-free driver staging queue
 * (ifq_drv_head/tail/len) first; when it is empty, refill it up to
 * ifq_drv_maxlen by pulling packets from the locked main queue via
 * IFQ_DEQUEUE_NOLOCK and relinking them on the drv tail chain.
 * NOTE(review): several structural lines are missing from this excerpt
 * (the drv-empty else branch, the m0 declaration, the refill-loop break
 * on m0 == NULL, the "} else" before the tail append, and the lock and
 * terminator lines).
 */
264 #define IFQ_DRV_DEQUEUE(ifq, m) \
266 (m) = (ifq)->ifq_drv_head; \
268 if (((ifq)->ifq_drv_head = (m)->m_nextpkt) == NULL) \
269 (ifq)->ifq_drv_tail = NULL; \
270 (m)->m_nextpkt = NULL; \
271 (ifq)->ifq_drv_len--; \
274 IFQ_DEQUEUE_NOLOCK(ifq, m); \
275 while ((ifq)->ifq_drv_len < (ifq)->ifq_drv_maxlen) { \
277 IFQ_DEQUEUE_NOLOCK(ifq, m0); \
280 m0->m_nextpkt = NULL; \
281 if ((ifq)->ifq_drv_tail == NULL) \
282 (ifq)->ifq_drv_head = m0; \
284 (ifq)->ifq_drv_tail->m_nextpkt = m0; \
285 (ifq)->ifq_drv_tail = m0; \
286 (ifq)->ifq_drv_len++; \
/*
 * Push m back onto the head of the driver staging queue (mirror of
 * _IF_PREPEND operating on the ifq_drv_* fields).  Macro terminator is
 * missing from this excerpt.
 */
292 #define IFQ_DRV_PREPEND(ifq, m) \
294 (m)->m_nextpkt = (ifq)->ifq_drv_head; \
295 if ((ifq)->ifq_drv_tail == NULL) \
296 (ifq)->ifq_drv_tail = (m); \
297 (ifq)->ifq_drv_head = (m); \
298 (ifq)->ifq_drv_len++; \
/* Empty only when both the driver staging queue and the main queue are. */
301 #define IFQ_DRV_IS_EMPTY(ifq) \
302 (((ifq)->ifq_drv_len == 0) && ((ifq)->ifq_len == 0))
/*
 * Free every packet on the driver staging queue, then reset it to empty.
 * NOTE(review): the loop body (advancing n to m->m_nextpkt and freeing m
 * with m_freem) and the main-queue IFQ_PURGE call are missing from this
 * excerpt, as are the braces/terminator.
 */
304 #define IFQ_DRV_PURGE(ifq) \
306 struct mbuf *m, *n = (ifq)->ifq_drv_head; \
307 while((m = n) != NULL) { \
311 (ifq)->ifq_drv_head = (ifq)->ifq_drv_tail = NULL; \
312 (ifq)->ifq_drv_len = 0; \
/*
 * Enqueue m for transmission: through the ALTQ-aware IFQ_ENQUEUE when
 * ALTQ is enabled on ifp's send queue, otherwise onto the lock-free
 * buf_ring.  NOTE(review): the "static __inline int" line, the local
 * "error" declaration, the ALTQ-branch return, and the closing brace are
 * missing from this excerpt.
 */
317 drbr_enqueue(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m)
322 if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
323 IFQ_ENQUEUE(&ifp->if_snd, m, error);
327 error = buf_ring_enqueue(br, m);
/*
 * Return a previously peeked mbuf to the front of the queue.
 * NOTE(review): the function's return-type line, braces, and part of the
 * comments are missing from this excerpt.
 */
335 drbr_putback(struct ifnet *ifp, struct buf_ring *br, struct mbuf *new)
/* The mbuf at the head of the queue must be replaced with "new". */
338 * The top of the list needs to be swapped
342 if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
/* In the ALTQ case drbr_peek() actually dequeued the mbuf, so put it
 * back at the head of the send queue here. */
344 * Peek in altq case dequeued it
347 IFQ_DRV_PREPEND(&ifp->if_snd, new);
/* Non-ALTQ case: single-consumer putback onto the buf_ring. */
351 buf_ring_putback_sc(br, new);
/*
 * Peek at the next mbuf to transmit without (logically) removing it.
 * In the ALTQ case it is really dequeued (see inner comment); callers
 * must pair with drbr_advance() or drbr_putback().  NOTE(review): the
 * local "m" declaration, the ALTQ-branch return, and the closing brace
 * are missing from this excerpt.
 */
354 static __inline struct mbuf *
355 drbr_peek(struct ifnet *ifp, struct buf_ring *br)
359 if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
361 * Pull it off like a dequeue
362 * since drbr_advance() does nothing
363 * for altq and drbr_putback() will
364 * use the old prepend function.
366 IFQ_DEQUEUE(&ifp->if_snd, m);
370 return(buf_ring_peek(br));
374 drbr_flush(struct ifnet *ifp, struct buf_ring *br)
379 if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
380 IFQ_PURGE(&ifp->if_snd);
382 while ((m = buf_ring_dequeue_sc(br)) != NULL)
387 drbr_free(struct buf_ring *br, struct malloc_type *type)
390 drbr_flush(NULL, br);
391 buf_ring_free(br, type);
/*
 * Dequeue the next mbuf: from the ALTQ send queue when enabled, else
 * from the buf_ring (single-consumer).  NOTE(review): the local "m"
 * declaration, the ALTQ-branch return, and the closing braces are
 * missing from this excerpt.
 */
394 static __inline struct mbuf *
395 drbr_dequeue(struct ifnet *ifp, struct buf_ring *br)
400 if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
401 IFQ_DEQUEUE(&ifp->if_snd, m);
405 return (buf_ring_dequeue_sc(br));
/* Commit a prior drbr_peek(): advance the ring consumer; a no-op for
 * ALTQ because drbr_peek() already dequeued (early return line missing
 * from this excerpt). */
409 drbr_advance(struct ifnet *ifp, struct buf_ring *br)
412 /* Nothing to do here since peek dequeues in altq case */
413 if (ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd))
416 return (buf_ring_advance_sc(br));
/*
 * Conditionally dequeue: peek at the next mbuf, call func(m, arg), and
 * only dequeue when it returns nonzero; otherwise leave the packet
 * queued and return (presumably NULL -- the return lines are missing
 * from this excerpt).  The ALTQ path does poll-test-dequeue under one
 * lock hold so the tested mbuf cannot change in between.
 * NOTE(review): the local declaration, several return statements, and
 * closing braces are missing from this excerpt.
 */
420 static __inline struct mbuf *
421 drbr_dequeue_cond(struct ifnet *ifp, struct buf_ring *br,
422 int (*func) (struct mbuf *, void *), void *arg)
426 if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
427 IFQ_LOCK(&ifp->if_snd);
428 IFQ_POLL_NOLOCK(&ifp->if_snd, m);
429 if (m != NULL && func(m, arg) == 0) {
430 IFQ_UNLOCK(&ifp->if_snd);
433 IFQ_DEQUEUE_NOLOCK(&ifp->if_snd, m);
434 IFQ_UNLOCK(&ifp->if_snd);
438 m = buf_ring_peek(br);
439 if (m == NULL || func(m, arg) == 0)
442 return (buf_ring_dequeue_sc(br));
446 drbr_empty(struct ifnet *ifp, struct buf_ring *br)
449 if (ALTQ_IS_ENABLED(&ifp->if_snd))
450 return (IFQ_IS_EMPTY(&ifp->if_snd));
452 return (buf_ring_empty(br));
456 drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
459 if (ALTQ_IS_ENABLED(&ifp->if_snd))
462 return (!buf_ring_empty(br));
466 drbr_inuse(struct ifnet *ifp, struct buf_ring *br)
469 if (ALTQ_IS_ENABLED(&ifp->if_snd))
470 return (ifp->if_snd.ifq_len);
472 return (buf_ring_count(br));
/* Global default maximum interface queue length and queue lifecycle
 * prototypes (implemented elsewhere). */
475 extern int ifqmaxlen;
477 void if_qflush(struct ifnet *);
478 void ifq_init(struct ifaltq *, struct ifnet *ifp);
479 void ifq_delete(struct ifaltq *);
/*
 * Interface polling (DEVICE_POLLING): drivers register a handler that is
 * called with a packet budget ("count"); POLL_AND_CHECK_STATUS asks the
 * handler to also check error/status state, POLL_ONLY does not.
 */
481 #ifdef DEVICE_POLLING
482 enum poll_cmd { POLL_ONLY, POLL_AND_CHECK_STATUS };
484 typedef int poll_handler_t(struct ifnet *ifp, enum poll_cmd cmd, int count);
485 int ether_poll_register(poll_handler_t *h, struct ifnet *ifp);
486 int ether_poll_deregister(struct ifnet *ifp);
487 /* The following should be temporary, till all drivers use the driver API */
488 typedef int poll_handler_drv_t(if_t ifh, enum poll_cmd cmd, int count);
489 int ether_poll_register_drv(poll_handler_drv_t *h, if_t ifh);
490 int ether_poll_deregister_drv(if_t ifh);
491 #endif /* DEVICE_POLLING */
494 #endif /* !_NET_IFQ_H_ */