/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other based on:
 *
 * - Whether directly dispatching a netisr handler would lead to code
 *   reentrance or lock recursion, such as entering the socket code from the
 *   socket code.
 * - Whether directly dispatching a netisr handler would lead to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * more than one flow.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * netisr.h.
 */
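/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * protocol "foo" registering with netisr.  NETISR_FOO and the foo_*
 * symbols are invented for the example; struct netisr_handler and
 * netisr_register() are the real interfaces used throughout this file.
 * Guarded by #if 0 so it is never compiled.
 */
#if 0
static void
foo_input(struct mbuf *m)
{

        /* Handlers take ownership of the packet and must consume it. */
        m_freem(m);
}

static struct netisr_handler foo_nh = {
        .nh_name = "foo",
        .nh_handler = foo_input,
        .nh_proto = NETISR_FOO,                 /* Hypothetical constant. */
        .nh_policy = NETISR_POLICY_FLOW,        /* Order work by flow ID. */
        .nh_dispatch = NETISR_DISPATCH_DEFAULT, /* Follow the global policy. */
};

static void
foo_init(void)
{

        netisr_register(&foo_nh);
}
#endif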
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#define _WANT_NETISR_INTERNAL   /* Enable definitions from netisr_internal.h */
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/netisr_internal.h>
/*-
 * Synchronize use and modification of the registered netisr data structures;
 * a write lock is acquired while modifying the set of registered protocols,
 * preventing partially registered or unregistered protocols from being run.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The netisr_proto array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_worker.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration are extremely rare at
 * runtime.  If they become more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock    netisr_rmlock;
#define NETISR_LOCK_INIT()      rm_init_flags(&netisr_rmlock, "netisr", \
                                    RM_NOWITNESS)
#define NETISR_LOCK_ASSERT()
#define NETISR_RLOCK(tracker)   rm_rlock(&netisr_rmlock, (tracker))
#define NETISR_RUNLOCK(tracker) rm_runlock(&netisr_rmlock, (tracker))
#define NETISR_WLOCK()          rm_wlock(&netisr_rmlock)
#define NETISR_WUNLOCK()        rm_wunlock(&netisr_rmlock)
/* #define NETISR_LOCKING */

static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "netisr");
/*-
 * Three global direct dispatch policies are supported:
 *
 * NETISR_DISPATCH_DEFERRED: All work is deferred for a netisr, regardless of
 * context (may be overridden by protocols).
 *
 * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch,
 * and we're running on the CPU the work would be performed on, then direct
 * dispatch it if it wouldn't violate ordering constraints on the workstream.
 *
 * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch,
 * always direct dispatch.  (The default.)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.  Protocols can
 * override the global policy (when they're not doing that, they select
 * NETISR_DISPATCH_DEFAULT).
 */
#define NETISR_DISPATCH_POLICY_DEFAULT  NETISR_DISPATCH_DIRECT
#define NETISR_DISPATCH_POLICY_MAXSTR   20 /* Used for temporary buffers. */
static u_int    netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT;
static int      sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_net_isr, OID_AUTO, dispatch,
    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_netisr_dispatch_policy, "A",
    "netisr dispatch policy");
/*-
 * Allow the administrator to limit the number of threads (CPUs) to use for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0.  This must be set at boot.  We will create at most one thread per
 * CPU.  By default this is 1, so only CPU 0 is used and there is a single
 * workstream.  If set to -1, netisr uses all CPUs (mp_ncpus) and creates
 * that many workstreams, one workstream per thread (CPU).
 */
static int      netisr_maxthreads = 1;          /* Max number of threads. */
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int      netisr_bindthreads = 0;         /* Bind threads to CPUs. */
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");
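/*
 * For illustration (not from the original file): both knobs are CTLFLAG_RDTUN
 * and therefore boot-time only, e.g. in loader.conf:
 *
 *      net.isr.maxthreads="-1"         # one netisr thread per CPU
 *      net.isr.bindthreads="1"         # pin each thread to its CPU
 */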
/*-
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit,
 * both for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define NETISR_DEFAULT_MAXQLIMIT        10240
static u_int    netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");
/*-
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define NETISR_DEFAULT_DEFAULTQLIMIT    256
static u_int    netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");
/*-
 * Store and export the compile-time constant NETISR_MAXPROT limit on the
 * number of protocols that can register with netisr at a time.  This is
 * required for crashdump analysis, as it sizes netisr_proto[].
 */
static u_int    netisr_maxprot = NETISR_MAXPROT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
    &netisr_maxprot, 0,
    "Compile-time limit on the number of protocols supported by netisr.");
/*-
 * The netisr_proto array describes all registered protocols, indexed by
 * protocol number.  See netisr_internal.h for more details.
 */
static struct netisr_proto      netisr_proto[NETISR_MAXPROT];
/*-
 * The netisr_enable array provides a per-VNET flag for each registered
 * protocol, indicating whether the netisr is active in that VNET.
 * netisr_register() automatically enables the netisr in the default VNET
 * and in all currently active instances; netisr_unregister() disables it in
 * all active VNETs, including vnet0.  Individual network stack instances
 * can be enabled or disabled with netisr_register_vnet() and
 * netisr_unregister_vnet().  This keeps one netisr_proto per protocol while
 * adding a mechanism to stop netisr processing during vnet teardown; apart
 * from teardown, a VNET is expected to always be enabled.
 */
VNET_DEFINE_STATIC(u_int,       netisr_enable[NETISR_MAXPROT]);
#define V_netisr_enable         VNET(netisr_enable)
/*-
 * Per-CPU workstream data.  See netisr_internal.h for more details.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);
/*-
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int                             nws_array[MAXCPU];
/*-
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int                             nws_count;
SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");
/*-
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define NWS_LOCK(s)             mtx_lock(&(s)->nws_mtx)
#define NWS_LOCK_ASSERT(s)      mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define NWS_UNLOCK(s)           mtx_unlock(&(s)->nws_mtx)
#define NWS_SIGNAL(s)           swi_sched((s)->nws_swi_cookie, 0)
/*-
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

        return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

        return (nws_array[cpunumber % nws_count]);
}

/*-
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

        return (nws_array[flowid % nws_count]);
}
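/*
 * Illustrative sketch, not part of the original file: a protocol can reuse
 * the exported helper above so that its own per-flow work runs on the CPU
 * netisr would pick for the same flow ID.  foo_flow_cpu() is invented for
 * the example.
 */
#if 0
static u_int
foo_flow_cpu(uint32_t flowid)
{

        /* Same mapping netisr uses, so affinity agrees with dispatch. */
        return (netisr_default_flow2cpu(flowid));
}
#endif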
/*-
 * Dispatch tunable and sysctl configuration.
 */
struct netisr_dispatch_table_entry {
        u_int            ndte_policy;
        const char      *ndte_policy_str;
};
static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = {
        { NETISR_DISPATCH_DEFAULT, "default" },
        { NETISR_DISPATCH_DEFERRED, "deferred" },
        { NETISR_DISPATCH_HYBRID, "hybrid" },
        { NETISR_DISPATCH_DIRECT, "direct" },
};
static void
netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer,
    size_t buflen)
{
        const struct netisr_dispatch_table_entry *ndtep;
        const char *str;
        u_int i;

        str = "unknown";
        for (i = 0; i < nitems(netisr_dispatch_table); i++) {
                ndtep = &netisr_dispatch_table[i];
                if (ndtep->ndte_policy == dispatch_policy) {
                        str = ndtep->ndte_policy_str;
                        break;
                }
        }
        snprintf(buffer, buflen, "%s", str);
}
static int
netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp)
{
        const struct netisr_dispatch_table_entry *ndtep;
        u_int i;

        for (i = 0; i < nitems(netisr_dispatch_table); i++) {
                ndtep = &netisr_dispatch_table[i];
                if (strcmp(ndtep->ndte_policy_str, str) == 0) {
                        *dispatch_policyp = ndtep->ndte_policy;
                        return (0);
                }
        }
        return (EINVAL);
}
static int
sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS)
{
        char tmp[NETISR_DISPATCH_POLICY_MAXSTR];
        size_t len;
        u_int dispatch_policy;
        int error;

        netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp,
            sizeof(tmp));
        /*
         * netisr is initialized very early during boot, when malloc isn't
         * yet available, so we can't use sysctl_handle_string() to process
         * any non-default value that may have been set via the loader.
         */
        if (req->newptr != NULL) {
                len = req->newlen - req->newidx;
                if (len >= NETISR_DISPATCH_POLICY_MAXSTR)
                        return (EINVAL);
                error = SYSCTL_IN(req, tmp, len);
                if (error == 0) {
                        tmp[len] = '\0';
                        error = netisr_dispatch_policy_from_str(tmp,
                            &dispatch_policy);
                        if (error == 0 &&
                            dispatch_policy == NETISR_DISPATCH_DEFAULT)
                                error = EINVAL;
                        if (error == 0)
                                netisr_dispatch_policy = dispatch_policy;
                }
        } else
                error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req);
        return (error);
}
/*-
 * Register a new netisr handler, which requires initializing per-protocol
 * fields for each workstream.  All netisr work is briefly suspended while
 * the protocol is installed.
 */
void
netisr_register(const struct netisr_handler *nhp)
{
        VNET_ITERATOR_DECL(vnet_iter);
        struct netisr_work *npwp;
        const char *name;
        u_int i, proto;

        proto = nhp->nh_proto;
        name = nhp->nh_name;

        /*
         * Test that the requested registration is valid.
         */
        KASSERT(nhp->nh_name != NULL,
            ("%s: nh_name NULL for %u", __func__, proto));
        KASSERT(nhp->nh_handler != NULL,
            ("%s: nh_handler NULL for %s", __func__, name));
        KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
            nhp->nh_policy == NETISR_POLICY_FLOW ||
            nhp->nh_policy == NETISR_POLICY_CPU,
            ("%s: unsupported nh_policy %u for %s", __func__,
            nhp->nh_policy, name));
        KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
            nhp->nh_m2flow == NULL,
            ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
            name));
        KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
            ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
            name));
        KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
            ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
            name));
        KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT ||
            nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED ||
            nhp->nh_dispatch == NETISR_DISPATCH_HYBRID ||
            nhp->nh_dispatch == NETISR_DISPATCH_DIRECT,
            ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch));

        KASSERT(proto < NETISR_MAXPROT,
            ("%s(%u, %s): protocol too big", __func__, proto, name));

        /*
         * Check that no registration already exists for this protocol.
         */
        NETISR_WLOCK();
        KASSERT(netisr_proto[proto].np_name == NULL,
            ("%s(%u, %s): name present", __func__, proto, name));
        KASSERT(netisr_proto[proto].np_handler == NULL,
            ("%s(%u, %s): handler present", __func__, proto, name));

        netisr_proto[proto].np_name = name;
        netisr_proto[proto].np_handler = nhp->nh_handler;
        netisr_proto[proto].np_m2flow = nhp->nh_m2flow;
        netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid;
        netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu;
        if (nhp->nh_qlimit == 0)
                netisr_proto[proto].np_qlimit = netisr_defaultqlimit;
        else if (nhp->nh_qlimit > netisr_maxqlimit) {
                printf("%s: %s requested queue limit %u capped to "
                    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
                    netisr_maxqlimit);
                netisr_proto[proto].np_qlimit = netisr_maxqlimit;
        } else
                netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
        netisr_proto[proto].np_policy = nhp->nh_policy;
        netisr_proto[proto].np_dispatch = nhp->nh_dispatch;
        CPU_FOREACH(i) {
                npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
                bzero(npwp, sizeof(*npwp));
                npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
        }

        /*
         * Test that we are in vnet0 and have a curvnet set.
         */
        KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__));
        KASSERT(IS_DEFAULT_VNET(curvnet), ("%s: curvnet %p is not vnet0 %p",
            __func__, curvnet, vnet0));
        VNET_LIST_RLOCK_NOSLEEP();
        VNET_FOREACH(vnet_iter) {
                CURVNET_SET(vnet_iter);
                V_netisr_enable[proto] = 1;
                CURVNET_RESTORE();
        }
        VNET_LIST_RUNLOCK_NOSLEEP();

        NETISR_WUNLOCK();
}
/*-
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
        struct netisr_work *npwp;
        const char *name;
        u_int i, proto;

        proto = nhp->nh_proto;
#ifdef INVARIANTS
        name = nhp->nh_name;
#endif
        KASSERT(proto < NETISR_MAXPROT,
            ("%s(%u): protocol too big for %s", __func__, proto, name));

        NETISR_WLOCK();
        KASSERT(netisr_proto[proto].np_handler != NULL,
            ("%s(%u): protocol not registered for %s", __func__, proto,
            name));

        CPU_FOREACH(i) {
                npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
                npwp->nw_qdrops = 0;
        }
        NETISR_WUNLOCK();
}
/*-
 * Query current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
        struct netisr_work *npwp;
        struct rm_priotracker tracker;
        const char *name;
        u_int i, proto;

        *qdropp = 0;
        proto = nhp->nh_proto;
#ifdef INVARIANTS
        name = nhp->nh_name;
#endif
        KASSERT(proto < NETISR_MAXPROT,
            ("%s(%u): protocol too big for %s", __func__, proto, name));

        NETISR_RLOCK(&tracker);
        KASSERT(netisr_proto[proto].np_handler != NULL,
            ("%s(%u): protocol not registered for %s", __func__, proto,
            name));

        CPU_FOREACH(i) {
                npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
                *qdropp += npwp->nw_qdrops;
        }
        NETISR_RUNLOCK(&tracker);
}
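/*
 * Illustrative sketch, not part of the original file: a protocol summing its
 * drops across all workstreams, reusing the hypothetical foo_nh handler from
 * the registration sketch above.
 */
#if 0
static void
foo_log_qdrops(void)
{
        u_int64_t qdrops;

        netisr_getqdrops(&foo_nh, &qdrops);     /* Sums across all CPUs. */
        printf("foo: %ju packets dropped\n", (uintmax_t)qdrops);
}
#endif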
/*-
 * Query current per-workstream queue limit for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
        struct rm_priotracker tracker;
        const char *name;
        u_int proto;

        proto = nhp->nh_proto;
#ifdef INVARIANTS
        name = nhp->nh_name;
#endif
        KASSERT(proto < NETISR_MAXPROT,
            ("%s(%u): protocol too big for %s", __func__, proto, name));

        NETISR_RLOCK(&tracker);
        KASSERT(netisr_proto[proto].np_handler != NULL,
            ("%s(%u): protocol not registered for %s", __func__, proto,
            name));
        *qlimitp = netisr_proto[proto].np_qlimit;
        NETISR_RUNLOCK(&tracker);
}
/*-
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
        struct netisr_work *npwp;
        const char *name;
        u_int i, proto;

        if (qlimit > netisr_maxqlimit)
                return (EINVAL);

        proto = nhp->nh_proto;
#ifdef INVARIANTS
        name = nhp->nh_name;
#endif
        KASSERT(proto < NETISR_MAXPROT,
            ("%s(%u): protocol too big for %s", __func__, proto, name));

        NETISR_WLOCK();
        KASSERT(netisr_proto[proto].np_handler != NULL,
            ("%s(%u): protocol not registered for %s", __func__, proto,
            name));

        netisr_proto[proto].np_qlimit = qlimit;
        CPU_FOREACH(i) {
                npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
                npwp->nw_qlimit = qlimit;
        }
        NETISR_WUNLOCK();
        return (0);
}
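/*
 * Illustrative sketch, not part of the original file: raising the
 * hypothetical "foo" protocol's queue depth at runtime.  Values above
 * net.isr.maxqlimit are rejected by the check at the top of this function.
 */
#if 0
static int
foo_grow_queue(void)
{

        return (netisr_setqlimit(&foo_nh, 1024));
}
#endif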
/*-
 * Drain all packets currently held in a particular protocol work queue.
 */
static void
netisr_drain_proto(struct netisr_work *npwp)
{
        struct mbuf *m;

        /*
         * We would assert the lock on the workstream but it's not passed in.
         */
        while ((m = npwp->nw_head) != NULL) {
                npwp->nw_head = m->m_nextpkt;
                m->m_nextpkt = NULL;
                if (npwp->nw_head == NULL)
                        npwp->nw_tail = NULL;
                npwp->nw_len--;
                m_freem(m);
        }
        KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
        KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
}
/*-
 * Remove the registration of a network protocol, which requires clearing
 * per-protocol fields across all workstreams, including freeing all mbufs in
 * the queues at time of unregister.  All work in netisr is briefly suspended
 * while this takes place.
 */
void
netisr_unregister(const struct netisr_handler *nhp)
{
        VNET_ITERATOR_DECL(vnet_iter);
        struct netisr_work *npwp;
        const char *name;
        u_int i, proto;

        proto = nhp->nh_proto;
#ifdef INVARIANTS
        name = nhp->nh_name;
#endif
        KASSERT(proto < NETISR_MAXPROT,
            ("%s(%u): protocol too big for %s", __func__, proto, name));

        NETISR_WLOCK();
        KASSERT(netisr_proto[proto].np_handler != NULL,
            ("%s(%u): protocol not registered for %s", __func__, proto,
            name));

        VNET_LIST_RLOCK_NOSLEEP();
        VNET_FOREACH(vnet_iter) {
                CURVNET_SET(vnet_iter);
                V_netisr_enable[proto] = 0;
                CURVNET_RESTORE();
        }
        VNET_LIST_RUNLOCK_NOSLEEP();

        netisr_proto[proto].np_name = NULL;
        netisr_proto[proto].np_handler = NULL;
        netisr_proto[proto].np_m2flow = NULL;
        netisr_proto[proto].np_m2cpuid = NULL;
        netisr_proto[proto].np_qlimit = 0;
        netisr_proto[proto].np_policy = 0;
        CPU_FOREACH(i) {
                npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
                netisr_drain_proto(npwp);
                bzero(npwp, sizeof(*npwp));
        }
        NETISR_WUNLOCK();
}
/*-
 * Enable a registered netisr for the caller's VNET.
 */
void
netisr_register_vnet(const struct netisr_handler *nhp)
{
        u_int proto;

        proto = nhp->nh_proto;

        KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__));
        KASSERT(proto < NETISR_MAXPROT,
            ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name));
        NETISR_WLOCK();
        KASSERT(netisr_proto[proto].np_handler != NULL,
            ("%s(%u): protocol not registered for %s", __func__, proto,
            nhp->nh_name));

        V_netisr_enable[proto] = 1;
        NETISR_WUNLOCK();
}
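/*
 * Illustrative sketch, not part of the original file: per-VNET enablement is
 * normally driven from a protocol's per-VNET constructor.  The subsystem and
 * ordering values below are plausible assumptions, not taken from this file.
 */
#if 0
static void
vnet_foo_init(const void *unused)
{

        netisr_register_vnet(&foo_nh);
}
VNET_SYSINIT(vnet_foo_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY,
    vnet_foo_init, NULL);
#endif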
static void
netisr_drain_proto_vnet(struct vnet *vnet, u_int proto)
{
        struct netisr_workstream *nwsp;
        struct netisr_work *npwp;
        struct mbuf *m, *mp, *n, *ne;
        u_int i;

        KASSERT(vnet != NULL, ("%s: vnet is NULL", __func__));
        NETISR_LOCK_ASSERT();

        CPU_FOREACH(i) {
                nwsp = DPCPU_ID_PTR(i, nws);
                if (nwsp->nws_intr_event == NULL)
                        continue;
                npwp = &nwsp->nws_work[proto];
                NWS_LOCK(nwsp);

                /*
                 * Rather than dissecting and removing mbufs from the middle
                 * of the chain, we build a new chain if the packet stays and
                 * update the head and tail pointers at the end.  All packets
                 * matching the given vnet are freed.
                 */
                m = npwp->nw_head;
                n = ne = NULL;
                while (m != NULL) {
                        mp = m;
                        m = m->m_nextpkt;
                        mp->m_nextpkt = NULL;
                        if (mp->m_pkthdr.rcvif->if_vnet != vnet) {
                                /* Keep the packet on the new chain. */
                                if (n == NULL)
                                        n = mp;
                                else
                                        ne->m_nextpkt = mp;
                                ne = mp;
                                continue;
                        }
                        /* This is a packet in the selected vnet. Free it. */
                        npwp->nw_len--;
                        m_freem(mp);
                }
                npwp->nw_head = n;
                npwp->nw_tail = ne;
                NWS_UNLOCK(nwsp);
        }
}
/*-
 * Disable a registered netisr in the caller's VNET and drain its queues.
 */
void
netisr_unregister_vnet(const struct netisr_handler *nhp)
{
        u_int proto;

        proto = nhp->nh_proto;

        KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__));
        KASSERT(proto < NETISR_MAXPROT,
            ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name));
        NETISR_WLOCK();
        KASSERT(netisr_proto[proto].np_handler != NULL,
            ("%s(%u): protocol not registered for %s", __func__, proto,
            nhp->nh_name));

        V_netisr_enable[proto] = 0;

        netisr_drain_proto_vnet(curvnet, proto);
        NETISR_WUNLOCK();
}
/*-
 * Compose the global and per-protocol policies on dispatch, and return the
 * dispatch policy to use.
 */
static u_int
netisr_get_dispatch(struct netisr_proto *npp)
{

        /*
         * Protocol-specific configuration overrides the global default.
         */
        if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT)
                return (npp->np_dispatch);
        return (netisr_dispatch_policy);
}
/*-
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy,
    uintptr_t source, struct mbuf *m, u_int *cpuidp)
{
        struct ifnet *ifp;
        u_int policy;

        NETISR_LOCK_ASSERT();

        /*
         * In the event we have only one worker, shortcut and deliver to it
         * without further ado.
         */
        if (nws_count == 1) {
                *cpuidp = nws_array[0];
                return (m);
        }

        /*
         * What happens next depends on the policy selected by the protocol.
         * If we want to support per-interface policies, we should do that
         * here first.
         */
        policy = npp->np_policy;
        if (policy == NETISR_POLICY_CPU) {
                m = npp->np_m2cpuid(m, source, cpuidp);
                if (m == NULL)
                        return (NULL);

                /*
                 * It's possible for a protocol not to have a good idea about
                 * where to process a packet, in which case we fall back on
                 * the netisr code to decide.  In the hybrid case, return the
                 * current CPU ID, which will force an immediate direct
                 * dispatch.  In the queued case, fall back on the SOURCE
                 * policy.
                 */
                if (*cpuidp != NETISR_CPUID_NONE) {
                        *cpuidp = netisr_get_cpuid(*cpuidp);
                        return (m);
                }
                if (dispatch_policy == NETISR_DISPATCH_HYBRID) {
                        *cpuidp = netisr_get_cpuid(curcpu);
                        return (m);
                }
                policy = NETISR_POLICY_SOURCE;
        }

        if (policy == NETISR_POLICY_FLOW) {
                if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE &&
                    npp->np_m2flow != NULL) {
                        m = npp->np_m2flow(m, source);
                        if (m == NULL)
                                return (NULL);
                }
                if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
                        *cpuidp =
                            netisr_default_flow2cpu(m->m_pkthdr.flowid);
                        return (m);
                }
                policy = NETISR_POLICY_SOURCE;
        }

        KASSERT(policy == NETISR_POLICY_SOURCE,
            ("%s: invalid policy %u for %s", __func__, npp->np_policy,
            npp->np_name));

        MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
        ifp = m->m_pkthdr.rcvif;
        if (ifp != NULL)
                *cpuidp = nws_array[(ifp->if_index + source) % nws_count];
        else
                *cpuidp = nws_array[source % nws_count];
        return (m);
}
/*-
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
        struct netisr_work local_npw, *npwp;
        u_int handled;
        struct mbuf *m;

        NETISR_LOCK_ASSERT();
        NWS_LOCK_ASSERT(nwsp);

        KASSERT(nwsp->nws_flags & NWS_RUNNING,
            ("%s(%u): not running", __func__, proto));
        KASSERT(proto >= 0 && proto < NETISR_MAXPROT,
            ("%s(%u): invalid proto", __func__, proto));

        npwp = &nwsp->nws_work[proto];
        if (npwp->nw_len == 0)
                return (0);

        /*
         * Move the global work queue to a thread-local work queue.
         *
         * Notice that this means the effective maximum length of the queue
         * is actually twice that of the maximum queue length specified in
         * the protocol registration call.
         */
        handled = npwp->nw_len;
        local_npw = *npwp;
        npwp->nw_head = NULL;
        npwp->nw_tail = NULL;
        npwp->nw_len = 0;
        nwsp->nws_pendingbits &= ~(1 << proto);
        NWS_UNLOCK(nwsp);
        while ((m = local_npw.nw_head) != NULL) {
                local_npw.nw_head = m->m_nextpkt;
                m->m_nextpkt = NULL;
                if (local_npw.nw_head == NULL)
                        local_npw.nw_tail = NULL;
                local_npw.nw_len--;
                VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
                    ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
                CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
                netisr_proto[proto].np_handler(m);
                CURVNET_RESTORE();
        }
        KASSERT(local_npw.nw_len == 0,
            ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
        if (netisr_proto[proto].np_drainedcpu)
                netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu);
        NWS_LOCK(nwsp);
        npwp->nw_handled += handled;
        return (handled);
}
/*-
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
        struct rm_priotracker tracker;
#endif
        struct netisr_workstream *nwsp;
        u_int bits, prot;

        nwsp = arg;

#ifdef DEVICE_POLLING
        KASSERT(nws_count == 1,
            ("%s: device_polling but nws_count != 1", __func__));
        netisr_poll();
#endif
#ifdef NETISR_LOCKING
        NETISR_RLOCK(&tracker);
#endif
        NWS_LOCK(nwsp);
        KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
        if (nwsp->nws_flags & NWS_DISPATCHING)
                goto out;
        nwsp->nws_flags |= NWS_RUNNING;
        nwsp->nws_flags &= ~NWS_SCHEDULED;
        while ((bits = nwsp->nws_pendingbits) != 0) {
                while ((prot = ffs(bits)) != 0) {
                        prot--;
                        bits &= ~(1 << prot);
                        (void)netisr_process_workstream_proto(nwsp, prot);
                }
        }
        nwsp->nws_flags &= ~NWS_RUNNING;
out:
        NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
        NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
        netisr_pollmore();
#endif
}
static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

        NWS_LOCK_ASSERT(nwsp);

        *dosignalp = 0;
        if (npwp->nw_len < npwp->nw_qlimit) {
                m->m_nextpkt = NULL;
                if (npwp->nw_head == NULL) {
                        npwp->nw_head = m;
                        npwp->nw_tail = m;
                } else {
                        npwp->nw_tail->m_nextpkt = m;
                        npwp->nw_tail = m;
                }
                npwp->nw_len++;
                if (npwp->nw_len > npwp->nw_watermark)
                        npwp->nw_watermark = npwp->nw_len;

                /*
                 * We must set the bit regardless of NWS_RUNNING, so that
                 * swi_net() keeps calling netisr_process_workstream_proto().
                 */
                nwsp->nws_pendingbits |= (1 << proto);
                if (!(nwsp->nws_flags &
                    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
                        nwsp->nws_flags |= NWS_SCHEDULED;
                        *dosignalp = 1; /* Defer until unlocked. */
                }
                npwp->nw_queued++;
                return (0);
        } else {
                m_freem(m);
                npwp->nw_qdrops++;
                return (ENOBUFS);
        }
}
static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
        struct netisr_workstream *nwsp;
        struct netisr_work *npwp;
        int dosignal, error;

#ifdef NETISR_LOCKING
        NETISR_LOCK_ASSERT();
#endif
        KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
            cpuid, mp_maxid));
        KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

        dosignal = 0;
        error = 0;
        nwsp = DPCPU_ID_PTR(cpuid, nws);
        npwp = &nwsp->nws_work[proto];
        NWS_LOCK(nwsp);
        error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
        NWS_UNLOCK(nwsp);
        if (dosignal)
                NWS_SIGNAL(nwsp);
        return (error);
}
static int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
        struct rm_priotracker tracker;
#endif
        u_int cpuid;
        int error;

        KASSERT(proto < NETISR_MAXPROT,
            ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
        NETISR_RLOCK(&tracker);
#endif
        KASSERT(netisr_proto[proto].np_handler != NULL,
            ("%s: invalid proto %u", __func__, proto));

        if (V_netisr_enable[proto] == 0) {
                m_freem(m);
                return (ENOPROTOOPT);
        }

        m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED,
            source, m, &cpuid);
        if (m != NULL) {
                KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
                    cpuid));
                VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
                    ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
                error = netisr_queue_internal(proto, m, cpuid);
        } else
                error = ENOBUFS;
#ifdef NETISR_LOCKING
        NETISR_RUNLOCK(&tracker);
#endif
        return (error);
}
int
netisr_queue(u_int proto, struct mbuf *m)
{

        return (netisr_queue_src(proto, 0, m));
}
/*-
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
        struct rm_priotracker tracker;
#endif
        struct netisr_workstream *nwsp;
        struct netisr_proto *npp;
        struct netisr_work *npwp;
        int dosignal, error;
        u_int cpuid, dispatch_policy;

        KASSERT(proto < NETISR_MAXPROT,
            ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
        NETISR_RLOCK(&tracker);
#endif
        npp = &netisr_proto[proto];
        KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__,
            proto));

        if (V_netisr_enable[proto] == 0) {
                m_freem(m);
                return (ENOPROTOOPT);
        }

        dispatch_policy = netisr_get_dispatch(npp);
        if (dispatch_policy == NETISR_DISPATCH_DEFERRED)
                return (netisr_queue_src(proto, source, m));

        /*
         * If direct dispatch is forced, then unconditionally dispatch
         * without a formal CPU selection.  Borrow the current CPU's stats,
         * even if there's no worker on it.  In this case we don't update
         * nws_flags because all netisr processing will be source ordered due
         * to always being forced to directly dispatch.
         */
        if (dispatch_policy == NETISR_DISPATCH_DIRECT) {
                nwsp = DPCPU_PTR(nws);
                npwp = &nwsp->nws_work[proto];
                npwp->nw_dispatched++;
                npwp->nw_handled++;
                netisr_proto[proto].np_handler(m);
                error = 0;
                goto out_unlock;
        }

        KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID,
            ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy));

        /*
         * Otherwise, we execute in a hybrid mode where we will try to direct
         * dispatch if we're on the right CPU and the netisr worker isn't
         * already running.
         */
        sched_pin();
        m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID,
            source, m, &cpuid);
        if (m == NULL) {
                error = ENOBUFS;
                goto out_unpin;
        }
        KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
        if (cpuid != curcpu)
                goto queue_fallback;
        nwsp = DPCPU_PTR(nws);
        npwp = &nwsp->nws_work[proto];

        /*-
         * We are willing to direct dispatch only if three conditions hold:
         *
         * (1) The netisr worker isn't already running,
         * (2) Another thread isn't already directly dispatching, and
         * (3) The netisr hasn't already been woken up.
         */
        NWS_LOCK(nwsp);
        if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
                error = netisr_queue_workstream(nwsp, proto, npwp, m,
                    &dosignal);
                NWS_UNLOCK(nwsp);
                if (dosignal)
                        NWS_SIGNAL(nwsp);
                goto out_unpin;
        }

        /*
         * The current thread is now effectively the netisr worker, so set
         * the dispatching flag to prevent concurrent processing of the
         * stream from another thread (even the netisr worker), which could
         * otherwise lead to effective misordering of the stream.
         */
        nwsp->nws_flags |= NWS_DISPATCHING;
        NWS_UNLOCK(nwsp);
        netisr_proto[proto].np_handler(m);
        NWS_LOCK(nwsp);
        nwsp->nws_flags &= ~NWS_DISPATCHING;
        npwp->nw_handled++;
        npwp->nw_hybrid_dispatched++;

        /*
         * If other work was enqueued by another thread while we were direct
         * dispatching, we need to signal the netisr worker to do that work.
         * In the future, we might want to do some of that work in the
         * current thread, rather than trigger further context switches.  If
         * so, we'll want to establish a reasonable bound on the work done in
         * the "borrowed" context.
         */
        if (nwsp->nws_pendingbits != 0) {
                nwsp->nws_flags |= NWS_SCHEDULED;
                dosignal = 1;
        } else
                dosignal = 0;
        NWS_UNLOCK(nwsp);
        if (dosignal)
                NWS_SIGNAL(nwsp);
        error = 0;
        goto out_unpin;

queue_fallback:
        error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
        sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
        NETISR_RUNLOCK(&tracker);
#endif
        return (error);
}
int
netisr_dispatch(u_int proto, struct mbuf *m)
{

        return (netisr_dispatch_src(proto, 0, m));
}
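/*
 * Illustrative sketch, not part of the original file: a receive path handing
 * a packet to netisr using the hypothetical protocol from the registration
 * sketch.  netisr takes ownership of the mbuf; depending on policy the
 * handler runs inline here or later in an SWI thread.
 */
#if 0
static void
foo_rx(struct mbuf *m)
{

        (void)netisr_dispatch(NETISR_FOO, m);
}
#endif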
#ifdef DEVICE_POLLING
/*-
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
        struct netisr_workstream *nwsp;

        nwsp = DPCPU_ID_PTR(nws_array[0], nws);
        NWS_SIGNAL(nwsp);
}
#endif
static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
        char swiname[12];
        struct netisr_workstream *nwsp;
        int error;

        KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

        nwsp = DPCPU_ID_PTR(cpuid, nws);
        mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
        nwsp->nws_cpu = cpuid;
        snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
        error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
            SWI_NET, INTR_TYPE_NET | INTR_MPSAFE, &nwsp->nws_swi_cookie);
        if (error)
                panic("%s: swi_add %d", __func__, error);
        pc->pc_netisr = nwsp->nws_intr_event;
        if (netisr_bindthreads) {
                error = intr_event_bind(nwsp->nws_intr_event, cpuid);
                if (error != 0)
                        printf("%s: cpu %u: intr_event_bind: %d\n", __func__,
                            cpuid, error);
        }
        NETISR_WLOCK();
        nws_array[nws_count] = nwsp->nws_cpu;
        nws_count++;
        NETISR_WUNLOCK();
}
/*-
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{
        struct pcpu *pc;

        NETISR_LOCK_INIT();
        if (netisr_maxthreads == 0 || netisr_maxthreads < -1)
                netisr_maxthreads = 1;          /* Default behavior. */
        else if (netisr_maxthreads == -1)
                netisr_maxthreads = mp_ncpus;   /* Use all CPUs. */
        if (netisr_maxthreads > mp_ncpus) {
                printf("netisr_init: forcing maxthreads from %d to %d\n",
                    netisr_maxthreads, mp_ncpus);
                netisr_maxthreads = mp_ncpus;
        }
        if (netisr_defaultqlimit > netisr_maxqlimit) {
                printf("netisr_init: forcing defaultqlimit from %d to %d\n",
                    netisr_defaultqlimit, netisr_maxqlimit);
                netisr_defaultqlimit = netisr_maxqlimit;
        }
#ifdef DEVICE_POLLING
        /*
         * The device polling code is not yet aware of how to deal with
         * multiple netisr threads, so for the time being compiling in device
         * polling disables parallel netisr workers.
         */
        if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
                printf("netisr_init: forcing maxthreads to 1 and "
                    "bindthreads to 0 for device polling\n");
                netisr_maxthreads = 1;
                netisr_bindthreads = 0;
        }
#endif

#ifdef EARLY_AP_STARTUP
        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (nws_count >= netisr_maxthreads)
                        break;
                netisr_start_swi(pc->pc_cpuid, pc);
        }
#else
        pc = get_pcpu();
        netisr_start_swi(pc->pc_cpuid, pc);
#endif
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
#ifndef EARLY_AP_STARTUP
/*-
 * Start worker threads for additional CPUs.  No attempt is made to
 * gracefully handle work reassignment, as we don't yet support dynamic
 * reconfiguration.
 */
static void
netisr_start(void *arg)
{
        struct pcpu *pc;

        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (nws_count >= netisr_maxthreads)
                        break;
                /* Worker will already be present for boot CPU. */
                if (pc->pc_netisr != NULL)
                        continue;
                netisr_start_swi(pc->pc_cpuid, pc);
        }
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
#endif
/*-
 * Sysctl monitoring for netisr: query a list of registered protocols.
 */
static int
sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
{
        struct rm_priotracker tracker;
        struct sysctl_netisr_proto *snpp, *snp_array;
        struct netisr_proto *npp;
        u_int counter, proto;
        int error;

        if (req->newptr != NULL)
                return (EINVAL);
        snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
            M_ZERO | M_WAITOK);
        counter = 0;
        NETISR_RLOCK(&tracker);
        for (proto = 0; proto < NETISR_MAXPROT; proto++) {
                npp = &netisr_proto[proto];
                if (npp->np_name == NULL)
                        continue;
                snpp = &snp_array[counter];
                snpp->snp_version = sizeof(*snpp);
                strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
                snpp->snp_proto = proto;
                snpp->snp_qlimit = npp->np_qlimit;
                snpp->snp_policy = npp->np_policy;
                snpp->snp_dispatch = npp->np_dispatch;
                if (npp->np_m2flow != NULL)
                        snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
                if (npp->np_m2cpuid != NULL)
                        snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
                if (npp->np_drainedcpu != NULL)
                        snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
                counter++;
        }
        NETISR_RUNLOCK(&tracker);
        KASSERT(counter <= NETISR_MAXPROT,
            ("sysctl_netisr_proto: counter too big (%d)", counter));
        error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
        free(snp_array, M_TEMP);
        return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, proto,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
    "S,sysctl_netisr_proto",
    "Return list of protocols registered with netisr");
/*-
 * Sysctl monitoring for netisr: query a list of workstreams.
 */
static int
sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
{
        struct rm_priotracker tracker;
        struct sysctl_netisr_workstream *snwsp, *snws_array;
        struct netisr_workstream *nwsp;
        u_int counter, cpuid;
        int error;

        if (req->newptr != NULL)
                return (EINVAL);
        snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
            M_ZERO | M_WAITOK);
        counter = 0;
        NETISR_RLOCK(&tracker);
        CPU_FOREACH(cpuid) {
                nwsp = DPCPU_ID_PTR(cpuid, nws);
                if (nwsp->nws_intr_event == NULL)
                        continue;
                NWS_LOCK(nwsp);
                snwsp = &snws_array[counter];
                snwsp->snws_version = sizeof(*snwsp);

                /*
                 * For now, we equate workstream IDs and CPU IDs in the
                 * kernel, but expose them independently to userspace in case
                 * that assumption changes in the future.
                 */
                snwsp->snws_wsid = cpuid;
                snwsp->snws_cpu = cpuid;
                if (nwsp->nws_intr_event != NULL)
                        snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
                NWS_UNLOCK(nwsp);
                counter++;
        }
        NETISR_RUNLOCK(&tracker);
        KASSERT(counter <= MAXCPU,
            ("sysctl_netisr_workstream: counter too big (%d)", counter));
        error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
        free(snws_array, M_TEMP);
        return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
    "S,sysctl_netisr_workstream",
    "Return list of workstreams implemented by netisr");
/*-
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
        struct rm_priotracker tracker;
        struct sysctl_netisr_work *snwp, *snw_array;
        struct netisr_workstream *nwsp;
        struct netisr_proto *npp;
        struct netisr_work *nwp;
        u_int counter, cpuid, proto;
        int error;

        if (req->newptr != NULL)
                return (EINVAL);
        snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
            M_TEMP, M_ZERO | M_WAITOK);
        counter = 0;
        NETISR_RLOCK(&tracker);
        CPU_FOREACH(cpuid) {
                nwsp = DPCPU_ID_PTR(cpuid, nws);
                if (nwsp->nws_intr_event == NULL)
                        continue;
                NWS_LOCK(nwsp);
                for (proto = 0; proto < NETISR_MAXPROT; proto++) {
                        npp = &netisr_proto[proto];
                        if (npp->np_name == NULL)
                                continue;
                        nwp = &nwsp->nws_work[proto];
                        snwp = &snw_array[counter];
                        snwp->snw_version = sizeof(*snwp);
                        snwp->snw_wsid = cpuid;         /* See comment above. */
                        snwp->snw_proto = proto;
                        snwp->snw_len = nwp->nw_len;
                        snwp->snw_watermark = nwp->nw_watermark;
                        snwp->snw_dispatched = nwp->nw_dispatched;
                        snwp->snw_hybrid_dispatched =
                            nwp->nw_hybrid_dispatched;
                        snwp->snw_qdrops = nwp->nw_qdrops;
                        snwp->snw_queued = nwp->nw_queued;
                        snwp->snw_handled = nwp->nw_handled;
                        counter++;
                }
                NWS_UNLOCK(nwsp);
        }
        KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
            ("sysctl_netisr_work: counter too big (%d)", counter));
        NETISR_RUNLOCK(&tracker);
        error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
        free(snw_array, M_TEMP);
        return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
    "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");
#ifdef DDB
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
        struct netisr_workstream *nwsp;
        struct netisr_work *nwp;
        int first, proto;
        u_int cpuid;

        db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
            "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
        CPU_FOREACH(cpuid) {
                nwsp = DPCPU_ID_PTR(cpuid, nws);
                if (nwsp->nws_intr_event == NULL)
                        continue;
                first = 1;
                for (proto = 0; proto < NETISR_MAXPROT; proto++) {
                        if (netisr_proto[proto].np_handler == NULL)
                                continue;
                        nwp = &nwsp->nws_work[proto];
                        if (first) {
                                db_printf("%3d ", cpuid);
                                first = 0;
                        } else
                                db_printf("%3s ", "");
                        db_printf(
                            "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
                            netisr_proto[proto].np_name, nwp->nw_len,
                            nwp->nw_watermark, nwp->nw_qlimit,
                            nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
                            nwp->nw_qdrops, nwp->nw_queued);
                }
        }
}
#endif