/*-
 * Copyright (c) 2007-2009 Robert N. M. Watson
 * Copyright (c) 2010 Juniper Networks, Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract
 * to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
 * netisr is a packet dispatch service, allowing synchronous (directly
 * dispatched) and asynchronous (deferred dispatch) processing of packets by
 * registered protocol handlers.  Callers pass a protocol identifier and
 * packet to netisr, along with a direct dispatch hint, and work will either
 * be immediately processed by the registered handler, or passed to a
 * software interrupt (SWI) thread for deferred dispatch.  Callers will
 * generally select one or the other (see the sketch below) based on:
 *
 * - Whether directly dispatching a netisr handler would lead to code
 *   reentrance or lock recursion, such as entering the socket code from the
 *   socket code.
 * - Whether directly dispatching a netisr handler would lead to recursive
 *   processing, such as when decapsulating several wrapped layers of tunnel
 *   information (IPSEC within IPSEC within ...).
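 *
 * For example (an illustrative sketch, not part of the original comment):
 * a caller that has classified a received mbuf "m" as IPv4 can request
 * either treatment:
 *
 *	netisr_dispatch(NETISR_IP, m);	(direct dispatch permitted)
 *	netisr_queue(NETISR_IP, m);	(always deferred to the SWI thread)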
 *
 * Maintaining ordering for protocol streams is a critical design concern.
 * Enforcing ordering limits the opportunity for concurrency, but maintains
 * the strong ordering requirements found in some protocols, such as TCP.  Of
 * related concern is CPU affinity--it is desirable to process all data
 * associated with a particular stream on the same CPU over time in order to
 * avoid acquiring locks associated with the connection on different CPUs,
 * keep connection data in one cache, and to generally encourage associated
 * user threads to live on the same CPU as the stream.  It's also desirable
 * to avoid lock migration and contention where locks are associated with
 * individual workstreams.
 *
 * netisr supports several policy variations, represented by the
 * NETISR_POLICY_* constants, allowing protocols to play various roles in
 * identifying flows, assigning work to CPUs, etc.  These are described in
 * detail in netisr.h.
 */
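
/*
 * For illustration (a sketch modeled on netinet/ip_input.c, not code
 * original to this file): a protocol selects its policy when it registers
 * a handler:
 *
 *	static struct netisr_handler ip_nh = {
 *		.nh_name = "ip",
 *		.nh_handler = ip_input,
 *		.nh_proto = NETISR_IP,
 *		.nh_policy = NETISR_POLICY_FLOW,
 *	};
 *
 *	netisr_register(&ip_nh);
 */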
#include "opt_ddb.h"
#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
#define	_WANT_NETISR_INTERNAL	/* Enable definitions from netisr_internal.h */
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/netisr_internal.h>
#include <net/vnet.h>
/*-
 * Synchronize use and modification of the registered netisr data structures;
 * acquire a write lock while modifying the set of registered protocols, so
 * that partially registered or unregistered protocols are never run.
 *
 * The following data structures and fields are protected by this lock:
 *
 * - The netisr_proto array, including all fields of struct netisr_proto.
 * - The nws array, including all fields of struct netisr_worker.
 * - The nws_array array.
 *
 * Note: the NETISR_LOCKING define controls whether read locks are acquired
 * in packet processing paths requiring netisr registration stability.  This
 * is disabled by default as it can lead to measurable performance
 * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and
 * because netisr registration and unregistration is extremely rare at
 * runtime.  If it becomes more common, this decision should be revisited.
 *
 * XXXRW: rmlocks don't support assertions.
 */
static struct rmlock	netisr_rmlock;
#define	NETISR_LOCK_INIT()	rm_init_flags(&netisr_rmlock, "netisr", \
				    RM_NOWITNESS)
#define	NETISR_LOCK_ASSERT()
#define	NETISR_RLOCK(tracker)	rm_rlock(&netisr_rmlock, (tracker))
#define	NETISR_RUNLOCK(tracker)	rm_runlock(&netisr_rmlock, (tracker))
#define	NETISR_WLOCK()		rm_wlock(&netisr_rmlock)
#define	NETISR_WUNLOCK()	rm_wunlock(&netisr_rmlock)
/* #define	NETISR_LOCKING */

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr");
/*-
 * Three direct dispatch policies are supported:
 *
 * - Always defer: all work is scheduled for a netisr, regardless of context.
 *   (!direct)
 *
 * - Hybrid: if the executing context allows direct dispatch, and we're
 *   running on the CPU the work would be done on, then direct dispatch if it
 *   wouldn't violate ordering constraints on the workstream.
 *   (direct && !direct_force)
 *
 * - Always direct: if the executing context allows direct dispatch, always
 *   direct dispatch.  (direct && direct_force)
 *
 * Notice that changing the global policy could lead to short periods of
 * misordered processing, but this is considered acceptable as compared to
 * the complexity of enforcing ordering during policy changes.
 */
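
/*
 * For example (an administrative sketch): since both variables below are
 * CTLFLAG_RW, the hybrid policy can be selected at runtime with sysctl(8):
 *
 *	sysctl net.isr.direct=1
 *	sysctl net.isr.direct_force=0
 *
 * The defaults below (both 1) correspond to "always direct".
 */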
static int	netisr_direct_force = 1;	/* Always direct dispatch. */
TUNABLE_INT("net.isr.direct_force", &netisr_direct_force);
SYSCTL_INT(_net_isr, OID_AUTO, direct_force, CTLFLAG_RW,
    &netisr_direct_force, 0, "Force direct dispatch");

static int	netisr_direct = 1;	/* Enable direct dispatch. */
TUNABLE_INT("net.isr.direct", &netisr_direct);
SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RW,
    &netisr_direct, 0, "Enable direct dispatch");

/*
 * Allow the administrator to limit the number of threads (CPUs) to use for
 * netisr.  We don't check netisr_maxthreads before creating the thread for
 * CPU 0, so in practice we ignore values <= 1.  This must be set at boot.
 * We will create at most one thread per CPU.
 */
static int	netisr_maxthreads = -1;		/* Max number of threads. */
TUNABLE_INT("net.isr.maxthreads", &netisr_maxthreads);
SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN,
    &netisr_maxthreads, 0,
    "Use at most this many CPUs for netisr processing");

static int	netisr_bindthreads = 0;		/* Bind threads to CPUs. */
TUNABLE_INT("net.isr.bindthreads", &netisr_bindthreads);
SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN,
    &netisr_bindthreads, 0, "Bind netisr threads to CPUs.");
/*
 * Limit per-workstream mbuf queue limits to at most net.isr.maxqlimit, both
 * for initial configuration and later modification using
 * netisr_setqlimit().
 */
#define	NETISR_DEFAULT_MAXQLIMIT	10240
static u_int	netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT;
TUNABLE_INT("net.isr.maxqlimit", &netisr_maxqlimit);
SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN,
    &netisr_maxqlimit, 0,
    "Maximum netisr per-protocol, per-CPU queue depth.");
/*
 * The default per-workstream mbuf queue limit for protocols that don't
 * initialize the nh_qlimit field of their struct netisr_handler.  If this is
 * set above netisr_maxqlimit, we truncate it to the maximum during boot.
 */
#define	NETISR_DEFAULT_DEFAULTQLIMIT	256
static u_int	netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT;
TUNABLE_INT("net.isr.defaultqlimit", &netisr_defaultqlimit);
SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN,
    &netisr_defaultqlimit, 0,
    "Default netisr per-protocol, per-CPU queue limit if not set by protocol");
/*
 * Store and export the compile-time constant NETISR_MAXPROT limit on the
 * number of protocols that can register with netisr at a time.  This is
 * required for crashdump analysis, as it sizes netisr_proto[].
 */
static u_int	netisr_maxprot = NETISR_MAXPROT;
SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD,
    &netisr_maxprot, 0,
    "Compile-time limit on the number of protocols supported by netisr.");
/*
 * The netisr_proto array describes all registered protocols, indexed by
 * protocol number.  See netisr_internal.h for more details.
 */
static struct netisr_proto	netisr_proto[NETISR_MAXPROT];

/*
 * Per-CPU workstream data.  See netisr_internal.h for more details.
 */
DPCPU_DEFINE(struct netisr_workstream, nws);

/*
 * Map contiguous values between 0 and nws_count into CPU IDs appropriate for
 * accessing workstreams.  This allows constructions of the form
 * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws).
 */
static u_int	nws_array[MAXCPU];

/*
 * Number of registered workstreams.  Will be at most the number of running
 * CPUs once fully started.
 */
static u_int	nws_count;
SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD,
    &nws_count, 0, "Number of extant netisr threads.");

/*
 * Synchronization for each workstream: a mutex protects all mutable fields
 * in each stream, including per-protocol state (mbuf queues).  The SWI is
 * woken up if asynchronous dispatch is required.
 */
#define	NWS_LOCK(s)		mtx_lock(&(s)->nws_mtx)
#define	NWS_LOCK_ASSERT(s)	mtx_assert(&(s)->nws_mtx, MA_OWNED)
#define	NWS_UNLOCK(s)		mtx_unlock(&(s)->nws_mtx)
#define	NWS_SIGNAL(s)		swi_sched((s)->nws_swi_cookie, 0)
/*
 * Utility routines for protocols that implement their own mapping of flows
 * to CPUs.
 */
u_int
netisr_get_cpucount(void)
{

	return (nws_count);
}

u_int
netisr_get_cpuid(u_int cpunumber)
{

	KASSERT(cpunumber < nws_count, ("%s: %u >= %u", __func__, cpunumber,
	    nws_count));

	return (nws_array[cpunumber]);
}
/*
 * The default implementation of flow -> CPU ID mapping.
 *
 * Non-static so that protocols can use it to map their own work to specific
 * CPUs in a manner consistent with netisr for affinity purposes.
 */
u_int
netisr_default_flow2cpu(u_int flowid)
{

	return (nws_array[flowid % nws_count]);
}
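
/*
 * For illustration (a hedged sketch, not part of the original file): a
 * protocol maintaining its own per-CPU state can size and index it with the
 * utilities above, staying consistent with netisr's own flow placement:
 *
 *	u_int i, cpuid;
 *
 *	for (i = 0; i < netisr_get_cpucount(); i++) {
 *		cpuid = netisr_get_cpuid(i);
 *		(allocate per-CPU state for cpuid...)
 *	}
 *
 * A packet's flow can later be mapped to the same CPU netisr would pick by
 * calling netisr_default_flow2cpu(flowid).
 */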
/*
 * Register a new netisr handler, which requires initializing per-protocol
 * fields for each workstream.  All netisr work is briefly suspended while
 * the protocol is installed.
 */
void
netisr_register(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
	const char *name;
	u_int i, proto;

	proto = nhp->nh_proto;
	name = nhp->nh_name;

	/*
	 * Test that the requested registration is valid.
	 */
	KASSERT(nhp->nh_name != NULL,
	    ("%s: nh_name NULL for %u", __func__, proto));
	KASSERT(nhp->nh_handler != NULL,
	    ("%s: nh_handler NULL for %s", __func__, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE ||
	    nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_policy == NETISR_POLICY_CPU,
	    ("%s: unsupported nh_policy %u for %s", __func__,
	    nhp->nh_policy, name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW ||
	    nhp->nh_m2flow == NULL,
	    ("%s: nh_policy != FLOW but m2flow defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL,
	    ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__,
	    name));
	KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL,
	    ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__,
	    name));
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u, %s): protocol too big", __func__, proto, name));

	/*
	 * Test that no existing registration exists for this protocol.
	 */
	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_name == NULL,
	    ("%s(%u, %s): name present", __func__, proto, name));
	KASSERT(netisr_proto[proto].np_handler == NULL,
	    ("%s(%u, %s): handler present", __func__, proto, name));

	netisr_proto[proto].np_name = name;
	netisr_proto[proto].np_handler = nhp->nh_handler;
	netisr_proto[proto].np_m2flow = nhp->nh_m2flow;
	netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid;
	netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu;
	if (nhp->nh_qlimit == 0)
		netisr_proto[proto].np_qlimit = netisr_defaultqlimit;
	else if (nhp->nh_qlimit > netisr_maxqlimit) {
		printf("%s: %s requested queue limit %u capped to "
		    "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit,
		    netisr_maxqlimit);
		netisr_proto[proto].np_qlimit = netisr_maxqlimit;
	} else
		netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
	netisr_proto[proto].np_policy = nhp->nh_policy;
	for (i = 0; i <= mp_maxid; i++) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		bzero(npwp, sizeof(*npwp));
		npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
	}
	NETISR_WUNLOCK();
}
/*
 * Clear drop counters across all workstreams for a protocol.
 */
void
netisr_clearqdrops(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	for (i = 0; i <= mp_maxid; i++) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qdrops = 0;
	}
	NETISR_WUNLOCK();
}
/*
 * Query current drop counters across all workstreams for a protocol.
 */
void
netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
{
	struct netisr_work *npwp;
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	*qdropp = 0;
	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	for (i = 0; i <= mp_maxid; i++) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		*qdropp += npwp->nw_qdrops;
	}
	NETISR_RUNLOCK(&tracker);
}
/*
 * Query current per-workstream queue limit for a protocol.
 */
void
netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp)
{
	struct rm_priotracker tracker;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_RLOCK(&tracker);
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));
	*qlimitp = netisr_proto[proto].np_qlimit;
	NETISR_RUNLOCK(&tracker);
}
/*
 * Update the queue limit across per-workstream queues for a protocol.  We
 * simply change the limits, and don't drain overflowed packets as they will
 * (hopefully) take care of themselves shortly.
 */
int
netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	if (qlimit > netisr_maxqlimit)
		return (EINVAL);

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_qlimit = qlimit;
	for (i = 0; i <= mp_maxid; i++) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		npwp->nw_qlimit = qlimit;
	}
	NETISR_WUNLOCK();
	return (0);
}
/*
 * Drain all packets currently held in a particular protocol work queue.
 */
static void
netisr_drain_proto(struct netisr_work *npwp)
{
	struct mbuf *m;

	/*
	 * We would assert the lock on the workstream but it's not passed in.
	 */
	while ((m = npwp->nw_head) != NULL) {
		npwp->nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL)
			npwp->nw_tail = NULL;
		npwp->nw_len--;
		m_freem(m);
	}
	KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__));
	KASSERT(npwp->nw_len == 0, ("%s: len", __func__));
}
/*
 * Remove the registration of a network protocol, which requires clearing
 * per-protocol fields across all workstreams, including freeing all mbufs in
 * the queues at time of unregister.  All work in netisr is briefly suspended
 * while this takes place.
 */
void
netisr_unregister(const struct netisr_handler *nhp)
{
	struct netisr_work *npwp;
#ifdef INVARIANTS
	const char *name;
#endif
	u_int i, proto;

	proto = nhp->nh_proto;
#ifdef INVARIANTS
	name = nhp->nh_name;
#endif
	KASSERT(proto < NETISR_MAXPROT,
	    ("%s(%u): protocol too big for %s", __func__, proto, name));

	NETISR_WLOCK();
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s(%u): protocol not registered for %s", __func__, proto,
	    name));

	netisr_proto[proto].np_name = NULL;
	netisr_proto[proto].np_handler = NULL;
	netisr_proto[proto].np_m2flow = NULL;
	netisr_proto[proto].np_m2cpuid = NULL;
	netisr_proto[proto].np_qlimit = 0;
	netisr_proto[proto].np_policy = 0;
	for (i = 0; i <= mp_maxid; i++) {
		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
		netisr_drain_proto(npwp);
		bzero(npwp, sizeof(*npwp));
	}
	NETISR_WUNLOCK();
}
/*
 * Look up the workstream given a packet and source identifier.  Do this by
 * checking the protocol's policy, and optionally call out to the protocol
 * for assistance if required.
 */
static struct mbuf *
netisr_select_cpuid(struct netisr_proto *npp, uintptr_t source,
    struct mbuf *m, u_int *cpuidp)
{
	struct ifnet *ifp;

	NETISR_LOCK_ASSERT();

	/*
	 * In the event we have only one worker, shortcut and deliver to it
	 * without further ado.
	 */
	if (nws_count == 1) {
		*cpuidp = nws_array[0];
		return (m);
	}

	/*
	 * What happens next depends on the policy selected by the protocol.
	 * If we want to support per-interface policies, we should do that
	 * here first.
	 */
	switch (npp->np_policy) {
	case NETISR_POLICY_CPU:
		return (npp->np_m2cpuid(m, source, cpuidp));

	case NETISR_POLICY_FLOW:
		if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
			m = npp->np_m2flow(m, source);
			if (m == NULL)
				return (NULL);
		}
		if (m->m_flags & M_FLOWID) {
			*cpuidp =
			    netisr_default_flow2cpu(m->m_pkthdr.flowid);
			return (m);
		}
		/* FALLTHROUGH */

	case NETISR_POLICY_SOURCE:
		ifp = m->m_pkthdr.rcvif;
		if (ifp != NULL)
			*cpuidp = nws_array[(ifp->if_index + source) %
			    nws_count];
		else
			*cpuidp = nws_array[source % nws_count];
		return (m);

	default:
		panic("%s: invalid policy %u for %s", __func__,
		    npp->np_policy, npp->np_name);
	}
}
/*
 * Process packets associated with a workstream and protocol.  For reasons of
 * fairness, we process up to one complete netisr queue at a time, moving the
 * queue to a stack-local queue for processing, but do not loop refreshing
 * from the global queue.  The caller is responsible for deciding whether to
 * loop, and for setting the NWS_RUNNING flag.  The passed workstream will be
 * locked on entry and relocked before return, but will be released while
 * processing.  The number of packets processed is returned.
 */
static u_int
netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto)
{
	struct netisr_work local_npw, *npwp;
	u_int handled;
	struct mbuf *m;

	NETISR_LOCK_ASSERT();
	NWS_LOCK_ASSERT(nwsp);

	KASSERT(nwsp->nws_flags & NWS_RUNNING,
	    ("%s(%u): not running", __func__, proto));
	KASSERT(proto >= 0 && proto < NETISR_MAXPROT,
	    ("%s(%u): invalid proto\n", __func__, proto));

	npwp = &nwsp->nws_work[proto];
	if (npwp->nw_len == 0)
		return (0);

	/*
	 * Move the global work queue to a thread-local work queue.
	 *
	 * Notice that this means the effective maximum length of the queue
	 * is actually twice that of the maximum queue length specified in
	 * the protocol registration call.
	 */
	handled = npwp->nw_len;
	local_npw = *npwp;
	npwp->nw_head = NULL;
	npwp->nw_tail = NULL;
	npwp->nw_len = 0;
	nwsp->nws_pendingbits &= ~(1 << proto);
	NWS_UNLOCK(nwsp);
	while ((m = local_npw.nw_head) != NULL) {
		local_npw.nw_head = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (local_npw.nw_head == NULL)
			local_npw.nw_tail = NULL;
		local_npw.nw_len--;
		VNET_ASSERT(m->m_pkthdr.rcvif != NULL,
		    ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m));
		CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
		netisr_proto[proto].np_handler(m);
		CURVNET_RESTORE();
	}
	KASSERT(local_npw.nw_len == 0,
	    ("%s(%u): len %u", __func__, proto, local_npw.nw_len));
	if (netisr_proto[proto].np_drainedcpu)
		netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu);
	NWS_LOCK(nwsp);
	npwp->nw_handled += handled;
	return (handled);
}
/*
 * SWI handler for netisr -- processes packets in a set of workstreams that
 * it owns, woken up by calls to NWS_SIGNAL().  If this workstream is already
 * being direct dispatched, go back to sleep and wait for the dispatching
 * thread to wake us up again.
 */
static void
swi_net(void *arg)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	u_int bits, prot;

	nwsp = arg;

#ifdef DEVICE_POLLING
	KASSERT(nws_count == 1,
	    ("%s: device_polling but nws_count != 1", __func__));
	netisr_poll();
#endif
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	NWS_LOCK(nwsp);
	KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running"));
	if (nwsp->nws_flags & NWS_DISPATCHING)
		goto out;
	nwsp->nws_flags |= NWS_RUNNING;
	nwsp->nws_flags &= ~NWS_SCHEDULED;
	while ((bits = nwsp->nws_pendingbits) != 0) {
		while ((prot = ffs(bits)) != 0) {
			prot--;		/* ffs() returns a 1-based index. */
			bits &= ~(1 << prot);
			(void)netisr_process_workstream_proto(nwsp, prot);
		}
	}
	nwsp->nws_flags &= ~NWS_RUNNING;
out:
	NWS_UNLOCK(nwsp);
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
#ifdef DEVICE_POLLING
	netisr_pollmore();
#endif
}
static int
netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto,
    struct netisr_work *npwp, struct mbuf *m, int *dosignalp)
{

	NWS_LOCK_ASSERT(nwsp);

	*dosignalp = 0;
	if (npwp->nw_len < npwp->nw_qlimit) {
		m->m_nextpkt = NULL;
		if (npwp->nw_head == NULL) {
			npwp->nw_head = m;
			npwp->nw_tail = m;
		} else {
			npwp->nw_tail->m_nextpkt = m;
			npwp->nw_tail = m;
		}
		npwp->nw_len++;
		if (npwp->nw_len > npwp->nw_watermark)
			npwp->nw_watermark = npwp->nw_len;

		/*
		 * We must set the bit regardless of NWS_RUNNING, so that
		 * swi_net() keeps calling netisr_process_workstream_proto().
		 */
		nwsp->nws_pendingbits |= (1 << proto);
		if (!(nwsp->nws_flags &
		    (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) {
			nwsp->nws_flags |= NWS_SCHEDULED;
			*dosignalp = 1;	/* Defer until unlocked. */
		}
		npwp->nw_queued++;
		return (0);
	} else {
		m_freem(m);
		npwp->nw_qdrops++;
		return (ENOBUFS);
	}
}
static int
netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;

#ifdef NETISR_LOCKING
	NETISR_LOCK_ASSERT();
#endif
	KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__,
	    cpuid, mp_maxid));
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	dosignal = 0;
	error = 0;
	nwsp = DPCPU_ID_PTR(cpuid, nws);
	npwp = &nwsp->nws_work[proto];
	NWS_LOCK(nwsp);
	error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal);
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	return (error);
}
int
netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	u_int cpuid;
	int error;

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));

#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	m = netisr_select_cpuid(&netisr_proto[proto], source, m, &cpuid);
	if (m != NULL) {
		KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__,
		    cpuid));
		error = netisr_queue_internal(proto, m, cpuid);
	} else
		error = ENOBUFS;
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_queue(u_int proto, struct mbuf *m)
{

	return (netisr_queue_src(proto, 0, m));
}
/*
 * Dispatch a packet for netisr processing; direct dispatch is permitted by
 * calling context.
 */
int
netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m)
{
#ifdef NETISR_LOCKING
	struct rm_priotracker tracker;
#endif
	struct netisr_workstream *nwsp;
	struct netisr_work *npwp;
	int dosignal, error;
	u_int cpuid;

	/*
	 * If direct dispatch is entirely disabled, fall back on queueing.
	 */
	if (!netisr_direct)
		return (netisr_queue_src(proto, source, m));

	KASSERT(proto < NETISR_MAXPROT,
	    ("%s: invalid proto %u", __func__, proto));
#ifdef NETISR_LOCKING
	NETISR_RLOCK(&tracker);
#endif
	KASSERT(netisr_proto[proto].np_handler != NULL,
	    ("%s: invalid proto %u", __func__, proto));

	/*
	 * If direct dispatch is forced, then unconditionally dispatch
	 * without a formal CPU selection.  Borrow the current CPU's stats,
	 * even if there's no worker on it.  In this case we don't update
	 * nws_flags because all netisr processing will be source ordered due
	 * to always being forced to directly dispatch.
	 */
	if (netisr_direct_force) {
		nwsp = DPCPU_PTR(nws);
		npwp = &nwsp->nws_work[proto];
		npwp->nw_dispatched++;
		npwp->nw_handled++;
		netisr_proto[proto].np_handler(m);
		error = 0;
		goto out_unlock;
	}

	/*
	 * Otherwise, we execute in a hybrid mode where we will try to direct
	 * dispatch if we're on the right CPU and the netisr worker isn't
	 * already running.
	 */
	m = netisr_select_cpuid(&netisr_proto[proto], source, m, &cpuid);
	if (m == NULL) {
		error = ENOBUFS;
		goto out_unlock;
	}
	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));
	sched_pin();
	if (cpuid != curcpu)
		goto queue_fallback;
	nwsp = DPCPU_PTR(nws);
	npwp = &nwsp->nws_work[proto];

	/*-
	 * We are willing to direct dispatch only if three conditions hold:
	 *
	 * (1) The netisr worker isn't already running,
	 * (2) Another thread isn't already directly dispatching, and
	 * (3) The netisr hasn't already been woken up.
	 */
	NWS_LOCK(nwsp);
	if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) {
		error = netisr_queue_workstream(nwsp, proto, npwp, m,
		    &dosignal);
		NWS_UNLOCK(nwsp);
		if (dosignal)
			NWS_SIGNAL(nwsp);
		goto out_unpin;
	}

	/*
	 * The current thread is now effectively the netisr worker, so set
	 * the dispatching flag to prevent concurrent processing of the
	 * stream from another thread (even the netisr worker), which could
	 * otherwise lead to effective misordering of the stream.
	 */
	nwsp->nws_flags |= NWS_DISPATCHING;
	NWS_UNLOCK(nwsp);
	netisr_proto[proto].np_handler(m);
	NWS_LOCK(nwsp);
	nwsp->nws_flags &= ~NWS_DISPATCHING;
	npwp->nw_handled++;
	npwp->nw_hybrid_dispatched++;

	/*
	 * If other work was enqueued by another thread while we were direct
	 * dispatching, we need to signal the netisr worker to do that work.
	 * In the future, we might want to do some of that work in the
	 * current thread, rather than trigger further context switches.  If
	 * so, we'll want to establish a reasonable bound on the work done in
	 * the "borrowed" context.
	 */
	if (nwsp->nws_pendingbits != 0) {
		nwsp->nws_flags |= NWS_SCHEDULED;
		dosignal = 1;
	} else
		dosignal = 0;
	NWS_UNLOCK(nwsp);
	if (dosignal)
		NWS_SIGNAL(nwsp);
	error = 0;
	goto out_unpin;

queue_fallback:
	error = netisr_queue_internal(proto, m, cpuid);
out_unpin:
	sched_unpin();
out_unlock:
#ifdef NETISR_LOCKING
	NETISR_RUNLOCK(&tracker);
#endif
	return (error);
}

int
netisr_dispatch(u_int proto, struct mbuf *m)
{

	return (netisr_dispatch_src(proto, 0, m));
}
#ifdef DEVICE_POLLING
/*
 * Kernel polling borrows a netisr thread to run interface polling in; this
 * function allows kernel polling to request that the netisr thread be
 * scheduled even if no packets are pending for protocols.
 */
void
netisr_sched_poll(void)
{
	struct netisr_workstream *nwsp;

	nwsp = DPCPU_ID_PTR(nws_array[0], nws);
	NWS_SIGNAL(nwsp);
}
#endif
static void
netisr_start_swi(u_int cpuid, struct pcpu *pc)
{
	char swiname[12];
	struct netisr_workstream *nwsp;
	int error;

	KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid));

	nwsp = DPCPU_ID_PTR(cpuid, nws);
	mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF);
	nwsp->nws_cpu = cpuid;
	snprintf(swiname, sizeof(swiname), "netisr %u", cpuid);
	error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp,
	    SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);
	pc->pc_netisr = nwsp->nws_intr_event;
	if (netisr_bindthreads) {
		error = intr_event_bind(nwsp->nws_intr_event, cpuid);
		if (error != 0)
			printf("%s: cpu %u: intr_event_bind: %d", __func__,
			    cpuid, error);
	}
	NETISR_WLOCK();
	nws_array[nws_count] = nwsp->nws_cpu;
	nws_count++;
	NETISR_WUNLOCK();
}
/*
 * Initialize the netisr subsystem.  We rely on BSS and static initialization
 * of most fields in global data structures.
 *
 * Start a worker thread for the boot CPU so that we can support network
 * traffic immediately in case the network stack is used before additional
 * CPUs are started (for example, diskless boot).
 */
static void
netisr_init(void *arg)
{

	KASSERT(curcpu == 0, ("%s: not on CPU 0", __func__));

	NETISR_LOCK_INIT();
	if (netisr_maxthreads < 1)
		netisr_maxthreads = 1;
	if (netisr_maxthreads > mp_ncpus) {
		printf("netisr_init: forcing maxthreads from %d to %d\n",
		    netisr_maxthreads, mp_ncpus);
		netisr_maxthreads = mp_ncpus;
	}
	if (netisr_defaultqlimit > netisr_maxqlimit) {
		printf("netisr_init: forcing defaultqlimit from %d to %d\n",
		    netisr_defaultqlimit, netisr_maxqlimit);
		netisr_defaultqlimit = netisr_maxqlimit;
	}
#ifdef DEVICE_POLLING
	/*
	 * The device polling code is not yet aware of how to deal with
	 * multiple netisr threads, so for the time being compiling in device
	 * polling disables parallel netisr workers.
	 */
	if (netisr_maxthreads != 1 || netisr_bindthreads != 0) {
		printf("netisr_init: forcing maxthreads to 1 and "
		    "bindthreads to 0 for device polling\n");
		netisr_maxthreads = 1;
		netisr_bindthreads = 0;
	}
#endif

	netisr_start_swi(curcpu, pcpu_find(curcpu));
}
SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL);
/*
 * Start worker threads for additional CPUs.  No attempt is made to
 * gracefully handle work reassignment, as we don't yet support dynamic
 * reconfiguration.
 */
static void
netisr_start(void *arg)
{
	struct pcpu *pc;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (nws_count >= netisr_maxthreads)
			break;
		/* XXXRW: Is skipping absent CPUs still required here? */
		if (CPU_ABSENT(pc->pc_cpuid))
			continue;
		/* Worker will already be present for boot CPU. */
		if (pc->pc_netisr != NULL)
			continue;
		netisr_start_swi(pc->pc_cpuid, pc);
	}
}
SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL);
/*
 * Sysctl monitoring for netisr: query a list of registered protocols.
 */
static int
sysctl_netisr_proto(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_proto *snpp, *snp_array;
	struct netisr_proto *npp;
	u_int counter, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	for (proto = 0; proto < NETISR_MAXPROT; proto++) {
		npp = &netisr_proto[proto];
		if (npp->np_name == NULL)
			continue;
		snpp = &snp_array[counter];
		snpp->snp_version = sizeof(*snpp);
		strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN);
		snpp->snp_proto = proto;
		snpp->snp_qlimit = npp->np_qlimit;
		snpp->snp_policy = npp->np_policy;
		if (npp->np_m2flow != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW;
		if (npp->np_m2cpuid != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID;
		if (npp->np_drainedcpu != NULL)
			snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU;
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= NETISR_MAXPROT,
	    ("sysctl_netisr_proto: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter);
	free(snp_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, proto,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto,
    "S,sysctl_netisr_proto",
    "Return list of protocols registered with netisr");
/*
 * Sysctl monitoring for netisr: query a list of workstreams.
 */
static int
sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_workstream *snwsp, *snws_array;
	struct netisr_workstream *nwsp;
	u_int counter, cpuid;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP,
	    M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		snwsp = &snws_array[counter];
		snwsp->snws_version = sizeof(*snwsp);

		/*
		 * For now, we equate workstream IDs and CPU IDs in the
		 * kernel, but expose them independently to userspace in case
		 * that assumption changes in the future.
		 */
		snwsp->snws_wsid = cpuid;
		snwsp->snws_cpu = cpuid;
		if (nwsp->nws_intr_event != NULL)
			snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR;
		NWS_UNLOCK(nwsp);
		counter++;
	}
	NETISR_RUNLOCK(&tracker);
	KASSERT(counter <= MAXCPU,
	    ("sysctl_netisr_workstream: counter too big (%d)", counter));
	error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter);
	free(snws_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, workstream,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream,
    "S,sysctl_netisr_workstream",
    "Return list of workstreams implemented by netisr");
/*
 * Sysctl monitoring for netisr: query per-protocol data across all
 * workstreams.
 */
static int
sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	struct sysctl_netisr_work *snwp, *snw_array;
	struct netisr_workstream *nwsp;
	struct netisr_proto *npp;
	struct netisr_work *nwp;
	u_int counter, cpuid, proto;
	int error;

	if (req->newptr != NULL)
		return (EINVAL);
	snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT,
	    M_TEMP, M_ZERO | M_WAITOK);
	counter = 0;
	NETISR_RLOCK(&tracker);
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		NWS_LOCK(nwsp);
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			npp = &netisr_proto[proto];
			if (npp->np_name == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			snwp = &snw_array[counter];
			snwp->snw_version = sizeof(*snwp);
			snwp->snw_wsid = cpuid;		/* See comment above. */
			snwp->snw_proto = proto;
			snwp->snw_len = nwp->nw_len;
			snwp->snw_watermark = nwp->nw_watermark;
			snwp->snw_dispatched = nwp->nw_dispatched;
			snwp->snw_hybrid_dispatched =
			    nwp->nw_hybrid_dispatched;
			snwp->snw_qdrops = nwp->nw_qdrops;
			snwp->snw_queued = nwp->nw_queued;
			snwp->snw_handled = nwp->nw_handled;
			counter++;
		}
		NWS_UNLOCK(nwsp);
	}
	KASSERT(counter <= MAXCPU * NETISR_MAXPROT,
	    ("sysctl_netisr_work: counter too big (%d)", counter));
	NETISR_RUNLOCK(&tracker);
	error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter);
	free(snw_array, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_isr, OID_AUTO, work,
    CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work,
    "S,sysctl_netisr_work",
    "Return list of per-workstream, per-protocol work in netisr");
#ifdef DDB
DB_SHOW_COMMAND(netisr, db_show_netisr)
{
	struct netisr_workstream *nwsp;
	struct netisr_work *nwp;
	int first, proto;
	u_int cpuid;

	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
	CPU_FOREACH(cpuid) {
		nwsp = DPCPU_ID_PTR(cpuid, nws);
		if (nwsp->nws_intr_event == NULL)
			continue;
		first = 1;
		for (proto = 0; proto < NETISR_MAXPROT; proto++) {
			if (netisr_proto[proto].np_handler == NULL)
				continue;
			nwp = &nwsp->nws_work[proto];
			if (first) {
				db_printf("%3d ", cpuid);
				first = 0;
			} else
				db_printf("%3s ", "");
			db_printf(
			    "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n",
			    netisr_proto[proto].np_name, nwp->nw_len,
			    nwp->nw_watermark, nwp->nw_qlimit,
			    nwp->nw_dispatched, nwp->nw_hybrid_dispatched,
			    nwp->nw_qdrops, nwp->nw_queued);
		}
	}
}
#endif