/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>			/* needed by net/if.h */
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>			/* for IFF_* flags */
#include <net/netisr.h>			/* for NETISR_POLL */

#include <sys/proc.h>			/* for curthread, PROC_SLOCK() */
#include <sys/resourcevar.h>
#include <sys/kthread.h>
#include <sys/vimage.h>
static void netisr_poll(void);		/* the two netisr handlers */
static void netisr_pollmore(void);
static int poll_switch(SYSCTL_HANDLER_ARGS);

void hardclock_device_poll(void);	/* hook from hardclock */
void ether_poll(int);			/* polling in idle loop */

static struct mtx	poll_mtx;
/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature can register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Polling is enabled and disabled by setting the IFCAP_POLLING flag on
 * the interface. The driver's ioctl handler should register the interface
 * with polling and disable interrupts if registration was successful.
 * (An illustrative handler sketch follows this comment.)
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
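/*
 * The sketch below illustrates the handler shape described above. It is
 * not part of this file and is kept out of compilation: "foo_poll",
 * "struct foo_softc" and the foo_rxeof()/foo_txeof()/foo_link_check()
 * helpers are hypothetical driver names. The exact poll_handler_t
 * signature is defined in net/if_var.h and varies between FreeBSD
 * versions; any return value is ignored by this file.
 */
#if 0
static void
foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct foo_softc *sc = ifp->if_softc;

	/* Honor the "count" limit: move at most "count" packets. */
	foo_rxeof(sc, count);	/* drain up to "count" received packets */
	foo_txeof(sc);		/* reclaim completed transmissions */

	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Issued less frequently: do the expensive work here. */
		foo_link_check(sc);
	}
}
#endif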
#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

static uint32_t poll_burst = 5;
static uint32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000 */
static uint32_t poll_each_burst = 5;

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");

SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RD,
	&poll_burst, 0, "Current polling burst size");
static int poll_burst_max_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_burst_max;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < MIN_POLL_BURST_MAX || val > MAX_POLL_BURST_MAX)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	poll_burst_max = val;
	if (poll_burst > poll_burst_max)
		poll_burst = poll_burst_max;
	if (poll_each_burst > poll_burst_max)
		poll_each_burst = MIN_POLL_BURST_MAX;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, burst_max, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_burst_max_sysctl, "I",
	"Max Polling burst size");
static int poll_each_burst_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = poll_each_burst;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	if (val > poll_burst_max) {
		mtx_unlock(&poll_mtx);
		return (EINVAL);
	}
	poll_each_burst = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, each_burst, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), poll_each_burst_sysctl, "I",
	"Max size of each burst");
static uint32_t poll_in_idle_loop = 0;	/* do we poll in idle loop ? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");
static uint32_t user_frac = 50;
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)		/* val is unsigned, only the upper bound matters */
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), user_frac_sysctl, "I",
	"Desired user fraction of cpu time");
static uint32_t reg_frac_count = 0;
static uint32_t reg_frac = 20;
static int reg_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = reg_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val < 1 || val > hz)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	reg_frac = val;
	if (reg_frac_count >= reg_frac)
		reg_frac_count = 0;
	mtx_unlock(&poll_mtx);

	return (0);
}
SYSCTL_PROC(_kern_polling, OID_AUTO, reg_frac, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(uint32_t), reg_frac_sysctl, "I",
	"Check status registers every this many polling cycles");
static uint32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RD,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static uint32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RD,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static uint32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RD,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RD,
	&residual_burst, 0, "# of residual cycles in burst");

static uint32_t poll_handlers;	/* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static int polling = 0;
SYSCTL_PROC(_kern_polling, OID_AUTO, enable, CTLTYPE_UINT | CTLFLAG_RW,
	0, sizeof(int), poll_switch, "I", "Switch polling for all interfaces");

static uint32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RD,
	&phase, 0, "Polling phase");

static uint32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RD,
	&suspect, 0, "Suspect events");

static uint32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RD,
	&stalled, 0, "Potential stalls");

static uint32_t idlepoll_sleeping;	/* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");
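/*
 * Typical usage from userland (illustrative; "fxp0" is just an example
 * interface name, and any polling-capable driver works the same way):
 *
 *	ifconfig fxp0 polling			# set IFCAP_POLLING via SIOCSIFCAP
 *	sysctl kern.polling.user_frac=30	# reserve ~30% of CPU for userland
 *	sysctl kern.polling.idle_poll=1		# also poll from the idle loop
 */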
#define POLL_LIST_LEN	128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
};

static struct pollrec pr[POLL_LIST_LEN];
/*
 * Register the relevant netisr handlers and initialize the mutex.
 */
static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	netisr_register(NETISR_POLL, (netisr_t *)netisr_poll, NULL, 0);
	netisr_register(NETISR_POLLMORE, (netisr_t *)netisr_pollmore, NULL, 0);
}
SYSINIT(device_poll, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, init_device_poll, NULL);
/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because polling handlers should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
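/*
 * Worked example of the short-tick test below, assuming hz = 1000:
 * a tick should last 1000000/hz = 1000us, and "delta * hz < 500000"
 * flags any tick shorter than 500000/hz = 500us, i.e. less than half
 * the nominal tick length.
 */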
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
	    (t.tv_sec - prev_t.tv_sec) * 1000000;
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true,
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}
/*
 * ether_poll is called from the idle loop.
 */
void
ether_poll(int count)
{
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, POLL_ONLY, count);

	mtx_unlock(&poll_mtx);
}
/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst in smaller
 * chunks of fixed size, giving control to the other netisr's between chunks.
 * This helps in improving the fairness, reducing livelock (because we
 * emulate more closely the "process to completion" that we have with
 * fastforwarding) and accounting for the work performed in low level
 * handling and forwarding.
 */
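/*
 * Worked example (illustrative): if poll_burst has grown to its default
 * maximum of 150 and poll_each_burst is 5, a tick's burst is served in
 * 150/5 = 30 NETISR_POLL passes, with the other netisrs getting a chance
 * to run between consecutive passes.
 */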
static struct timeval poll_start_t;

static void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	mtx_lock(&poll_mtx);
	phase = 5;
	if (residual_burst > 0) {
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}
	/* here we can account time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
	    (t.tv_sec - poll_start_t.tv_sec) * 1000000;	/* us */
	/* us * hz / 10^6 is the fraction of a tick; * 100 maps it to 0..100 */
	kern_load = (kern_load * hz) / 10000;
	if (kern_load > (100 - user_frac)) {	/* try decrease ticks */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0)	/* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}
/*
 * netisr_poll is scheduled by schednetisr when appropriate, typically once
 * per tick.
 */
static void
netisr_poll(void)
{
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	mtx_lock(&poll_mtx);
	phase = 3;
	if (residual_burst == 0) {	/* first call in this tick */
		microuptime(&poll_start_t);
		if (++reg_frac_count == reg_frac) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = 0;
		}

		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
	    residual_burst : poll_each_burst;
	residual_burst -= cycles;

	for (i = 0; i < poll_handlers; i++)
		pr[i].handler(pr[i].ifp, arg, cycles);

	phase = 4;
	mtx_unlock(&poll_mtx);
}
/*
 * Try to register routine for polling. Returns 0 if successful
 * (and polling should be enabled), error code otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_ioctl() functions.
 */
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
	int i;

	KASSERT(h != NULL, ("%s: handler is NULL", __func__));
	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;

		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return (ENOMEM);	/* no polling for you */
	}

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", ifp->if_xname);
			return (EEXIST);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return (0);
}
/*
 * Remove interface from the polling list. Called from *_ioctl(), too.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	int i;

	KASSERT(ifp != NULL, ("%s: ifp is NULL", __func__));

	mtx_lock(&poll_mtx);

	for (i = 0; i < poll_handlers; i++)
		if (pr[i].ifp == ifp)	/* found it */
			break;
	if (i == poll_handlers) {
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    ifp->if_xname);
		mtx_unlock(&poll_mtx);
		return (ENOENT);
	}
	poll_handlers--;
	if (i < poll_handlers) {	/* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (0);
}
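/*
 * Illustrative sketch (kept out of compilation) of the SIOCSIFCAP case
 * in a driver's *_ioctl() routine, showing how it would hook into the
 * two functions above. "sc", "mask", FOO_LOCK()/FOO_UNLOCK(), foo_poll()
 * and the interrupt enable/disable helpers are hypothetical
 * driver-specific names.
 */
#if 0
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(foo_poll, ifp);
				if (error == 0) {
					FOO_LOCK(sc);
					/* polling replaces interrupts */
					foo_disable_intr(sc);
					ifp->if_capenable |= IFCAP_POLLING;
					FOO_UNLOCK(sc);
				}
			} else {
				error = ether_poll_deregister(ifp);
				/* back to interrupt mode */
				FOO_LOCK(sc);
				foo_enable_intr(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				FOO_UNLOCK(sc);
			}
		}
		break;
#endif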
/*
 * Legacy interface for turning polling on all interfaces at one time.
 */
static int
poll_switch(SYSCTL_HANDLER_ARGS)
{
	INIT_VNET_NET(curvnet);
	struct ifnet *ifp;
	int error;
	int val = polling;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val == polling)
		return (0);

	if (val < 0 || val > 1)
		return (EINVAL);

	polling = val;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (ifp->if_capabilities & IFCAP_POLLING) {
			struct ifreq ifr;

			if (val == 1)
				ifr.ifr_reqcap =
				    ifp->if_capenable | IFCAP_POLLING;
			else
				ifr.ifr_reqcap =
				    ifp->if_capenable & ~IFCAP_POLLING;
			IFF_LOCKGIANT(ifp);	/* LOR here */
			(void) (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
			IFF_UNLOCKGIANT(ifp);
		}
	}
	IFNET_RUNLOCK();

	log(LOG_ERR, "kern.polling.enable is deprecated. Use ifconfig(8)\n");

	return (0);
}
static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	PROC_SLOCK(td->td_proc);
	rtp_to_pri(&rtp, td);
	PROC_SUNLOCK(td->td_proc);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			thread_lock(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, 0, "pollid", hz * 3);
		}
	}
}

static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	"idlepoll",
	poll_idle,
	&idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start,
    &idlepoll_kp);