/*-
 * Copyright (c) 2001-2002 Luigi Rizzo
 *
 * Supported by: the Xorp Project (www.xorp.org)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>			/* needed by net/if.h */
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <net/if.h>			/* for IFF_* flags */
#include <net/netisr.h>			/* for NETISR_POLL */

#include <sys/proc.h>			/* for curthread and struct thread */
#include <sys/resourcevar.h>
#include <sys/kthread.h>
static void netisr_poll(void);		/* the two netisr handlers */
static void netisr_pollmore(void);

void hardclock_device_poll(void);	/* hook from hardclock */
void ether_poll(int);			/* polling while in trap */
/*
 * Polling support for [network] device drivers.
 *
 * Drivers which support this feature try to register with the
 * polling code.
 *
 * If registration is successful, the driver must disable interrupts,
 * and further I/O is performed through the handler, which is invoked
 * (at least once per clock tick) with 3 arguments: the "arg" passed at
 * register time (a struct ifnet pointer), a command, and a "count" limit.
 *
 * The command can be one of the following:
 *  POLL_ONLY: quick move of "count" packets from input/output queues.
 *  POLL_AND_CHECK_STATUS: as above, plus check status registers or do
 *	other more expensive operations. This command is issued periodically
 *	but less frequently than POLL_ONLY.
 *  POLL_DEREGISTER: deregister and return to interrupt mode.
 *
 * (A sketch of a conforming handler follows this comment.)
 *
 * The first two commands are only issued if the interface is marked as
 * 'IFF_UP and IFF_DRV_RUNNING', the last one only if IFF_DRV_RUNNING is set.
 *
 * The count limit specifies how much work the handler can do during the
 * call -- typically this is the number of packets to be received, or
 * transmitted, etc. (drivers are free to interpret this number, as long
 * as the max time spent in the function grows roughly linearly with the
 * count).
 *
 * Deregistration can be requested by the driver itself (typically in the
 * *_stop() routine), or by the polling code, by invoking the handler.
 *
 * Polling can be globally enabled or disabled with the sysctl variable
 * kern.polling.enable (default is 0, disabled).
 *
 * A second variable controls the sharing of CPU between polling/kernel
 * network processing, and other activities (typically userlevel tasks):
 * kern.polling.user_frac (between 0 and 100, default 50) sets the share
 * of CPU allocated to user tasks. CPU is allocated proportionally to the
 * shares, by dynamically adjusting the "count" (poll_burst).
 *
 * Other parameters should be left at their default values.
 * The following constraints hold:
 *
 *	1 <= poll_each_burst <= poll_burst <= poll_burst_max
 *	0 <= poll_in_trap <= poll_each_burst
 *	MIN_POLL_BURST_MAX <= poll_burst_max <= MAX_POLL_BURST_MAX
 */
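
/*
 * Illustrative only: a minimal handler for a hypothetical "xx" driver,
 * following the contract above. All xx_* names and the softc layout are
 * assumptions for this sketch, not definitions from this file.
 *
 *	static void
 *	xx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		struct xx_softc *sc = ifp->if_softc;	// hypothetical softc
 *
 *		if (cmd == POLL_DEREGISTER) {	// back to interrupt mode
 *			xx_enable_intr(sc);	// hypothetical helper
 *			return;
 *		}
 *		xx_rxeof(sc, count);	// receive at most "count" packets
 *		xx_txeof(sc);		// reclaim completed transmissions
 *		if (cmd == POLL_AND_CHECK_STATUS)
 *			xx_check_status(sc);	// rare, expensive work
 *	}
 */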
#define MIN_POLL_BURST_MAX	10
#define MAX_POLL_BURST_MAX	1000

SYSCTL_NODE(_kern, OID_AUTO, polling, CTLFLAG_RW, 0,
	"Device polling parameters");
static u_int32_t poll_burst = 5;
SYSCTL_UINT(_kern_polling, OID_AUTO, burst, CTLFLAG_RW,
	&poll_burst, 0, "Current polling burst size");

static u_int32_t poll_each_burst = 5;
SYSCTL_UINT(_kern_polling, OID_AUTO, each_burst, CTLFLAG_RW,
	&poll_each_burst, 0, "Max size of each burst");

static u_int32_t poll_burst_max = 150;	/* good for 100Mbit net and HZ=1000;
					 * see the sizing note below */
SYSCTL_UINT(_kern_polling, OID_AUTO, burst_max, CTLFLAG_RW,
	&poll_burst_max, 0, "Max Polling burst size");

static u_int32_t poll_in_idle_loop = 0;	/* do we poll in idle loop ? */
SYSCTL_UINT(_kern_polling, OID_AUTO, idle_poll, CTLFLAG_RW,
	&poll_in_idle_loop, 0, "Enable device polling in idle loop");

u_int32_t poll_in_trap;			/* used in trap.c */
SYSCTL_UINT(_kern_polling, OID_AUTO, poll_in_trap, CTLFLAG_RW,
	&poll_in_trap, 0, "Poll burst size during a trap");

static u_int32_t user_frac = 50;
SYSCTL_UINT(_kern_polling, OID_AUTO, user_frac, CTLFLAG_RW,
	&user_frac, 0, "Desired user fraction of cpu time");

static u_int32_t reg_frac = 20;
SYSCTL_UINT(_kern_polling, OID_AUTO, reg_frac, CTLFLAG_RW,
	&reg_frac, 0, "Every this many cycles check status registers");

static u_int32_t short_ticks;
SYSCTL_UINT(_kern_polling, OID_AUTO, short_ticks, CTLFLAG_RW,
	&short_ticks, 0, "Hardclock ticks shorter than they should be");

static u_int32_t lost_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, lost_polls, CTLFLAG_RW,
	&lost_polls, 0, "How many times we would have lost a poll tick");

static u_int32_t pending_polls;
SYSCTL_UINT(_kern_polling, OID_AUTO, pending_polls, CTLFLAG_RW,
	&pending_polls, 0, "Do we need to poll again");

static int residual_burst = 0;
SYSCTL_INT(_kern_polling, OID_AUTO, residual_burst, CTLFLAG_RW,
	&residual_burst, 0, "# of residual cycles in burst");

static u_int32_t poll_handlers;		/* next free entry in pr[]. */
SYSCTL_UINT(_kern_polling, OID_AUTO, handlers, CTLFLAG_RD,
	&poll_handlers, 0, "Number of registered poll handlers");

static int polling = 0;			/* global polling enable */
SYSCTL_UINT(_kern_polling, OID_AUTO, enable, CTLFLAG_RW,
	&polling, 0, "Polling enabled");

static u_int32_t phase;
SYSCTL_UINT(_kern_polling, OID_AUTO, phase, CTLFLAG_RW,
	&phase, 0, "Polling phase");

static u_int32_t suspect;
SYSCTL_UINT(_kern_polling, OID_AUTO, suspect, CTLFLAG_RW,
	&suspect, 0, "suspect event");

static u_int32_t stalled;
SYSCTL_UINT(_kern_polling, OID_AUTO, stalled, CTLFLAG_RW,
	&stalled, 0, "potential stalls");

static u_int32_t idlepoll_sleeping;	/* idlepoll is sleeping */
SYSCTL_UINT(_kern_polling, OID_AUTO, idlepoll_sleeping, CTLFLAG_RD,
	&idlepoll_sleeping, 0, "idlepoll is sleeping");
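
/*
 * Sizing note (illustrative arithmetic, not code from this file): with
 * HZ=1000 the default poll_burst_max of 150 allows up to 150 * 1000 =
 * 150000 packets per second per direction, which matches the roughly
 * 148800 minimum-size (64-byte) packets per second that saturate a
 * 100Mbit link -- hence the "good for 100Mbit net and HZ=1000" comment.
 */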
#define POLL_LIST_LEN	128
struct pollrec {
	poll_handler_t	*handler;
	struct ifnet	*ifp;
	/*
	 * Flags of polling record (protected by poll_mtx).
	 * PRF_RUNNING means that the handler is now executing.
	 * PRF_LEAVING means that the handler is now deregistering.
	 */
#define PRF_RUNNING	0x1
#define PRF_LEAVING	0x2
	uint32_t	flags;
};

static struct pollrec pr[POLL_LIST_LEN];

#define PR_VALID(i)	(pr[(i)].handler != NULL &&			\
			    !(pr[(i)].flags & (PRF_RUNNING|PRF_LEAVING)) &&\
			    (pr[(i)].ifp->if_drv_flags & IFF_DRV_RUNNING) &&\
			    (pr[(i)].ifp->if_flags & IFF_UP))

static struct mtx	poll_mtx;
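
/*
 * Locking note: poll_mtx protects pr[], poll_handlers and the PRF_* flags.
 * PR_VALID(i) is true only for entries that may be polled right now:
 * registered, not currently running or deregistering, and whose interface
 * is both administratively up (IFF_UP) and operational (IFF_DRV_RUNNING).
 */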
static void
init_device_poll(void)
{

	mtx_init(&poll_mtx, "polling", NULL, MTX_DEF);
	netisr_register(NETISR_POLL, (netisr_t *)netisr_poll, NULL,
	    NETISR_MP_SAFE);
	netisr_register(NETISR_POLLMORE, (netisr_t *)netisr_pollmore, NULL,
	    NETISR_MP_SAFE);
}
SYSINIT(device_poll, SI_SUB_CLOCKS, SI_ORDER_MIDDLE, init_device_poll, NULL)
/*
 * Hook from hardclock. Tries to schedule a netisr, but keeps track
 * of lost ticks due to the previous handler taking too long.
 * Normally, this should not happen, because polling handlers should
 * run for a short time. However, in some cases (e.g. when there are
 * changes in link status etc.) the drivers take a very long time
 * (even in the order of milliseconds) to reset and reconfigure the
 * device, causing apparent lost polls.
 *
 * The first part of the code is just for debugging purposes, and tries
 * to count how often hardclock ticks are shorter than they should be,
 * meaning either stray interrupts or delayed events.
 */
void
hardclock_device_poll(void)
{
	static struct timeval prev_t, t;
	int delta;

	if (poll_handlers == 0)
		return;

	microuptime(&t);
	delta = (t.tv_usec - prev_t.tv_usec) +
	    (t.tv_sec - prev_t.tv_sec)*1000000;
	if (delta * hz < 500000)
		short_ticks++;
	else
		prev_t = t;

	if (pending_polls > 100) {
		/*
		 * Too much, assume it has stalled (not always true
		 * see comment above).
		 */
		stalled++;
		pending_polls = 0;
		phase = 0;
	}

	if (phase <= 2) {
		if (phase != 0)
			suspect++;
		phase = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 2;
	}
	if (pending_polls++ > 0)
		lost_polls++;
}
/*
 * ether_poll is called from the idle loop or from the trap handler.
 */
void
ether_poll(int count)
{
	int i;

	mtx_lock(&poll_mtx);

	if (count > poll_each_burst)
		count = poll_each_burst;

	for (i = 0 ; i < poll_handlers ; i++) {
		if (PR_VALID(i)) {
			pr[i].flags |= PRF_RUNNING;
			mtx_unlock(&poll_mtx);
			pr[i].handler(pr[i].ifp, POLL_ONLY, count);
			mtx_lock(&poll_mtx);
			pr[i].flags &= ~PRF_RUNNING;
		}
	}
	mtx_unlock(&poll_mtx);
}
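
/*
 * Note: the trap path calls ether_poll() with poll_in_trap as the count,
 * so the constraint poll_in_trap <= poll_each_burst stated above makes
 * the clamp at the top of ether_poll() a no-op in that case.
 */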
/*
 * netisr_pollmore is called after other netisr's, possibly scheduling
 * another NETISR_POLL call, or adapting the burst size for the next cycle.
 *
 * It is very bad to fetch large bursts of packets from a single card at once,
 * because the burst could take a long time to be completely processed, or
 * could saturate the intermediate queue (ipintrq or similar) leading to
 * losses or unfairness. To reduce the problem, and also to account better for
 * time spent in network-related processing, we split the burst in smaller
 * chunks of fixed size, giving control to the other netisr's between chunks.
 * This helps in improving the fairness, reducing livelock (because we
 * emulate more closely the "process to completion" that we have with
 * fastforwarding) and accounting for the work performed in low level
 * handling and forwarding. A worked example with the default parameters
 * follows this comment.
 */
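
/*
 * Worked example (illustrative numbers): with poll_burst = 150 and
 * poll_each_burst = 5, one tick's worth of work is served as 30 rounds
 * of NETISR_POLL followed by NETISR_POLLMORE, each round moving at most
 * 5 packets per interface, with the other netisr's running in between.
 */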
static struct timeval poll_start_t;

static void
netisr_pollmore(void)
{
	struct timeval t;
	int kern_load;

	mtx_lock(&poll_mtx);
	phase = 5;
	if (residual_burst > 0) {
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		mtx_unlock(&poll_mtx);
		/* will run immediately on return, followed by netisrs */
		return;
	}

	/* here we can account time spent in netisr's in this tick */
	microuptime(&t);
	kern_load = (t.tv_usec - poll_start_t.tv_usec) +
	    (t.tv_sec - poll_start_t.tv_sec)*1000000;	/* us */
	kern_load = (kern_load * hz) / 10000;		/* 0..100 */
	if (kern_load > (100 - user_frac)) {	/* too much, reduce burst */
		if (poll_burst > 1)
			poll_burst--;
	} else {
		if (poll_burst < poll_burst_max)
			poll_burst++;
	}

	pending_polls--;
	if (pending_polls == 0)			/* we are done */
		phase = 0;
	else {
		/*
		 * Last cycle was long and caused us to miss one or more
		 * hardclock ticks. Restart processing again, but slightly
		 * reduce the burst size to prevent that this happens again.
		 */
		poll_burst -= (poll_burst / 8);
		if (poll_burst < 1)
			poll_burst = 1;
		schednetisrbits(1 << NETISR_POLL | 1 << NETISR_POLLMORE);
		phase = 6;
	}
	mtx_unlock(&poll_mtx);
}
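
/*
 * The kern_load scaling above: with hz = 1000 a tick lasts 1000 us, so
 * (us * hz) / 10000 = (us / 1000) * 100, i.e. the elapsed time expressed
 * as a percentage of one tick. E.g. 600 us of netisr work gives
 * kern_load = 60; with the default user_frac = 50 that exceeds
 * 100 - user_frac, so poll_burst is decremented for the next cycle.
 */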
/*
 * netisr_poll is scheduled by schednetisr when appropriate, typically once
 * per tick.
 */
static void
netisr_poll(void)
{
	static int reg_frac_count;
	int i, cycles;
	enum poll_cmd arg = POLL_ONLY;

	mtx_lock(&poll_mtx);
	phase = 3;
	if (residual_burst == 0) { /* first call in this tick */
		microuptime(&poll_start_t);
		/*
		 * Check that parameters are consistent with runtime
		 * variables. Some of these tests could be done at sysctl
		 * time, but the savings would be very limited because we
		 * still have to check against reg_frac_count and
		 * poll_each_burst. So, instead of writing separate sysctl
		 * handlers, we do all here.
		 */

		if (reg_frac > hz)
			reg_frac = hz;
		else if (reg_frac < 1)
			reg_frac = 1;
		if (reg_frac_count > reg_frac)
			reg_frac_count = reg_frac - 1;
		if (reg_frac_count-- == 0) {
			arg = POLL_AND_CHECK_STATUS;
			reg_frac_count = reg_frac - 1;
		}

		if (poll_burst_max < MIN_POLL_BURST_MAX)
			poll_burst_max = MIN_POLL_BURST_MAX;
		else if (poll_burst_max > MAX_POLL_BURST_MAX)
			poll_burst_max = MAX_POLL_BURST_MAX;

		if (poll_each_burst < 1)
			poll_each_burst = 1;
		else if (poll_each_burst > poll_burst_max)
			poll_each_burst = poll_burst_max;

		if (poll_burst > poll_burst_max)
			poll_burst = poll_burst_max;
		residual_burst = poll_burst;
	}
	cycles = (residual_burst < poll_each_burst) ?
	    residual_burst : poll_each_burst;
	residual_burst -= cycles;

	if (polling) {
		for (i = 0 ; i < poll_handlers ; i++) {
			if (PR_VALID(i)) {
				pr[i].flags |= PRF_RUNNING;
				mtx_unlock(&poll_mtx);
				pr[i].handler(pr[i].ifp, arg, cycles);
				mtx_lock(&poll_mtx);
				pr[i].flags &= ~PRF_RUNNING;
			}
		}
	} else { /* unregister */
		for (i = 0 ; i < poll_handlers ; i++) {
			if (pr[i].handler != NULL &&
			    pr[i].ifp->if_drv_flags & IFF_DRV_RUNNING) {
				pr[i].ifp->if_flags &= ~IFF_POLLING;
				pr[i].flags |= PRF_LEAVING;
				mtx_unlock(&poll_mtx);
				pr[i].handler(pr[i].ifp, POLL_DEREGISTER, 1);
				mtx_lock(&poll_mtx);
				pr[i].flags &= ~PRF_LEAVING;
			}
			pr[i].handler = NULL;
		}
		residual_burst = 0;
		poll_handlers = 0;
	}

	phase = 4;
	mtx_unlock(&poll_mtx);
}
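
/*
 * Note on the locking protocol above: poll_mtx is dropped around each
 * handler invocation, so a slow driver cannot hold up the rest of the
 * kernel while it runs; the PRF_RUNNING/PRF_LEAVING flags keep PR_VALID()
 * false for that entry in the meantime, preventing reentrant calls into
 * the same handler from ether_poll() or a concurrent netisr.
 */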
/*
 * Try to register routine for polling. Returns 1 if successful
 * (and polling should be enabled), 0 otherwise.
 * A device is not supposed to register itself multiple times.
 *
 * This is called from within the *_intr() functions, so we do not need
 * further ifnet locking.
 */
int
ether_poll_register(poll_handler_t *h, struct ifnet *ifp)
{
	int i;

	if (polling == 0)			/* polling disabled, cannot register */
		return 0;
	if (h == NULL || ifp == NULL)		/* bad arguments	*/
		return 0;
	if ( !(ifp->if_flags & IFF_UP) )	/* must be up		*/
		return 0;
	if (ifp->if_flags & IFF_POLLING)	/* already polling	*/
		return 0;

	mtx_lock(&poll_mtx);
	if (poll_handlers >= POLL_LIST_LEN) {
		/*
		 * List full, cannot register more entries.
		 * This should never happen; if it does, it is probably a
		 * broken driver trying to register multiple times. Checking
		 * this at runtime is expensive, and won't solve the problem
		 * anyways, so just report a few times and then give up.
		 */
		static int verbose = 10;
		if (verbose > 0) {
			log(LOG_ERR, "poll handlers list full, "
			    "maybe a broken driver ?\n");
			verbose--;
		}
		mtx_unlock(&poll_mtx);
		return 0; /* no polling for you */
	}

	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].ifp == ifp && pr[i].handler != NULL) {
			mtx_unlock(&poll_mtx);
			log(LOG_DEBUG, "ether_poll_register: %s: handler"
			    " already registered\n", ifp->if_xname);
			return (0);
		}

	pr[poll_handlers].handler = h;
	pr[poll_handlers].ifp = ifp;
	poll_handlers++;
	ifp->if_flags |= IFF_POLLING;
	mtx_unlock(&poll_mtx);
	if (idlepoll_sleeping)
		wakeup(&idlepoll_sleeping);
	return 1; /* polling enabled in next call */
}
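
/*
 * Illustrative only: how a hypothetical "xx" driver would switch from
 * interrupts to polling inside its interrupt routine (xx_* names are
 * assumptions for this sketch):
 *
 *	static void
 *	xx_intr(void *arg)
 *	{
 *		struct xx_softc *sc = arg;
 *		struct ifnet *ifp = sc->xx_ifp;		// hypothetical field
 *
 *		if (ether_poll_register(xx_poll, ifp)) {
 *			xx_disable_intr(sc);	// hypothetical helper
 *			return;		// xx_poll() does the work from now on
 *		}
 *		// ... normal interrupt-driven processing ...
 *	}
 */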
/*
 * Remove interface from the polling list. Normally called by *_stop().
 * It is not an error to call it with IFF_POLLING clear: the call is
 * sufficiently rare that it is preferable to save the space of an extra
 * test in each driver, in exchange for one additional function call.
 */
int
ether_poll_deregister(struct ifnet *ifp)
{
	int i;

	mtx_lock(&poll_mtx);
	if ( !ifp || !(ifp->if_flags & IFF_POLLING) ) {
		mtx_unlock(&poll_mtx);
		return 0;
	}
	for (i = 0 ; i < poll_handlers ; i++)
		if (pr[i].ifp == ifp) /* found it */
			break;
	ifp->if_flags &= ~IFF_POLLING; /* found or not... */
	if (i == poll_handlers) {
		mtx_unlock(&poll_mtx);
		log(LOG_DEBUG, "ether_poll_deregister: %s: not found!\n",
		    ifp->if_xname);
		return 0;
	}
	poll_handlers--;
	if (i < poll_handlers) { /* Last entry replaces this one. */
		pr[i].handler = pr[poll_handlers].handler;
		pr[i].ifp = pr[poll_handlers].ifp;
	}
	mtx_unlock(&poll_mtx);
	return (1);
}
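
/*
 * Design note: pr[] is an unordered array, so removal is O(1): the last
 * entry is copied over the vacated slot and poll_handlers is shrunk,
 * instead of shifting the whole tail of the list.
 */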
static void
poll_idle(void)
{
	struct thread *td = curthread;
	struct rtprio rtp;
	int pri;

	rtp.prio = RTP_PRIO_MAX;	/* lowest priority */
	rtp.type = RTP_PRIO_IDLE;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);

	for (;;) {
		if (poll_in_idle_loop && poll_handlers > 0) {
			idlepoll_sleeping = 0;
			ether_poll(poll_each_burst);
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_VOL, NULL);
			mtx_unlock_spin(&sched_lock);
		} else {
			idlepoll_sleeping = 1;
			tsleep(&idlepoll_sleeping, pri, "pollid", hz * 3);
		}
	}
}
static struct proc *idlepoll;
static struct kproc_desc idlepoll_kp = {
	 "idlepoll",
	 poll_idle,
	 &idlepoll
};
SYSINIT(idlepoll, SI_SUB_KTHREAD_VM, SI_ORDER_ANY, kproc_start, &idlepoll_kp)
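
/*
 * The idlepoll kthread created above runs poll_idle() at the lowest
 * (idle) priority: it burns otherwise-idle CPU on ether_poll() when
 * kern.polling.idle_poll is set and handlers exist, and otherwise sleeps
 * on &idlepoll_sleeping until ether_poll_register() wakes it up.
 */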