2 * Copyright (c) 2016-2018 Netflix, Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
30 #include "opt_inet6.h"
32 #include "opt_tcpdebug.h"
35 * Some notes about usage.
37 * The tcp_hpts system is designed to provide a high precision timer
38 * system for tcp. Its main purpose is to provide a mechanism for
39 * pacing packets out onto the wire. It can be used in two ways
40 * by a given TCP stack (and those two methods can be used simultaneously).
42 * First, and probably the main way it is used by Rack and BBR, it can
43 * be used to call tcp_output() of a transport stack at some time in the future.
44 * The normal way this is done is that tcp_output() of the stack schedules
45 * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
46 * slot is the time from now that the stack wants to be called but it
47 * must be converted to tcp_hpts's notion of slot. This is done with
48 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
49 * call from the tcp_output() routine might look like:
51 * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
53 * The above would schedule tcp_output() to be called in 550 microseconds.
54 * Note that if using this mechanism the stack will want to add, near
55 * the top of tcp_output(), a check to prevent unwanted calls (from user
56 * land or the arrival of incoming ACKs). So it would add something like:
58 * if (tcp_in_hpts(inp))
61 * to prevent output processing until the time allotted has gone by.
62 * Of course this is a bare-bones example and the stack will probably
63 * have more considerations than just the above.
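 *
 * Putting those two pieces together, a stack's output path might be
 * shaped roughly like the sketch below. The function names and the
 * 550 usec value are purely illustrative (they are not taken from any
 * stack); the tcp_in_hpts()/tcp_hpts_insert() calls are the point:
 *
 *	static void
 *	example_stack_output(struct tcpcb *tp)
 *	{
 *		struct inpcb *inp = tp->t_inpcb;
 *
 *		if (tcp_in_hpts(inp))
 *			return;
 *		(void)example_stack_send_now(tp);
 *		tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
 *	}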
65 * In order to run input queued segments from the HPTS context the
66 * tcp stack must define an input function for
67 * tfb_do_queued_segments(). This function understands
68 * how to dequeue an array of packets that were input and
69 * knows how to call the correct processing routine.
71 * Locking in this is important as well, so most likely the
72 * stack will need to define tfb_do_segment_nounlock(),
73 * splitting tfb_do_segment() into two parts: the main processing
74 * part, which does not unlock the INP and returns a value of 1 or 0.
75 * It returns 0 if all is well and the lock was not released. It
76 * returns 1 if we had to destroy the TCB (a reset received, etc.).
77 * The remainder of tfb_do_segment() then becomes just a simple call
78 * to the tfb_do_segment_nounlock() function, checking the return
79 * code and possibly unlocking.
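 *
 * A rough sketch of the resulting wrapper is below; the names and
 * the exact argument list are illustrative (each stack defines its
 * own), the pattern of checking the return code before unlocking
 * is what matters:
 *
 *	static void
 *	example_do_segment(struct mbuf *m, struct tcphdr *th,
 *	    struct socket *so, struct tcpcb *tp, int drop_hdrlen,
 *	    int tlen, uint8_t iptos)
 *	{
 *		struct timeval tv;
 *
 *		tcp_get_usecs(&tv);
 *		if (example_do_segment_nounlock(m, th, so, tp,
 *		    drop_hdrlen, tlen, iptos, 0, &tv) == 0)
 *			INP_WUNLOCK(tp->t_inpcb);
 *	}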
81 * The stack must also set the flag on the INP that it supports this
82 * feature, i.e. INP_SUPPORTS_MBUFQ. The LRO code recognizes
83 * this flag as well and will queue packets when it is set.
84 * There are other flags as well, INP_MBUF_QUEUE_READY and
85 * INP_DONT_SACK_QUEUE. The first flag tells the LRO code
86 * that we are in the pacer for output so there is no
87 * need to wake up the hpts system to get immediate
88 * input. The second tells the LRO code that it is okay,
89 * if a SACK arrives, to still defer input and let
90 * the current hpts timer run (this is usually set when
91 * a rack timer is up so we know SACKs are happening
92 * on the connection already and don't want to wake up yet).
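 *
 * As an illustrative example (the placement in a stack's init path
 * is an assumption, not a requirement), a stack that wants LRO to
 * queue its input would do:
 *
 *	inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;
 *
 * and would typically set INP_MBUF_QUEUE_READY (and, while a rack
 * timer is pending, INP_DONT_SACK_QUEUE) around arming its pacer
 * timer, clearing them again when it wants immediate wakeups.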
94 * There is a common function within the rack_bbr_common code,
95 * i.e. ctf_do_queued_segments(). This function
96 * knows how to take the input queue of packets from
97 * tp->t_in_pkt and process them, digging out
98 * all the arguments, calling any bpf tap and
99 * calling into tfb_do_segment_nounlock(). The common
100 * function (ctf_do_queued_segments()) requires that
101 * you have defined the tfb_do_segment_nounlock() as
105 #include <sys/param.h>
107 #include <sys/interrupt.h>
108 #include <sys/module.h>
109 #include <sys/kernel.h>
110 #include <sys/hhook.h>
111 #include <sys/malloc.h>
112 #include <sys/mbuf.h>
113 #include <sys/proc.h> /* for proc0 declaration */
114 #include <sys/socket.h>
115 #include <sys/socketvar.h>
116 #include <sys/sysctl.h>
117 #include <sys/systm.h>
118 #include <sys/refcount.h>
119 #include <sys/sched.h>
120 #include <sys/queue.h>
122 #include <sys/counter.h>
123 #include <sys/time.h>
124 #include <sys/kthread.h>
125 #include <sys/kern_prefetch.h>
130 #include <net/route.h>
131 #include <net/vnet.h>
134 #include <net/netisr.h>
135 #include <net/rss_config.h>
138 #define TCPSTATES /* for logging */
140 #include <netinet/in.h>
141 #include <netinet/in_kdtrace.h>
142 #include <netinet/in_pcb.h>
143 #include <netinet/ip.h>
144 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
145 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
146 #include <netinet/ip_var.h>
147 #include <netinet/ip6.h>
148 #include <netinet6/in6_pcb.h>
149 #include <netinet6/ip6_var.h>
150 #include <netinet/tcp.h>
151 #include <netinet/tcp_fsm.h>
152 #include <netinet/tcp_seq.h>
153 #include <netinet/tcp_timer.h>
154 #include <netinet/tcp_var.h>
155 #include <netinet/tcpip.h>
156 #include <netinet/cc/cc.h>
157 #include <netinet/tcp_hpts.h>
158 #include <netinet/tcp_log_buf.h>
161 #include <netinet/tcp_debug.h>
162 #endif /* tcpdebug */
164 #include <netinet/tcp_offload.h>
168 * The hpts uses a 102400-slot wheel. The wheel
169 * defines the time in 10 usec increments (102400 x 10 usec = 1.024 sec).
170 * This gives a range of 10 usec - 1024 ms in which to place
171 * an entry. If the user requests more than
172 * 1.024 seconds, a remainder is attached and the hpts,
173 * when seeing the remainder, will re-insert the
174 * inpcb forward in time from where it is until
175 * the remainder is zero.
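 *
 * As a rough worked example (the exact split depends on where the
 * wheel currently is): a request of 2,000,000 usec maps to 200,000
 * slots; only about 102,399 of those fit on the wheel now, so the
 * entry is placed that far out with a remainder of roughly 97,601
 * slots, which is consumed by re-insertions until it reaches zero.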
178 #define NUM_OF_HPTSI_SLOTS 102400
180 /* Each hpts has its own p_mtx which is used for locking */
181 #define HPTS_MTX_ASSERT(hpts) mtx_assert(&(hpts)->p_mtx, MA_OWNED)
182 #define HPTS_LOCK(hpts) mtx_lock(&(hpts)->p_mtx)
183 #define HPTS_UNLOCK(hpts) mtx_unlock(&(hpts)->p_mtx)
184 struct tcp_hpts_entry {
185 /* Cache line 0x00 */
186 struct mtx p_mtx; /* Mutex for hpts */
187 struct timeval p_mysleep; /* Our min sleep time */
188 uint64_t syscall_cnt;
189 uint64_t sleeping; /* What the actual sleep was (if sleeping) */
190 uint16_t p_hpts_active; /* Flag that says hpts is awake */
191 uint8_t p_wheel_complete; /* have we completed the wheel arc walk? */
192 uint32_t p_curtick; /* Tick in 10 us the hpts is going to */
193 uint32_t p_runningslot; /* Current tick we are at if we are running */
194 uint32_t p_prev_slot; /* Previous slot we were on */
195 uint32_t p_cur_slot; /* Current slot in wheel hpts is draining */
196 uint32_t p_nxt_slot; /* The next slot outside the current range of
197 * slots that the hpts is running on. */
198 int32_t p_on_queue_cnt; /* Count on queue in this hpts */
199 uint32_t p_lasttick; /* Last tick before the current one */
200 uint8_t p_direct_wake :1, /* boolean */
201 p_on_min_sleep:1, /* boolean */
202 p_hpts_wake_scheduled:1, /* boolean */
204 uint8_t p_fill[3]; /* Fill to 32 bits */
205 /* Cache line 0x40 */
207 TAILQ_HEAD(, inpcb) head;
210 } *p_hptss; /* Hptsi wheel */
211 uint32_t p_hpts_sleep_time; /* Current sleep interval having a max
213 uint32_t overidden_sleep; /* what was overridden by min-sleep for logging */
214 uint32_t saved_lasttick; /* for logging */
215 uint32_t saved_curtick; /* for logging */
216 uint32_t saved_curslot; /* for logging */
217 uint32_t saved_prev_slot; /* for logging */
218 uint32_t p_delayed_by; /* How much were we delayed by */
219 /* Cache line 0x80 */
220 struct sysctl_ctx_list hpts_ctx;
221 struct sysctl_oid *hpts_root;
222 struct intr_event *ie;
224 uint16_t p_num; /* The hpts number one per cpu */
225 uint16_t p_cpu; /* The hpts CPU */
226 /* There is extra space in here */
227 /* Cache line 0x100 */
228 struct callout co __aligned(CACHE_LINE_SIZE);
229 } __aligned(CACHE_LINE_SIZE);
231 static struct tcp_hptsi {
232 struct tcp_hpts_entry **rp_ent; /* Array of hptss */
233 uint32_t *cts_last_ran;
234 uint32_t rp_num_hptss; /* Number of hpts threads */
237 MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
239 static int tcp_bind_threads = 1;
241 static int tcp_bind_threads = 2;
243 static int tcp_use_irq_cpu = 0;
244 static uint32_t *cts_last_ran;
245 static int hpts_does_tp_logging = 0;
246 static int hpts_use_assigned_cpu = 1;
247 static int32_t hpts_uses_oldest = OLDEST_THRESHOLD;
249 static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout);
250 static void tcp_hpts_thread(void *ctx);
251 static void tcp_init_hptsi(void *st);
253 int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
254 static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
255 static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
256 static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;
260 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
261 "TCP Hpts controls");
262 SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
263 "TCP Hpts statistics");
265 #define timersub(tvp, uvp, vvp) \
267 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
268 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
269 if ((vvp)->tv_usec < 0) { \
271 (vvp)->tv_usec += 1000000; \
275 static int32_t tcp_hpts_precision = 120;
277 static struct hpts_domain_info {
280 } hpts_domains[MAXMEMDOM];
288 counter_u64_t hpts_hopelessly_behind;
290 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
291 &hpts_hopelessly_behind,
292 "Number of times hpts could not catch up and was behind hopelessly");
294 counter_u64_t hpts_loops;
296 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
297 &hpts_loops, "Number of times hpts had to loop to catch up");
299 counter_u64_t back_tosleep;
301 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
302 &back_tosleep, "Number of times hpts found no tcbs");
304 counter_u64_t combined_wheel_wrap;
306 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
307 &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
309 counter_u64_t wheel_wrap;
311 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
312 &wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
314 counter_u64_t hpts_direct_call;
315 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
316 &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");
318 counter_u64_t hpts_wake_timeout;
320 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
321 &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");
323 counter_u64_t hpts_direct_awakening;
325 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
326 &hpts_direct_awakening, "Number of times hpts threads woke up via a direct wakeup");
328 counter_u64_t hpts_back_tosleep;
330 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
331 &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep having no work");
333 counter_u64_t cpu_uses_flowid;
334 counter_u64_t cpu_uses_random;
336 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
337 &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
338 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
339 &cpu_uses_random, "Number of times when setting cpuid we used a random value");
341 TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
342 TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
343 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
344 &tcp_bind_threads, 2,
345 "Thread Binding tunable");
346 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
348 "Use of irq CPU tunable");
349 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
350 &tcp_hpts_precision, 120,
351 "Value for PRE() precision of callout");
352 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
354 "How many connections (below) make us use the callout based mechanism");
355 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
356 &hpts_does_tp_logging, 0,
357 "Do we add to any tp that has logging on pacer logs");
358 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_assigned_cpu, CTLFLAG_RW,
359 &hpts_use_assigned_cpu, 0,
360 "Do we start any hpts timer on the assigned cpu?");
361 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_oldest, CTLFLAG_RW,
362 &hpts_uses_oldest, OLDEST_THRESHOLD,
363 "Do syscalls look for the hpts that has been the longest since running (or just use cpu no if 0)?");
364 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
365 &dynamic_min_sleep, 250,
366 "What is the dynamic minsleep value?");
367 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
368 &dynamic_max_sleep, 5000,
369 "What is the dynamic maxsleep value?");
375 static int32_t max_pacer_loops = 10;
376 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
377 &max_pacer_loops, 10,
378 "What is the maximum number of times the pacer will loop trying to catch up");
380 #define HPTS_MAX_SLEEP_ALLOWED (NUM_OF_HPTSI_SLOTS/2)
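/*
 * With 102400 slots of 10 usec each this caps the sleep at 51200
 * slots, i.e. roughly 512 ms.
 */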
382 static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;
385 sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
390 new = hpts_sleep_max;
391 error = sysctl_handle_int(oidp, &new, 0, req);
392 if (error == 0 && req->newptr) {
393 if ((new < dynamic_min_sleep) ||
394 (new > HPTS_MAX_SLEEP_ALLOWED))
397 hpts_sleep_max = new;
403 sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
408 new = tcp_min_hptsi_time;
409 error = sysctl_handle_int(oidp, &new, 0, req);
410 if (error == 0 && req->newptr) {
411 if (new < LOWEST_SLEEP_ALLOWED)
414 tcp_min_hptsi_time = new;
419 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
420 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
422 &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
423 "Maximum time hpts will sleep");
425 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
426 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
427 &tcp_min_hptsi_time, 0,
428 &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
429 "The minimum time the hpts must sleep before processing more slots");
431 static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
432 static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
433 static int tcp_hpts_no_wake_over_thresh = 1;
435 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
436 &ticks_indicate_more_sleep, 0,
437 "If we only process this many or less on a timeout, we need longer sleep on the next callout");
438 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
439 &ticks_indicate_less_sleep, 0,
440 "If we process this many or more on a timeout, we need less sleep on the next callout");
441 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
442 &tcp_hpts_no_wake_over_thresh, 0,
443 "When we are over the threshold on the pacer do we prohibit wakeups?");
446 tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
447 int slots_to_run, int idx, int from_callout)
449 union tcp_log_stackspecific log;
452 * 64 bit - delRate, rttProp, bw_inuse
454 * 8 bit - bbr_state, bbr_substate, inhpts;
456 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
457 log.u_bbr.flex1 = hpts->p_nxt_slot;
458 log.u_bbr.flex2 = hpts->p_cur_slot;
459 log.u_bbr.flex3 = hpts->p_prev_slot;
460 log.u_bbr.flex4 = idx;
461 log.u_bbr.flex5 = hpts->p_curtick;
462 log.u_bbr.flex6 = hpts->p_on_queue_cnt;
463 log.u_bbr.flex7 = hpts->p_cpu;
464 log.u_bbr.flex8 = (uint8_t)from_callout;
465 log.u_bbr.inflight = slots_to_run;
466 log.u_bbr.applimited = hpts->overidden_sleep;
467 log.u_bbr.delivered = hpts->saved_curtick;
468 log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
469 log.u_bbr.epoch = hpts->saved_curslot;
470 log.u_bbr.lt_epoch = hpts->saved_prev_slot;
471 log.u_bbr.pkts_out = hpts->p_delayed_by;
472 log.u_bbr.lost = hpts->p_hpts_sleep_time;
473 log.u_bbr.pacing_gain = hpts->p_cpu;
474 log.u_bbr.pkt_epoch = hpts->p_runningslot;
475 log.u_bbr.use_lt_bw = 1;
476 TCP_LOG_EVENTP(tp, NULL,
477 &tp->t_inpcb->inp_socket->so_rcv,
478 &tp->t_inpcb->inp_socket->so_snd,
484 tcp_wakehpts(struct tcp_hpts_entry *hpts)
486 HPTS_MTX_ASSERT(hpts);
488 if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
489 hpts->p_direct_wake = 0;
492 if (hpts->p_hpts_wake_scheduled == 0) {
493 hpts->p_hpts_wake_scheduled = 1;
494 swi_sched(hpts->ie_cookie, 0);
499 hpts_timeout_swi(void *arg)
501 struct tcp_hpts_entry *hpts;
503 hpts = (struct tcp_hpts_entry *)arg;
504 swi_sched(hpts->ie_cookie, 0);
508 inp_hpts_insert(struct inpcb *inp, struct tcp_hpts_entry *hpts)
512 INP_WLOCK_ASSERT(inp);
513 HPTS_MTX_ASSERT(hpts);
514 MPASS(hpts->p_cpu == inp->inp_hpts_cpu);
515 MPASS(!(inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)));
517 hptsh = &hpts->p_hptss[inp->inp_hptsslot];
519 if (inp->inp_in_hpts == IHPTS_NONE) {
520 inp->inp_in_hpts = IHPTS_ONQUEUE;
522 } else if (inp->inp_in_hpts == IHPTS_MOVING) {
523 inp->inp_in_hpts = IHPTS_ONQUEUE;
525 MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
526 inp->inp_hpts_gencnt = hptsh->gencnt;
528 TAILQ_INSERT_TAIL(&hptsh->head, inp, inp_hpts);
530 hpts->p_on_queue_cnt++;
533 static struct tcp_hpts_entry *
534 tcp_hpts_lock(struct inpcb *inp)
536 struct tcp_hpts_entry *hpts;
538 INP_LOCK_ASSERT(inp);
540 hpts = tcp_pace.rp_ent[inp->inp_hpts_cpu];
547 inp_hpts_release(struct inpcb *inp)
549 bool released __diagused;
551 inp->inp_in_hpts = IHPTS_NONE;
552 released = in_pcbrele_wlocked(inp);
553 MPASS(released == false);
557 * Called normally with the INP locked, but it
558 * does not matter; the hpts lock is the key,
559 * but the lock order allows us to hold the
560 * INP lock and then get the hpts lock.
563 tcp_hpts_remove(struct inpcb *inp)
565 struct tcp_hpts_entry *hpts;
568 INP_WLOCK_ASSERT(inp);
570 hpts = tcp_hpts_lock(inp);
571 if (inp->inp_in_hpts == IHPTS_ONQUEUE) {
572 hptsh = &hpts->p_hptss[inp->inp_hptsslot];
573 inp->inp_hpts_request = 0;
574 if (__predict_true(inp->inp_hpts_gencnt == hptsh->gencnt)) {
575 TAILQ_REMOVE(&hptsh->head, inp, inp_hpts);
576 MPASS(hptsh->count > 0);
578 MPASS(hpts->p_on_queue_cnt > 0);
579 hpts->p_on_queue_cnt--;
580 inp_hpts_release(inp);
583 * tcp_hptsi() now owns the TAILQ head of this inp.
584 * Can't TAILQ_REMOVE, just mark it.
589 TAILQ_FOREACH(tmp, &hptsh->head, inp_hpts)
592 inp->inp_in_hpts = IHPTS_MOVING;
593 inp->inp_hptsslot = -1;
595 } else if (inp->inp_in_hpts == IHPTS_MOVING) {
597 * Handle a special race condition:
598 * tcp_hptsi() moves inpcb to detached tailq
599 * tcp_hpts_remove() marks as IHPTS_MOVING, slot = -1
600 * tcp_hpts_insert() sets slot to a meaningful value
601 * tcp_hpts_remove() again (we are here!), then in_pcbdrop()
602 * tcp_hptsi() finds pcb with meaningful slot and INP_DROPPED
604 inp->inp_hptsslot = -1;
610 tcp_in_hpts(struct inpcb *inp)
613 return (inp->inp_in_hpts == IHPTS_ONQUEUE);
617 hpts_slot(uint32_t wheel_slot, uint32_t plus)
620 * Given a slot on the wheel, what slot
621 * is that plus ticks out?
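 * For example, with the 102400-slot wheel, hpts_slot(102399, 2)
 * wraps around and yields slot 1.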
623 KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
624 return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
628 tick_to_wheel(uint32_t cts_in_wticks)
631 * Given a timestamp in ticks (so by
632 * default to get it to a real time one
633 * would multiply by 10, i.e. the number
634 * of ticks in a slot) map it to our limited
637 return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
641 hpts_slots_diff(int prev_slot, int slot_now)
644 * Given two slots that are someplace
645 * on our wheel, how far apart are they?
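 * For example, hpts_slots_diff(10, 5) is 102395 (the forward,
 * wrapping distance) and hpts_slots_diff(5, 5) is 102399, i.e.
 * the whole wheel less one slot.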
647 if (slot_now > prev_slot)
648 return (slot_now - prev_slot);
649 else if (slot_now == prev_slot)
651 * Special case, same means we can go all of our
652 * wheel less one slot.
654 return (NUM_OF_HPTSI_SLOTS - 1);
656 return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
660 * Given a slot on the wheel that is the current time
661 * mapped to the wheel (wheel_slot), what is the maximum
662 * distance forward that can be obtained without
663 * wrapping past either prev_slot or running_slot
664 * depending on the hpts state? Also, if passed
665 * a uint32_t *, fill it with the slot location.
667 * Note if you do not give this function the current
668 * time (that you think it is) mapped to the wheel slot
669 * then the results will not be what you expect and
670 * could lead to invalid inserts.
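 *
 * As a small worked example: if the pacer is idle, last stopped at
 * slot 100 (p_prev_slot), and the caller's current time maps to
 * slot 103, then 3 slots have already passed, so roughly
 * NUM_OF_HPTSI_SLOTS - 3 slots remain usable before an insert
 * would wrap past p_prev_slot.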
672 static inline int32_t
673 max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
675 uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;
677 if ((hpts->p_hpts_active == 1) &&
678 (hpts->p_wheel_complete == 0)) {
679 end_slot = hpts->p_runningslot;
680 /* Back up one tick */
682 end_slot = NUM_OF_HPTSI_SLOTS - 1;
686 *target_slot = end_slot;
689 * For the case where we are
690 * not active, or we have
691 * completed the pass over
692 * the wheel, we can use the
693 * prev tick and subtract one from it. This puts us
694 * as far out as possible on the wheel.
696 end_slot = hpts->p_prev_slot;
698 end_slot = NUM_OF_HPTSI_SLOTS - 1;
702 *target_slot = end_slot;
704 * Now we have close to the full wheel left minus the
705 * time it has been since the pacer went to sleep. Note
706 * that wheel_slot, passed in, should be the current time
707 * from the perspective of the caller, mapped to the wheel.
709 if (hpts->p_prev_slot != wheel_slot)
710 dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
714 * dis_to_travel in this case is the space between when the
715 * pacer stopped (p_prev_slot) and where our wheel_slot
716 * is now. To know how many slots we can put it in we
717 * subtract from the wheel size. We would not want
718 * to place something after p_prev_slot or it will
721 return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
724 * So how many slots are open between p_runningslot -> p_cur_slot?
725 * That is what is currently unavailable for insertion. Special
726 * case: when we are at the last slot, this gets 1, so that
727 * the answer to how many slots are available is all but 1.
729 if (hpts->p_runningslot == hpts->p_cur_slot)
732 dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
734 * How long has the pacer been running?
736 if (hpts->p_cur_slot != wheel_slot) {
737 /* The pacer is a bit late */
738 pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
740 /* The pacer is right on time, now == pacers start time */
744 * To get the number left we can insert into we simply
745 * subtract the distance the pacer has to run from how
746 * many slots there are.
748 avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
750 * Now how many of those will we eat due to the pacer's
751 * start time (p_cur_slot) being behind the
752 * real time (wheel_slot)?
754 if (avail_on_wheel <= pacer_to_now) {
756 * Wheel wrap: we can't fit on the wheel. That
757 * is unusual; the system must be way overloaded!
758 * Insert into the assured slot, and return special
761 counter_u64_add(combined_wheel_wrap, 1);
762 *target_slot = hpts->p_nxt_slot;
766 * We know how many slots are open
767 * on the wheel (the reverse of what
768 * is left to run). Take away the time from when
769 * the pacer started to now (wheel_slot)
770 * and that tells you how many slots are
771 * open that can be inserted into that won't
772 * be touched by the pacer until later.
774 return (avail_on_wheel - pacer_to_now);
781 check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t inp_hptsslot, int line)
784 * Sanity checks for the pacer with invariants
787 KASSERT(inp_hptsslot < NUM_OF_HPTSI_SLOTS,
788 ("hpts:%p inp:%p slot:%d > max",
789 hpts, inp, inp_hptsslot));
790 if ((hpts->p_hpts_active) &&
791 (hpts->p_wheel_complete == 0)) {
793 * If the pacer is processing an arc
794 * of the wheel, we need to make
795 * sure we are not inserting within
798 int distance, yet_to_run;
800 distance = hpts_slots_diff(hpts->p_runningslot, inp_hptsslot);
801 if (hpts->p_runningslot != hpts->p_cur_slot)
802 yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
804 yet_to_run = 0; /* processing last slot */
805 KASSERT(yet_to_run <= distance,
806 ("hpts:%p inp:%p slot:%d distance:%d yet_to_run:%d rs:%d cs:%d",
807 hpts, inp, inp_hptsslot,
808 distance, yet_to_run,
809 hpts->p_runningslot, hpts->p_cur_slot));
815 tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag)
817 struct tcp_hpts_entry *hpts;
819 uint32_t slot_on, wheel_cts, last_slot, need_new_to = 0;
820 int32_t wheel_slot, maxslots;
822 bool need_wakeup = false;
824 INP_WLOCK_ASSERT(inp);
825 MPASS(!tcp_in_hpts(inp));
826 MPASS(!(inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)));
829 * We now return the next-slot the hpts will be on, beyond its
830 * current run (if up) or where it was when it stopped if it is
833 hpts = tcp_hpts_lock(inp);
836 memset(diag, 0, sizeof(struct hpts_diag));
837 diag->p_hpts_active = hpts->p_hpts_active;
838 diag->p_prev_slot = hpts->p_prev_slot;
839 diag->p_runningslot = hpts->p_runningslot;
840 diag->p_nxt_slot = hpts->p_nxt_slot;
841 diag->p_cur_slot = hpts->p_cur_slot;
842 diag->p_curtick = hpts->p_curtick;
843 diag->p_lasttick = hpts->p_lasttick;
844 diag->slot_req = slot;
845 diag->p_on_min_sleep = hpts->p_on_min_sleep;
846 diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
849 /* Ok we need to set it on the hpts in the current slot */
850 inp->inp_hpts_request = 0;
851 if ((hpts->p_hpts_active == 0) || (hpts->p_wheel_complete)) {
853 * A sleeping hpts: we want it to run in the next slot.
854 * Note that in this state p_prev_slot == p_cur_slot.
856 inp->inp_hptsslot = hpts_slot(hpts->p_prev_slot, 1);
857 if ((hpts->p_on_min_sleep == 0) &&
858 (hpts->p_hpts_active == 0))
861 inp->inp_hptsslot = hpts->p_runningslot;
862 if (__predict_true(inp->inp_in_hpts != IHPTS_MOVING))
863 inp_hpts_insert(inp, hpts);
866 * Activate the hpts if it is sleeping and its
869 hpts->p_direct_wake = 1;
872 slot_on = hpts->p_nxt_slot;
877 /* Get the current time relative to the wheel */
878 wheel_cts = tcp_tv_to_hptstick(&tv);
879 /* Map it onto the wheel */
880 wheel_slot = tick_to_wheel(wheel_cts);
881 /* Now what's the max we can place it at? */
882 maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
884 diag->wheel_slot = wheel_slot;
885 diag->maxslots = maxslots;
886 diag->wheel_cts = wheel_cts;
889 /* The pacer is in a wheel wrap behind, yikes! */
892 * Reduce by 1 to prevent a forever loop in
893 * case something else is wrong. Note this
894 * probably does not hurt because if the pacer
895 * really is this far behind we will be
896 * > 1 second late calling anyway.
900 inp->inp_hptsslot = last_slot;
901 inp->inp_hpts_request = slot;
902 } else if (maxslots >= slot) {
903 /* It all fits on the wheel */
904 inp->inp_hpts_request = 0;
905 inp->inp_hptsslot = hpts_slot(wheel_slot, slot);
907 /* It does not fit */
908 inp->inp_hpts_request = slot - maxslots;
909 inp->inp_hptsslot = last_slot;
912 diag->slot_remaining = inp->inp_hpts_request;
913 diag->inp_hptsslot = inp->inp_hptsslot;
916 check_if_slot_would_be_wrong(hpts, inp, inp->inp_hptsslot, line);
918 if (__predict_true(inp->inp_in_hpts != IHPTS_MOVING))
919 inp_hpts_insert(inp, hpts);
920 if ((hpts->p_hpts_active == 0) &&
921 (inp->inp_hpts_request == 0) &&
922 (hpts->p_on_min_sleep == 0)) {
924 * The hpts is sleeping and NOT on a minimum
925 * sleep time; we need to figure out where
926 * it will wake up and whether we need to reschedule
929 uint32_t have_slept, yet_to_sleep;
931 /* Now do we need to restart the hpts's timer? */
932 have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
933 if (have_slept < hpts->p_hpts_sleep_time)
934 yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
936 /* We are over-due */
941 diag->have_slept = have_slept;
942 diag->yet_to_sleep = yet_to_sleep;
945 (yet_to_sleep > slot)) {
947 * We need to reschedule the hpts's time-out.
949 hpts->p_hpts_sleep_time = slot;
950 need_new_to = slot * HPTS_TICKS_PER_SLOT;
954 * Now how far out is the hpts sleeping? If active is 1, it is
955 * up and ticking and we do nothing; otherwise we may need to
956 * reschedule its callout if need_new_to is set from above.
959 hpts->p_direct_wake = 1;
962 diag->need_new_to = 0;
963 diag->co_ret = 0xffff0000;
965 } else if (need_new_to) {
972 while (need_new_to > HPTS_USEC_IN_SEC) {
974 need_new_to -= HPTS_USEC_IN_SEC;
976 tv.tv_usec = need_new_to;
978 cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
979 co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
980 hpts_timeout_swi, hpts, cpu,
981 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
983 diag->need_new_to = need_new_to;
984 diag->co_ret = co_ret;
987 slot_on = hpts->p_nxt_slot;
994 hpts_random_cpu(struct inpcb *inp){
996 * No flow type set, distribute the load randomly.
1002 * Shortcut if it is already set. XXXGL: does it happen?
1004 if (inp->inp_hpts_cpu_set) {
1005 return (inp->inp_hpts_cpu);
1007 /* Nothing set use a random number */
1009 cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
1014 hpts_cpuid(struct inpcb *inp, int *failed)
1018 struct hpts_domain_info *di;
1022 if (inp->inp_hpts_cpu_set) {
1023 return (inp->inp_hpts_cpu);
1026 * If we are using the irq cpu set by LRO or
1027 * the driver then it overrides all other domains.
1029 if (tcp_use_irq_cpu) {
1030 if (inp->inp_irq_cpu_set == 0) {
1034 return(inp->inp_irq_cpu);
1036 /* If one is set the other must be the same */
1038 cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
1039 if (cpuid == NETISR_CPUID_NONE)
1040 return (hpts_random_cpu(inp));
1045 * We don't have a flowid -> cpuid mapping, so cheat and just map
1046 * unknown cpuids to curcpu. Not the best, but apparently better
1047 * than defaulting to swi 0.
1049 if (inp->inp_flowtype == M_HASHTYPE_NONE) {
1050 counter_u64_add(cpu_uses_random, 1);
1051 return (hpts_random_cpu(inp));
1054 * Hash to a thread based on the flowid. If we are using numa,
1055 * then restrict the hash to the numa domain where the inp lives.
1058 if (tcp_bind_threads == 2 && inp->inp_numa_domain != M_NODOM) {
1059 di = &hpts_domains[inp->inp_numa_domain];
1060 cpuid = di->cpu[inp->inp_flowid % di->count];
1063 cpuid = inp->inp_flowid % mp_ncpus;
1064 counter_u64_add(cpu_uses_flowid, 1);
1069 tcp_drop_in_pkts(struct tcpcb *tp)
1078 tp->t_in_pkt = NULL;
1088 tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
1090 uint32_t t = 0, i, fnd = 0;
1092 if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
1094 * Find next slot that is occupied and use that to
1095 * be the sleep time.
1097 for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
1098 if (TAILQ_EMPTY(&hpts->p_hptss[t].head) == 0) {
1102 t = (t + 1) % NUM_OF_HPTSI_SLOTS;
1104 KASSERT(fnd != 0, ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
1105 hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
1107 /* No one on the wheel; sleep for all but 400 slots or sleep max */
1108 hpts->p_hpts_sleep_time = hpts_sleep_max;
1113 tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout)
1118 uint64_t total_slots_processed = 0;
1119 int32_t slots_to_run, i, error;
1120 int32_t paced_cnt = 0;
1121 int32_t loop_cnt = 0;
1122 int32_t did_prefetch = 0;
1123 int32_t prefetch_ninp = 0;
1124 int32_t prefetch_tp = 0;
1125 int32_t wrap_loop_cnt = 0;
1126 int32_t slot_pos_of_endpoint = 0;
1127 int32_t orig_exit_slot;
1128 int8_t completed_measure = 0, seen_endpoint = 0;
1130 HPTS_MTX_ASSERT(hpts);
1132 /* record previous info for any logging */
1133 hpts->saved_lasttick = hpts->p_lasttick;
1134 hpts->saved_curtick = hpts->p_curtick;
1135 hpts->saved_curslot = hpts->p_cur_slot;
1136 hpts->saved_prev_slot = hpts->p_prev_slot;
1138 hpts->p_lasttick = hpts->p_curtick;
1139 hpts->p_curtick = tcp_gethptstick(&tv);
1140 cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1141 orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1142 if ((hpts->p_on_queue_cnt == 0) ||
1143 (hpts->p_lasttick == hpts->p_curtick)) {
1145 * No time has yet passed,
1148 hpts->p_prev_slot = hpts->p_cur_slot;
1149 hpts->p_lasttick = hpts->p_curtick;
1153 hpts->p_wheel_complete = 0;
1154 HPTS_MTX_ASSERT(hpts);
1155 slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
1156 if (((hpts->p_curtick - hpts->p_lasttick) >
1157 ((NUM_OF_HPTSI_SLOTS-1) * HPTS_TICKS_PER_SLOT)) &&
1158 (hpts->p_on_queue_cnt != 0)) {
1160 * Wheel wrap is occurring; basically we
1161 * are behind and the distance between
1162 * runs has spread so much it has exceeded
1163 * the time on the wheel (1.024 seconds). This
1164 * is ugly and should NOT be happening. We
1165 * need to run the entire wheel. We last processed
1166 * p_prev_slot, so that needs to be the last slot
1167 * we run. The next slot after that should be our
1168 * reserved first slot for new, and then starts
1169 * the running position. Now the problem is the
1170 * reserved "not to run yet" place does not exist
1171 * and there may be inps in there that need
1172 * running. We can merge those into the
1173 * first slot at the head.
1176 hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
1177 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
1179 * Adjust p_cur_slot to be where we are starting from;
1180 * hopefully we will catch up (fat chance if something
1181 * is broken this bad :( )
1183 hpts->p_cur_slot = hpts->p_prev_slot;
1185 * The next slot has guys to run too, and that would
1186 * be where we would normally start. Let's move them into
1187 * the next slot (p_prev_slot + 2) so that we will
1188 * run them; the extra 10 usecs of lateness (from being
1189 * put behind) does not really matter in this situation.
1191 TAILQ_FOREACH(inp, &hpts->p_hptss[hpts->p_nxt_slot].head,
1193 MPASS(inp->inp_hptsslot == hpts->p_nxt_slot);
1194 MPASS(inp->inp_hpts_gencnt ==
1195 hpts->p_hptss[hpts->p_nxt_slot].gencnt);
1196 MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
1199 * Update gencnt and nextslot accordingly to match
1200 * the new location. This is safe since it takes both
1201 * the INP lock and the pacer mutex to change the
1202 * inp_hptsslot and inp_hpts_gencnt.
1204 inp->inp_hpts_gencnt =
1205 hpts->p_hptss[hpts->p_runningslot].gencnt;
1206 inp->inp_hptsslot = hpts->p_runningslot;
1208 TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot].head,
1209 &hpts->p_hptss[hpts->p_nxt_slot].head, inp_hpts);
1210 hpts->p_hptss[hpts->p_runningslot].count +=
1211 hpts->p_hptss[hpts->p_nxt_slot].count;
1212 hpts->p_hptss[hpts->p_nxt_slot].count = 0;
1213 hpts->p_hptss[hpts->p_nxt_slot].gencnt++;
1214 slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
1215 counter_u64_add(wheel_wrap, 1);
1218 * Nxt slot is always one after p_runningslot, though
1219 * it's not usually used unless we are doing wheel wrap.
1221 hpts->p_nxt_slot = hpts->p_prev_slot;
1222 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
1224 if (hpts->p_on_queue_cnt == 0) {
1227 for (i = 0; i < slots_to_run; i++) {
1228 struct inpcb *inp, *ninp;
1229 TAILQ_HEAD(, inpcb) head = TAILQ_HEAD_INITIALIZER(head);
1230 struct hptsh *hptsh;
1231 uint32_t runningslot;
1234 * Calculate our delay; if there are no extra ticks there
1235 * was not any (i.e. if slots_to_run == 1, no delay).
1237 hpts->p_delayed_by = (slots_to_run - (i + 1)) *
1238 HPTS_TICKS_PER_SLOT;
1240 runningslot = hpts->p_runningslot;
1241 hptsh = &hpts->p_hptss[runningslot];
1242 TAILQ_SWAP(&head, &hptsh->head, inpcb, inp_hpts);
1243 hpts->p_on_queue_cnt -= hptsh->count;
1249 TAILQ_FOREACH_SAFE(inp, &head, inp_hpts, ninp) {
1253 /* We prefetch the next inp if possible */
1254 kern_prefetch(ninp, &prefetch_ninp);
1259 if (seen_endpoint == 0) {
1261 orig_exit_slot = slot_pos_of_endpoint =
1263 } else if (completed_measure == 0) {
1264 /* Record the new position */
1265 orig_exit_slot = runningslot;
1267 total_slots_processed++;
1271 if (inp->inp_hpts_cpu_set == 0) {
1277 if (__predict_false(inp->inp_in_hpts == IHPTS_MOVING)) {
1278 if (inp->inp_hptsslot == -1) {
1279 inp->inp_in_hpts = IHPTS_NONE;
1280 if (in_pcbrele_wlocked(inp) == false)
1284 inp_hpts_insert(inp, hpts);
1291 MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
1292 MPASS(!(inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)));
1293 KASSERT(runningslot == inp->inp_hptsslot,
1294 ("Hpts:%p inp:%p slot mis-aligned %u vs %u",
1295 hpts, inp, runningslot, inp->inp_hptsslot));
1297 if (inp->inp_hpts_request) {
1299 * This guy is deferred out further in time
1300 * than our wheel had available on it.
1301 * Push him back on the wheel or run it
1304 uint32_t maxslots, last_slot, remaining_slots;
1306 remaining_slots = slots_to_run - (i + 1);
1307 if (inp->inp_hpts_request > remaining_slots) {
1310 * How far out can we go?
1312 maxslots = max_slots_available(hpts,
1313 hpts->p_cur_slot, &last_slot);
1314 if (maxslots >= inp->inp_hpts_request) {
1315 /* We can place it finally to
1317 inp->inp_hptsslot = hpts_slot(
1318 hpts->p_runningslot,
1319 inp->inp_hpts_request);
1320 inp->inp_hpts_request = 0;
1322 /* Work off some more time */
1323 inp->inp_hptsslot = last_slot;
1324 inp->inp_hpts_request -=
1327 inp_hpts_insert(inp, hpts);
1332 inp->inp_hpts_request = 0;
1333 /* Fall through; we will do it now */
1336 inp_hpts_release(inp);
1337 tp = intotcpcb(inp);
1341 * Setup so that the next time we will move to
1342 * the right CPU. This should be a rare
1343 * event. It will sometimes happen when we
1344 * are the client side (usually not the
1345 * server). Somehow tcp_output() gets called
1346 * before tcp_do_segment() sets the
1347 * initial state. This means the r_cpu and
1348 * r_hpts_cpu are 0. We get on the hpts, and
1349 * then tcp_input() gets called setting
1350 * the r_cpu to the correct value. The hpts
1351 * goes off and sees the mismatch. We
1352 * simply correct it here and the CPU will
1353 * switch to the new hpts next time the tcb
1354 * gets added to the hpts (not this one)
1359 CURVNET_SET(inp->inp_vnet);
1360 /* Lets do any logging that we might want to */
1361 if (hpts_does_tp_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
1362 tcp_hpts_log(hpts, tp, &tv, slots_to_run, i, from_callout);
1365 if (tp->t_fb_ptr != NULL) {
1366 kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1369 if ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) && tp->t_in_pkt) {
1370 error = (*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0);
1372 /* The input killed the connection */
1376 inp->inp_hpts_calls = 1;
1377 error = tcp_output(tp);
1380 inp->inp_hpts_calls = 0;
1381 if (ninp && ninp->inp_ppcb) {
1383 * If we have a nxt inp, see if we can
1384 * prefetch its ppcb. Note this may seem
1385 * "risky" since we have no locks (other
1386 * than the previous inp) and there is no
1387 * assurance that ninp was not pulled while
1388 * we were processing inp and freed. If this
1389 * occurred it could mean that either:
1391 * a) It's NULL (which is fine, we won't go
1392 * here) <or> b) It's valid (which is cool, we
1393 * will prefetch it) <or> c) The inp got
1394 * freed back to the slab which was
1395 * reallocated. Then the piece of memory was
1396 * re-used and something else (not an
1397 * address) is in inp_ppcb. If that occurs
1398 * we don't crash, but take a TLB shootdown
1399 * performance hit (same as if it was NULL
1400 * and we tried to pre-fetch it).
1402 * Considering that the likelihood of <c> is
1403 * quite rare we will take a risk on doing
1404 * this. If performance drops after testing
1405 * we can always take this out. NB: the
1406 * kern_prefetch on amd64 actually has
1407 * protection against a bad address now via
1408 * the DMAP_() tests. This will prevent the
1409 * TLB hit, and instead if <c> occurs just
1410 * cause us to load cache with a useless
1413 kern_prefetch(ninp->inp_ppcb, &prefetch_tp);
1420 if (seen_endpoint) {
1422 * We now have an accurate distance between
1423 * slot_pos_of_endpoint <-> orig_exit_slot
1424 * to tell us how late we were, orig_exit_slot
1425 * is where we calculated the end of our cycle to
1426 * be when we first entered.
1428 completed_measure = 1;
1431 hpts->p_runningslot++;
1432 if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
1433 hpts->p_runningslot = 0;
1437 HPTS_MTX_ASSERT(hpts);
1438 hpts->p_delayed_by = 0;
1440 * Check to see if we took an excess amount of time and need to run
1441 * more ticks (if we did not hit ENOBUFS).
1443 hpts->p_prev_slot = hpts->p_cur_slot;
1444 hpts->p_lasttick = hpts->p_curtick;
1445 if ((from_callout == 0) || (loop_cnt > max_pacer_loops)) {
1447 * Something is seriously slow: we have
1448 * looped through processing the wheel,
1449 * and by the time we cleared what
1450 * needed to run, max_pacer_loops times,
1451 * we still needed to run. That means
1452 * the system is hopelessly behind and
1453 * can never catch up :(
1455 * We will just lie to this thread
1456 * and let it think p_curtick is
1457 * correct. When it next awakens
1458 * it will find itself further behind.
1461 counter_u64_add(hpts_hopelessly_behind, 1);
1464 hpts->p_curtick = tcp_gethptstick(&tv);
1465 hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1466 if (seen_endpoint == 0) {
1467 /* We saw no endpoint but we may be looping */
1468 orig_exit_slot = hpts->p_cur_slot;
1470 if ((wrap_loop_cnt < 2) &&
1471 (hpts->p_lasttick != hpts->p_curtick)) {
1472 counter_u64_add(hpts_loops, 1);
1477 cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1479 * Set flag to tell that we are done for
1480 * any slot input that happens during
1483 hpts->p_wheel_complete = 1;
1485 * Now did we spend too long running input and need to run more ticks?
1486 * Note that if wrap_loop_cnt < 2 then we should have the conditions
1487 * in the KASSERTs true. But if the wheel is behind, i.e. wrap_loop_cnt
1488 * is 2 or greater, then the conditions most likely are *not* true.
1489 * Also, if we are not called from the callout, we don't run the wheel
1490 * multiple times so the slots may not align either.
1492 KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
1493 (wrap_loop_cnt >= 2) || (from_callout == 0)),
1494 ("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
1495 hpts->p_prev_slot, hpts->p_cur_slot));
1496 KASSERT(((hpts->p_lasttick == hpts->p_curtick)
1497 || (wrap_loop_cnt >= 2) || (from_callout == 0)),
1498 ("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
1499 hpts->p_lasttick, hpts->p_curtick));
1500 if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
1501 hpts->p_curtick = tcp_gethptstick(&tv);
1502 counter_u64_add(hpts_loops, 1);
1503 hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1508 tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);
1511 return(hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
1517 __tcp_set_hpts(struct inpcb *inp, int32_t line)
1519 struct tcp_hpts_entry *hpts;
1522 INP_WLOCK_ASSERT(inp);
1523 hpts = tcp_hpts_lock(inp);
1524 if ((inp->inp_in_hpts == 0) &&
1525 (inp->inp_hpts_cpu_set == 0)) {
1526 inp->inp_hpts_cpu = hpts_cpuid(inp, &failed);
1528 inp->inp_hpts_cpu_set = 1;
1530 mtx_unlock(&hpts->p_mtx);
1534 __tcp_run_hpts(struct tcp_hpts_entry *hpts)
1538 if (hpts->p_hpts_active) {
1539 /* Already active */
1542 if (mtx_trylock(&hpts->p_mtx) == 0) {
1543 /* Someone else got the lock */
1546 if (hpts->p_hpts_active)
1548 hpts->syscall_cnt++;
1549 counter_u64_add(hpts_direct_call, 1);
1550 hpts->p_hpts_active = 1;
1551 ticks_ran = tcp_hptsi(hpts, 0);
1552 /* We may want to adjust the sleep values here */
1553 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1554 if (ticks_ran > ticks_indicate_less_sleep) {
1559 hpts->p_mysleep.tv_usec /= 2;
1560 if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
1561 hpts->p_mysleep.tv_usec = dynamic_min_sleep;
1562 /* Reschedule with new to value */
1563 tcp_hpts_set_max_sleep(hpts, 0);
1564 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1565 /* Validate it's in the right ranges */
1566 if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
1567 hpts->overidden_sleep = tv.tv_usec;
1568 tv.tv_usec = hpts->p_mysleep.tv_usec;
1569 } else if (tv.tv_usec > dynamic_max_sleep) {
1570 /* Lets not let sleep get above this value */
1571 hpts->overidden_sleep = tv.tv_usec;
1572 tv.tv_usec = dynamic_max_sleep;
1575 * In this mode the timer is a backstop to
1576 * all the userret/lro_flushes so we use
1577 * the dynamic value and set the on_min_sleep
1578 * flag so we will not be awoken.
1581 cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
1582 /* Store off to make visible the actual sleep time */
1583 hpts->sleeping = tv.tv_usec;
1584 callout_reset_sbt_on(&hpts->co, sb, 0,
1585 hpts_timeout_swi, hpts, cpu,
1586 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1587 } else if (ticks_ran < ticks_indicate_more_sleep) {
1588 /* For the further sleep, don't reschedule hpts */
1589 hpts->p_mysleep.tv_usec *= 2;
1590 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1591 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1593 hpts->p_on_min_sleep = 1;
1595 hpts->p_hpts_active = 0;
1597 HPTS_MTX_ASSERT(hpts);
1598 mtx_unlock(&hpts->p_mtx);
1601 static struct tcp_hpts_entry *
1602 tcp_choose_hpts_to_run()
1605 uint32_t cts, time_since_ran, calc;
1607 if ((hpts_uses_oldest == 0) ||
1608 ((hpts_uses_oldest > 1) &&
1609 (tcp_pace.rp_ent[(tcp_pace.rp_num_hptss-1)]->p_on_queue_cnt >= hpts_uses_oldest))) {
1611 * We have either disabled the feature (0), or
1612 * we have crossed over the oldest threshold on the
1613 * last hpts. We use the last one for simplification
1614 * since we don't want to use the first one (it may
1615 * have starting connections that have not settled
1618 return(tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
1620 /* Lets find the oldest hpts to attempt to run */
1621 cts = tcp_get_usecs(NULL);
1624 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1625 if (TSTMP_GT(cts, cts_last_ran[i]))
1626 calc = cts - cts_last_ran[i];
1629 if (calc > time_since_ran) {
1631 time_since_ran = calc;
1634 if (oldest_idx >= 0)
1635 return(tcp_pace.rp_ent[oldest_idx]);
1637 return(tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
1644 static struct tcp_hpts_entry *hpts;
1645 struct epoch_tracker et;
1647 NET_EPOCH_ENTER(et);
1648 hpts = tcp_choose_hpts_to_run();
1649 __tcp_run_hpts(hpts);
1655 tcp_hpts_thread(void *ctx)
1657 struct tcp_hpts_entry *hpts;
1658 struct epoch_tracker et;
1663 hpts = (struct tcp_hpts_entry *)ctx;
1664 mtx_lock(&hpts->p_mtx);
1665 if (hpts->p_direct_wake) {
1666 /* Signaled by input or output with low occupancy count. */
1667 callout_stop(&hpts->co);
1668 counter_u64_add(hpts_direct_awakening, 1);
1670 /* Timed out, the normal case. */
1671 counter_u64_add(hpts_wake_timeout, 1);
1672 if (callout_pending(&hpts->co) ||
1673 !callout_active(&hpts->co)) {
1674 mtx_unlock(&hpts->p_mtx);
1678 callout_deactivate(&hpts->co);
1679 hpts->p_hpts_wake_scheduled = 0;
1680 NET_EPOCH_ENTER(et);
1681 if (hpts->p_hpts_active) {
1683 * We are active already. This means that a syscall
1684 * trap or LRO is running on behalf of hpts. In that case
1685 * we need to double our timeout since there seems to be
1686 * enough activity in the system that we don't need to
1687 * run as often (if we were not directly woken).
1689 if (hpts->p_direct_wake == 0) {
1690 counter_u64_add(hpts_back_tosleep, 1);
1691 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1692 hpts->p_mysleep.tv_usec *= 2;
1693 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1694 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1695 tv.tv_usec = hpts->p_mysleep.tv_usec;
1696 hpts->p_on_min_sleep = 1;
1699 * Here we have a low count on the wheel, but
1700 * somehow we still collided with one of the
1701 * connections. Let's go back to sleep for a
1702 * min sleep time, but clear the flag so we
1703 * can be awoken by an insert.
1705 hpts->p_on_min_sleep = 0;
1706 tv.tv_usec = tcp_min_hptsi_time;
1710 * Directly woken most likely to reset the
1714 tv.tv_usec = hpts->p_mysleep.tv_usec;
1719 hpts->p_hpts_active = 1;
1720 ticks_ran = tcp_hptsi(hpts, 1);
1722 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1723 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1724 if(hpts->p_direct_wake == 0) {
1726 * Only adjust sleep time if we were
1727 * called from the callout i.e. direct_wake == 0.
1729 if (ticks_ran < ticks_indicate_more_sleep) {
1730 hpts->p_mysleep.tv_usec *= 2;
1731 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1732 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1733 } else if (ticks_ran > ticks_indicate_less_sleep) {
1734 hpts->p_mysleep.tv_usec /= 2;
1735 if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
1736 hpts->p_mysleep.tv_usec = dynamic_min_sleep;
1739 if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
1740 hpts->overidden_sleep = tv.tv_usec;
1741 tv.tv_usec = hpts->p_mysleep.tv_usec;
1742 } else if (tv.tv_usec > dynamic_max_sleep) {
1743 /* Lets not let sleep get above this value */
1744 hpts->overidden_sleep = tv.tv_usec;
1745 tv.tv_usec = dynamic_max_sleep;
1748 * In this mode the timer is a backstop to
1749 * all the userret/lro_flushes so we use
1750 * the dynamic value and set the on_min_sleep
1751 * flag so we will not be awoken.
1753 hpts->p_on_min_sleep = 1;
1754 } else if (hpts->p_on_queue_cnt == 0) {
1756 * No one on the wheel, please wake us up
1757 * if you insert on the wheel.
1759 hpts->p_on_min_sleep = 0;
1760 hpts->overidden_sleep = 0;
1763 * We hit here when we have a low number of
1764 * clients on the wheel (our else clause).
1765 * We may need to go on min sleep; if we set
1766 * the flag we will not be awoken if someone
1767 * is inserted ahead of us. Clearing the flag
1768 * means we can be awoken. This is "old mode"
1769 * where the timer is what runs hpts mainly.
1771 if (tv.tv_usec < tcp_min_hptsi_time) {
1773 * Yes on min sleep, which means
1774 * we cannot be awoken.
1776 hpts->overidden_sleep = tv.tv_usec;
1777 tv.tv_usec = tcp_min_hptsi_time;
1778 hpts->p_on_min_sleep = 1;
1780 /* Clear the min sleep flag */
1781 hpts->overidden_sleep = 0;
1782 hpts->p_on_min_sleep = 0;
1785 HPTS_MTX_ASSERT(hpts);
1786 hpts->p_hpts_active = 0;
1788 hpts->p_direct_wake = 0;
1790 cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
1791 /* Store off to make visible the actual sleep time */
1792 hpts->sleeping = tv.tv_usec;
1793 callout_reset_sbt_on(&hpts->co, sb, 0,
1794 hpts_timeout_swi, hpts, cpu,
1795 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1797 mtx_unlock(&hpts->p_mtx);
1803 tcp_init_hptsi(void *st)
1805 int32_t i, j, error, bound = 0, created = 0;
1809 struct tcp_hpts_entry *hpts;
1813 uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1814 int count, domain, cpu;
1816 tcp_pace.rp_num_hptss = ncpus;
1817 hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
1818 hpts_loops = counter_u64_alloc(M_WAITOK);
1819 back_tosleep = counter_u64_alloc(M_WAITOK);
1820 combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
1821 wheel_wrap = counter_u64_alloc(M_WAITOK);
1822 hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
1823 hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
1824 hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
1825 hpts_direct_call = counter_u64_alloc(M_WAITOK);
1826 cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
1827 cpu_uses_random = counter_u64_alloc(M_WAITOK);
1830 sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
1831 tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
1832 sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
1833 cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
1834 asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
1835 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1836 tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
1837 M_TCPHPTS, M_WAITOK | M_ZERO);
1838 tcp_pace.rp_ent[i]->p_hptss = malloc(asz,
1839 M_TCPHPTS, M_WAITOK);
1840 hpts = tcp_pace.rp_ent[i];
1842 * Init all the hpts structures that are not specifically
1843 * zero'd by the allocations. Also attach them to the
1844 * appropriate sysctl block.
1846 mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
1847 "hpts", MTX_DEF | MTX_DUPOK);
1848 for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
1849 TAILQ_INIT(&hpts->p_hptss[j].head);
1850 hpts->p_hptss[j].count = 0;
1851 hpts->p_hptss[j].gencnt = 0;
1853 sysctl_ctx_init(&hpts->hpts_ctx);
1854 sprintf(unit, "%d", i);
1855 hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
1856 SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
1859 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1861 SYSCTL_ADD_INT(&hpts->hpts_ctx,
1862 SYSCTL_CHILDREN(hpts->hpts_root),
1863 OID_AUTO, "out_qcnt", CTLFLAG_RD,
1864 &hpts->p_on_queue_cnt, 0,
1865 "Count TCB's awaiting output processing");
1866 SYSCTL_ADD_U16(&hpts->hpts_ctx,
1867 SYSCTL_CHILDREN(hpts->hpts_root),
1868 OID_AUTO, "active", CTLFLAG_RD,
1869 &hpts->p_hpts_active, 0,
1870 "Is the hpts active");
1871 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1872 SYSCTL_CHILDREN(hpts->hpts_root),
1873 OID_AUTO, "curslot", CTLFLAG_RD,
1874 &hpts->p_cur_slot, 0,
1875 "What the current running pacers goal");
1876 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1877 SYSCTL_CHILDREN(hpts->hpts_root),
1878 OID_AUTO, "runtick", CTLFLAG_RD,
1879 &hpts->p_runningslot, 0,
1880 "What the running pacers current slot is");
1881 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1882 SYSCTL_CHILDREN(hpts->hpts_root),
1883 OID_AUTO, "curtick", CTLFLAG_RD,
1884 &hpts->p_curtick, 0,
1885 "What the running pacers last tick mapped to the wheel was");
1886 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1887 SYSCTL_CHILDREN(hpts->hpts_root),
1888 OID_AUTO, "lastran", CTLFLAG_RD,
1889 &cts_last_ran[i], 0,
1890 "The last usec tick that this hpts ran");
1891 SYSCTL_ADD_LONG(&hpts->hpts_ctx,
1892 SYSCTL_CHILDREN(hpts->hpts_root),
1893 OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
1894 &hpts->p_mysleep.tv_usec,
1895 "What the running pacers is using for p_mysleep.tv_usec");
1896 SYSCTL_ADD_U64(&hpts->hpts_ctx,
1897 SYSCTL_CHILDREN(hpts->hpts_root),
1898 OID_AUTO, "now_sleeping", CTLFLAG_RD,
1900 "What the running pacers is actually sleeping for");
1901 SYSCTL_ADD_U64(&hpts->hpts_ctx,
1902 SYSCTL_CHILDREN(hpts->hpts_root),
1903 OID_AUTO, "syscall_cnt", CTLFLAG_RD,
1904 &hpts->syscall_cnt, 0,
1905 "How many times we had syscalls on this hpts");
1907 hpts->p_hpts_sleep_time = hpts_sleep_max;
1909 hpts->p_curtick = tcp_gethptstick(&tv);
1910 cts_last_ran[i] = tcp_tv_to_usectick(&tv);
1911 hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1912 hpts->p_cpu = 0xffff;
1913 hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
1914 callout_init(&hpts->co, 1);
1917 /* Don't try to bind to NUMA domains if we don't have any */
1918 if (vm_ndomains == 1 && tcp_bind_threads == 2)
1919 tcp_bind_threads = 0;
1922 * Now lets start ithreads to handle the hptss.
1924 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1925 hpts = tcp_pace.rp_ent[i];
1927 error = swi_add(&hpts->ie, "hpts",
1928 tcp_hpts_thread, (void *)hpts,
1929 SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
1931 ("Can't add hpts:%p i:%d err:%d",
1934 hpts->p_mysleep.tv_sec = 0;
1935 hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
1936 if (tcp_bind_threads == 1) {
1937 if (intr_event_bind(hpts->ie, i) == 0)
1939 } else if (tcp_bind_threads == 2) {
1941 domain = pc->pc_domain;
1942 CPU_COPY(&cpuset_domain[domain], &cs);
1943 if (intr_event_bind_ithread_cpuset(hpts->ie, &cs)
1946 count = hpts_domains[domain].count;
1947 hpts_domains[domain].cpu[count] = i;
1948 hpts_domains[domain].count++;
1952 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1953 hpts->sleeping = tv.tv_usec;
1955 cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
1956 callout_reset_sbt_on(&hpts->co, sb, 0,
1957 hpts_timeout_swi, hpts, cpu,
1958 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1961 * If we somehow have an empty domain, fall back to choosing
1962 * among all hpts threads.
1964 for (i = 0; i < vm_ndomains; i++) {
1965 if (hpts_domains[i].count == 0) {
1966 tcp_bind_threads = 0;
1970 printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
1972 tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
1974 printf("HPTS is in INVARIANT mode!!\n");
1978 SYSINIT(tcphptsi, SI_SUB_SOFTINTR, SI_ORDER_ANY, tcp_init_hptsi, NULL);
1979 MODULE_VERSION(tcphpts, 1);