2 * Copyright (c) 2016-2018 Netflix, Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
30 #include "opt_inet6.h"
34 * Some notes about usage.
36 * The tcp_hpts system is designed to provide a high precision timer
37 * system for tcp. Its main purpose is to provide a mechanism for
38 * pacing packets out onto the wire. It can be used in two ways
39 * by a given TCP stack (and those two methods can be used simultaneously).
41 * First, and probably the main way it is used by Rack and BBR, it can
42 * be used to call tcp_output() of a transport stack at some time in the future.
43 * The normal way this is done is that tcp_output() of the stack schedules
44 * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
45 * slot is the time from now that the stack wants to be called but it
46 * must be converted to tcp_hpts's notion of a slot. This is done with
47 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
48 * call from the tcp_output() routine might look like:
50 * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
52 * The above would schedule tcp_output() to be called in 550 microseconds.
53 * Note that if using this mechanism the stack will want to add near
54 * its top a check to prevent unwanted calls (from user land or the
55 * arrival of incoming ACKs). So it would add something like:
57 * if (tcp_in_hpts(inp))
58 * return;
59 *
60 * to prevent output processing until the time allotted has gone by.
61 * Of course this is a bare bones example and the stack will probably
62 * have more considerations than just the above.
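*
* Putting the pieces together, an output path using this mechanism
* might be shaped like the sketch below. This is only a hedged
* sketch: my_stack_output() and my_stack_send_allowed() are
* hypothetical names, while tcp_in_hpts(), tcp_hpts_insert() and
* HPTS_USEC_TO_SLOTS() are the facility's own interfaces:
*
*	static void
*	my_stack_output(struct tcpcb *tp)
*	{
*		if (tcp_in_hpts(tp))
*			return;
*		my_stack_send_allowed(tp);
*		tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
*	}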
64 * In order to run input queued segments from the HPTS context the
65 * tcp stack must define an input function for
66 * tfb_do_queued_segments(). This function understands
67 * how to dequeue an array of packets that were input and
68 * knows how to call the correct processing routine.
70 * Locking in this is important as well, so most likely the
71 * stack will need to define tfb_do_segment_nounlock(),
72 * splitting tfb_do_segment() into two parts: a main processing
73 * part that does not unlock the INP and returns a value of 1 or 0.
74 * It returns 0 if all is well and the lock was not released. It
75 * returns 1 if we had to destroy the TCB (a reset received etc).
76 * The remainder of tfb_do_segment() then becomes just a simple call
77 * to the tfb_do_segment_nounlock() function, checking the return
78 * code and possibly unlocking.
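*
* A minimal sketch of the wrapper half of that split, assuming the
* return convention above (my_stack_do_segment*() are illustrative
* names with abbreviated argument lists; INP_WUNLOCK() and
* tptoinpcb() are the standard inpcb primitives):
*
*	static void
*	my_stack_do_segment(struct tcpcb *tp, struct mbuf *m)
*	{
*		if (my_stack_do_segment_nounlock(tp, m) == 0)
*			INP_WUNLOCK(tptoinpcb(tp));
*	}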
80 * The stack must also set the flag on the INP that it supports this
81 * feature i.e. INP_SUPPORTS_MBUFQ. The LRO code recognizes
82 * this flag as well and will queue packets when it is set.
83 * There are other flags as well, INP_MBUF_QUEUE_READY and
84 * INP_DONT_SACK_QUEUE. The first flag tells the LRO code
85 * that we are in the pacer for output so there is no
86 * need to wake up the hpts system to get immediate
87 * input. The second tells the LRO code that it is okay,
88 * if a SACK arrives, to still defer input and let
89 * the current hpts timer run (this is usually set when
90 * a rack timer is up so we know SACKs are happening
91 * on the connection already and don't want to wake up yet).
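*
* A hedged sketch of advertising the feature. This file itself tests
* the tcpcb spelling TF2_SUPPORTS_MBUFQ (see tcp_hptsi() below); the
* two companion flags are assumed here to follow the same TF2_
* renaming on the tcpcb:
*
*	tp->t_flags2 |= TF2_SUPPORTS_MBUFQ;
*
* and, while a rack-style timer is pending so SACK arrivals need not
* force an immediate wakeup:
*
*	tp->t_flags2 |= TF2_MBUF_QUEUE_READY | TF2_DONT_SACK_QUEUE;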
93 * There is a common function within the rack_bbr_common code,
94 * i.e. ctf_do_queued_segments(). This function
95 * knows how to take the input queue of packets from tp->t_inqueue
96 * and process them, digging out all the arguments, calling any bpf tap and
97 * calling into tfb_do_segment_nounlock(). The common
98 * function (ctf_do_queued_segments()) requires that
99 * you have defined the tfb_do_segment_nounlock() as
100 * described above.
103 #include <sys/param.h>
105 #include <sys/interrupt.h>
106 #include <sys/module.h>
107 #include <sys/kernel.h>
108 #include <sys/hhook.h>
109 #include <sys/malloc.h>
110 #include <sys/mbuf.h>
111 #include <sys/proc.h> /* for proc0 declaration */
112 #include <sys/socket.h>
113 #include <sys/socketvar.h>
114 #include <sys/sysctl.h>
115 #include <sys/systm.h>
116 #include <sys/refcount.h>
117 #include <sys/sched.h>
118 #include <sys/queue.h>
120 #include <sys/counter.h>
121 #include <sys/time.h>
122 #include <sys/kthread.h>
123 #include <sys/kern_prefetch.h>
128 #include <net/route.h>
129 #include <net/vnet.h>
132 #include <net/netisr.h>
133 #include <net/rss_config.h>
136 #define TCPSTATES /* for logging */
138 #include <netinet/in.h>
139 #include <netinet/in_kdtrace.h>
140 #include <netinet/in_pcb.h>
141 #include <netinet/ip.h>
142 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
143 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
144 #include <netinet/ip_var.h>
145 #include <netinet/ip6.h>
146 #include <netinet6/in6_pcb.h>
147 #include <netinet6/ip6_var.h>
148 #include <netinet/tcp.h>
149 #include <netinet/tcp_fsm.h>
150 #include <netinet/tcp_seq.h>
151 #include <netinet/tcp_timer.h>
152 #include <netinet/tcp_var.h>
153 #include <netinet/tcpip.h>
154 #include <netinet/cc/cc.h>
155 #include <netinet/tcp_hpts.h>
156 #include <netinet/tcp_log_buf.h>
159 #include <netinet/tcp_offload.h>
163 * The hpts uses a 102400-slot wheel. The wheel
164 * defines the time in 10 usec increments (102400 x 10).
165 * This gives a range of 10 usec - 1024 ms to place
166 * an entry within. If the user requests more than
167 * 1.024 seconds, a remainder is attached and the hpts,
168 * when seeing the remainder, will re-insert the
169 * inpcb forward in time from where it is until
170 * the remainder is zero.
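*
* Worked example (a sketch; the numbers follow from the constants
* here and the t_hpts_request handling in tcp_hptsi() below): a
* request of 2,500,000 usec is 250,000 slots, but the wheel holds
* only 102,400. The tcpcb is placed as far out as the wheel allows
* and t_hpts_request keeps the remaining (roughly 147,600) slots;
* each time the pacer reaches the entry it re-inserts it further
* out, until the remainder hits zero and the connection is finally
* processed.
*/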
173 #define NUM_OF_HPTSI_SLOTS 102400
175 /* Each hpts has its own p_mtx which is used for locking */
176 #define HPTS_MTX_ASSERT(hpts) mtx_assert(&(hpts)->p_mtx, MA_OWNED)
177 #define HPTS_LOCK(hpts) mtx_lock(&(hpts)->p_mtx)
178 #define HPTS_UNLOCK(hpts) mtx_unlock(&(hpts)->p_mtx)
179 struct tcp_hpts_entry {
180 /* Cache line 0x00 */
181 struct mtx p_mtx; /* Mutex for hpts */
182 struct timeval p_mysleep; /* Our min sleep time */
183 uint64_t syscall_cnt;
184 uint64_t sleeping; /* What the actual sleep was (if sleeping) */
185 uint16_t p_hpts_active; /* Flag that says hpts is awake */
186 uint8_t p_wheel_complete; /* have we completed the wheel arc walk? */
187 uint32_t p_curtick; /* Tick in 10 us the hpts is going to */
188 uint32_t p_runningslot; /* Current tick we are at if we are running */
189 uint32_t p_prev_slot; /* Previous slot we were on */
190 uint32_t p_cur_slot; /* Current slot in wheel hpts is draining */
191 uint32_t p_nxt_slot; /* The next slot outside the current range of
192 * slots that the hpts is running on. */
193 int32_t p_on_queue_cnt; /* Count on queue in this hpts */
194 uint32_t p_lasttick; /* Last tick before the current one */
195 uint8_t p_direct_wake :1, /* boolean */
196 p_on_min_sleep:1, /* boolean */
197 p_hpts_wake_scheduled:1, /* boolean */
199 uint8_t p_fill[3]; /* Fill to 32 bits */
200 /* Cache line 0x40 */
202 TAILQ_HEAD(, tcpcb) head;
205 } *p_hptss; /* Hptsi wheel */
206 uint32_t p_hpts_sleep_time; /* Current sleep interval having a max
208 uint32_t overidden_sleep; /* what was overridden by min-sleep for logging */
209 uint32_t saved_lasttick; /* for logging */
210 uint32_t saved_curtick; /* for logging */
211 uint32_t saved_curslot; /* for logging */
212 uint32_t saved_prev_slot; /* for logging */
213 uint32_t p_delayed_by; /* How much were we delayed by */
214 /* Cache line 0x80 */
215 struct sysctl_ctx_list hpts_ctx;
216 struct sysctl_oid *hpts_root;
217 struct intr_event *ie;
219 uint16_t p_num; /* The hpts number, one per cpu */
220 uint16_t p_cpu; /* The hpts CPU */
221 /* There is extra space in here */
222 /* Cache line 0x100 */
223 struct callout co __aligned(CACHE_LINE_SIZE);
224 } __aligned(CACHE_LINE_SIZE);
226 static struct tcp_hptsi {
227 struct cpu_group **grps;
228 struct tcp_hpts_entry **rp_ent; /* Array of hptss */
229 uint32_t *cts_last_ran;
231 uint32_t rp_num_hptss; /* Number of hpts threads */
234 MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
235 #ifdef RSS
236 static int tcp_bind_threads = 1;
237 #else
238 static int tcp_bind_threads = 2;
239 #endif
240 static int tcp_use_irq_cpu = 0;
241 static uint32_t *cts_last_ran;
242 static int hpts_does_tp_logging = 0;
244 static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout);
245 static void tcp_hpts_thread(void *ctx);
246 static void tcp_init_hptsi(void *st);
248 int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
249 static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
250 static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
251 static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;
254 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
255 "TCP Hpts controls");
256 SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
257 "TCP Hpts statistics");
259 #define timersub(tvp, uvp, vvp) \
260 do { \
261 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
262 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
263 if ((vvp)->tv_usec < 0) { \
264 (vvp)->tv_sec--; \
265 (vvp)->tv_usec += 1000000; \
266 } \
267 } while (0)
269 static int32_t tcp_hpts_precision = 120;
271 static struct hpts_domain_info {
272 int count;
273 int cpu[MAXCPU];
274 } hpts_domains[MAXMEMDOM];
276 counter_u64_t hpts_hopelessly_behind;
278 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
279 &hpts_hopelessly_behind,
280 "Number of times hpts could not catch up and was behind hopelessly");
282 counter_u64_t hpts_loops;
284 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
285 &hpts_loops, "Number of times hpts had to loop to catch up");
287 counter_u64_t back_tosleep;
289 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
290 &back_tosleep, "Number of times hpts found no tcbs");
292 counter_u64_t combined_wheel_wrap;
294 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
295 &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
297 counter_u64_t wheel_wrap;
299 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
300 &wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
302 counter_u64_t hpts_direct_call;
303 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
304 &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");
306 counter_u64_t hpts_wake_timeout;
308 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
309 &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");
311 counter_u64_t hpts_direct_awakening;
313 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
314 &hpts_direct_awakening, "Number of times hpts threads woke up via a direct wakeup");
316 counter_u64_t hpts_back_tosleep;
318 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
319 &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep with no work to do");
321 counter_u64_t cpu_uses_flowid;
322 counter_u64_t cpu_uses_random;
324 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
325 &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
326 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
327 &cpu_uses_random, "Number of times when setting cpuid we used a random value");
329 TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
330 TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
331 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
332 &tcp_bind_threads, 2,
333 "Thread Binding tunable");
334 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
335 &tcp_use_irq_cpu, 0,
336 "Use of irq CPU tunable");
337 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
338 &tcp_hpts_precision, 120,
339 "Value for PRE() precision of callout");
340 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
341 &conn_cnt_thresh, 0,
342 "How many connections (below) make us use the callout based mechanism");
343 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
344 &hpts_does_tp_logging, 0,
345 "Do we add to any tp that has logging on pacer logs");
346 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
347 &dynamic_min_sleep, 250,
348 "What is the dynamic minsleep value?");
349 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
350 &dynamic_max_sleep, 5000,
351 "What is the dynamic maxsleep value?");
353 static int32_t max_pacer_loops = 10;
354 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
355 &max_pacer_loops, 10,
356 "What is the maximum number of times the pacer will loop trying to catch up");
358 #define HPTS_MAX_SLEEP_ALLOWED (NUM_OF_HPTSI_SLOTS/2)
360 static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;
363 sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
368 new = hpts_sleep_max;
369 error = sysctl_handle_int(oidp, &new, 0, req);
370 if (error == 0 && req->newptr) {
371 if ((new < (dynamic_min_sleep/HPTS_TICKS_PER_SLOT)) ||
372 (new > HPTS_MAX_SLEEP_ALLOWED))
375 hpts_sleep_max = new;
381 sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
386 new = tcp_min_hptsi_time;
387 error = sysctl_handle_int(oidp, &new, 0, req);
388 if (error == 0 && req->newptr) {
389 if (new < LOWEST_SLEEP_ALLOWED)
392 tcp_min_hptsi_time = new;
397 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
398 CTLTYPE_UINT | CTLFLAG_RW,
399 &hpts_sleep_max, 0,
400 &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
401 "Maximum time hpts will sleep in slots");
403 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
404 CTLTYPE_UINT | CTLFLAG_RW,
405 &tcp_min_hptsi_time, 0,
406 &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
407 "The minimum time the hpts must sleep before processing more slots");
409 static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
410 static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
411 static int tcp_hpts_no_wake_over_thresh = 1;
413 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
414 &ticks_indicate_more_sleep, 0,
415 "If we only process this many or less on a timeout, we need longer sleep on the next callout");
416 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
417 &ticks_indicate_less_sleep, 0,
418 "If we process this many or more on a timeout, we need less sleep on the next callout");
419 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
420 &tcp_hpts_no_wake_over_thresh, 0,
421 "When we are over the threshold on the pacer do we prohibit wakeups?");
424 hpts_random_cpu(void)
430 cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
435 tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
436 int slots_to_run, int idx, int from_callout)
438 union tcp_log_stackspecific log;
439 /*
440 * Unused logs are:
441 * 64 bit - delRate, rttProp, bw_inuse
443 * 8 bit - bbr_state, bbr_substate, inhpts;
444 */
445 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
446 log.u_bbr.flex1 = hpts->p_nxt_slot;
447 log.u_bbr.flex2 = hpts->p_cur_slot;
448 log.u_bbr.flex3 = hpts->p_prev_slot;
449 log.u_bbr.flex4 = idx;
450 log.u_bbr.flex5 = hpts->p_curtick;
451 log.u_bbr.flex6 = hpts->p_on_queue_cnt;
452 log.u_bbr.flex7 = hpts->p_cpu;
453 log.u_bbr.flex8 = (uint8_t)from_callout;
454 log.u_bbr.inflight = slots_to_run;
455 log.u_bbr.applimited = hpts->overidden_sleep;
456 log.u_bbr.delivered = hpts->saved_curtick;
457 log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
458 log.u_bbr.epoch = hpts->saved_curslot;
459 log.u_bbr.lt_epoch = hpts->saved_prev_slot;
460 log.u_bbr.pkts_out = hpts->p_delayed_by;
461 log.u_bbr.lost = hpts->p_hpts_sleep_time;
462 log.u_bbr.pacing_gain = hpts->p_cpu;
463 log.u_bbr.pkt_epoch = hpts->p_runningslot;
464 log.u_bbr.use_lt_bw = 1;
465 TCP_LOG_EVENTP(tp, NULL,
466 &tptosocket(tp)->so_rcv,
467 &tptosocket(tp)->so_snd,
473 tcp_wakehpts(struct tcp_hpts_entry *hpts)
475 HPTS_MTX_ASSERT(hpts);
477 if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
478 hpts->p_direct_wake = 0;
481 if (hpts->p_hpts_wake_scheduled == 0) {
482 hpts->p_hpts_wake_scheduled = 1;
483 swi_sched(hpts->ie_cookie, 0);
488 hpts_timeout_swi(void *arg)
490 struct tcp_hpts_entry *hpts;
492 hpts = (struct tcp_hpts_entry *)arg;
493 swi_sched(hpts->ie_cookie, 0);
497 tcp_hpts_insert_internal(struct tcpcb *tp, struct tcp_hpts_entry *hpts)
499 struct inpcb *inp = tptoinpcb(tp);
502 INP_WLOCK_ASSERT(inp);
503 HPTS_MTX_ASSERT(hpts);
504 MPASS(hpts->p_cpu == tp->t_hpts_cpu);
505 MPASS(!(inp->inp_flags & INP_DROPPED));
507 hptsh = &hpts->p_hptss[tp->t_hpts_slot];
509 if (tp->t_in_hpts == IHPTS_NONE) {
510 tp->t_in_hpts = IHPTS_ONQUEUE;
512 } else if (tp->t_in_hpts == IHPTS_MOVING) {
513 tp->t_in_hpts = IHPTS_ONQUEUE;
515 MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
516 tp->t_hpts_gencnt = hptsh->gencnt;
518 TAILQ_INSERT_TAIL(&hptsh->head, tp, t_hpts);
520 hpts->p_on_queue_cnt++;
523 static struct tcp_hpts_entry *
524 tcp_hpts_lock(struct tcpcb *tp)
526 struct tcp_hpts_entry *hpts;
528 INP_LOCK_ASSERT(tptoinpcb(tp));
530 hpts = tcp_pace.rp_ent[tp->t_hpts_cpu];
537 tcp_hpts_release(struct tcpcb *tp)
539 bool released __diagused;
541 tp->t_in_hpts = IHPTS_NONE;
542 released = in_pcbrele_wlocked(tptoinpcb(tp));
543 MPASS(released == false);
547 * Initialize newborn tcpcb to get ready for use with HPTS.
550 tcp_hpts_init(struct tcpcb *tp)
553 tp->t_hpts_cpu = hpts_random_cpu();
554 tp->t_lro_cpu = HPTS_CPU_NONE;
555 MPASS(!(tp->t_flags2 & TF2_HPTS_CPU_SET));
559 * Called normally with the INP_LOCKED but it
560 * does not matter, the hpts lock is the key
561 * but the lock order allows us to hold the
562 * INP lock and then get the hpts lock.
565 tcp_hpts_remove(struct tcpcb *tp)
567 struct tcp_hpts_entry *hpts;
570 INP_WLOCK_ASSERT(tptoinpcb(tp));
572 hpts = tcp_hpts_lock(tp);
573 if (tp->t_in_hpts == IHPTS_ONQUEUE) {
574 hptsh = &hpts->p_hptss[tp->t_hpts_slot];
575 tp->t_hpts_request = 0;
576 if (__predict_true(tp->t_hpts_gencnt == hptsh->gencnt)) {
577 TAILQ_REMOVE(&hptsh->head, tp, t_hpts);
578 MPASS(hptsh->count > 0);
580 MPASS(hpts->p_on_queue_cnt > 0);
581 hpts->p_on_queue_cnt--;
582 tcp_hpts_release(tp);
585 * tcp_hptsi() now owns the TAILQ head of this inp.
586 * Can't TAILQ_REMOVE, just mark it.
591 TAILQ_FOREACH(tmp, &hptsh->head, t_hpts)
594 tp->t_in_hpts = IHPTS_MOVING;
595 tp->t_hpts_slot = -1;
597 } else if (tp->t_in_hpts == IHPTS_MOVING) {
599 * Handle a special race condition:
600 * tcp_hptsi() moves inpcb to detached tailq
601 * tcp_hpts_remove() marks as IHPTS_MOVING, slot = -1
602 * tcp_hpts_insert() sets slot to a meaningful value
603 * tcp_hpts_remove() again (we are here!), then in_pcbdrop()
604 * tcp_hptsi() finds pcb with meaningful slot and INP_DROPPED
606 tp->t_hpts_slot = -1;
612 hpts_slot(uint32_t wheel_slot, uint32_t plus)
615 * Given a slot on the wheel, what slot
616 * is that plus ticks out?
618 KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
619 return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
623 tick_to_wheel(uint32_t cts_in_wticks)
626 * Given a timestamp in ticks (so by
627 * default to get it to real time one
628 * would multiply by 10, i.e. the number
629 * of ticks in a slot) map it to our limited
630 * space of 0 -> NUM_OF_HPTSI_SLOTS - 1.
631 */
632 return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
636 hpts_slots_diff(int prev_slot, int slot_now)
639 * Given two slots that are someplace
640 * on our wheel, how far apart are they?
642 if (slot_now > prev_slot)
643 return (slot_now - prev_slot);
644 else if (slot_now == prev_slot)
646 * Special case, same means we can go all of our
647 * wheel less one slot.
649 return (NUM_OF_HPTSI_SLOTS - 1);
651 return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
655 * Given a slot on the wheel that is the current time
656 * mapped to the wheel (wheel_slot), what is the maximum
657 * distance forward that can be obtained without
658 * wrapping past either prev_slot or running_slot
659 * depending on the hpts state? Also if passed
660 * a uint32_t *, fill it with the slot location.
662 * Note if you do not give this function the current
663 * time (that you think it is) mapped to the wheel slot
664 * then the results will not be what you expect and
665 * could lead to invalid inserts.
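*
* For example (a hedged sketch of the inactive case handled below):
* with the pacer asleep, p_prev_slot == p_cur_slot; if wheel_slot
* has advanced 100 slots past p_prev_slot since the pacer stopped,
* only NUM_OF_HPTSI_SLOTS - 100 slots remain safe to insert into,
* since anything placed at or past p_prev_slot would be reached
* almost immediately when the pacer resumes.
*/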
667 static inline int32_t
668 max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
670 uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;
672 if ((hpts->p_hpts_active == 1) &&
673 (hpts->p_wheel_complete == 0)) {
674 end_slot = hpts->p_runningslot;
675 /* Back up one tick */
676 if (end_slot == 0)
677 end_slot = NUM_OF_HPTSI_SLOTS - 1;
678 else
679 end_slot--;
680 if (target_slot)
681 *target_slot = end_slot;
684 * For the case where we are
685 * not active, or we have
686 * completed the pass over
687 * the wheel, we can use the
688 * prev tick and subtract one from it. This puts us
689 * as far out as possible on the wheel.
691 end_slot = hpts->p_prev_slot;
692 if (end_slot == 0)
693 end_slot = NUM_OF_HPTSI_SLOTS - 1;
694 else
695 end_slot--;
696 if (target_slot)
697 *target_slot = end_slot;
699 * Now we have close to the full wheel left minus the
700 * time it has been since the pacer went to sleep. Note
701 * that wheel_slot, passed in, should be the current time
702 * from the perspective of the caller, mapped to the wheel.
704 if (hpts->p_prev_slot != wheel_slot)
705 dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
709 * dis_to_travel in this case is the space from when the
710 * pacer stopped (p_prev_slot) and where our wheel_slot
711 * is now. To know how many slots we can put it in we
712 * subtract from the wheel size. We would not want
713 * to place something after p_prev_slot or it will
714 * be run too early.
715 */
716 return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
719 * So how many slots are open between p_runningslot -> p_cur_slot?
720 * That is what is currently un-available for insertion. Special
721 * case when we are at the last slot, this gets 1, so that
722 * the answer to how many slots are available is all but 1.
724 if (hpts->p_runningslot == hpts->p_cur_slot)
727 dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
729 * How long has the pacer been running?
731 if (hpts->p_cur_slot != wheel_slot) {
732 /* The pacer is a bit late */
733 pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
735 /* The pacer is right on time, now == pacer's start time */
739 * To get the number left we can insert into we simply
740 * subtract the distance the pacer has to run from how
741 * many slots there are.
743 avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
745 * Now how many of those will we eat due to the pacer's
746 * start time (p_cur_slot) being behind the
747 * real time (wheel_slot)?
749 if (avail_on_wheel <= pacer_to_now) {
751 * Wheel wrap, we can't fit on the wheel; that
752 * is unusual, the system must be way overloaded!
753 * Insert into the assured slot, and return special
756 counter_u64_add(combined_wheel_wrap, 1);
757 *target_slot = hpts->p_nxt_slot;
761 * We know how many slots are open
762 * on the wheel (the reverse of what
763 * is left to run). Take away the time from when
764 * the pacer started to now (wheel_slot)
765 * and that tells you how many slots are
766 * open that can be inserted into that won't
767 * be touched by the pacer until later.
769 return (avail_on_wheel - pacer_to_now);
776 check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct tcpcb *tp,
777 uint32_t hptsslot, int line)
780 * Sanity checks for the pacer with invariants
783 KASSERT(hptsslot < NUM_OF_HPTSI_SLOTS,
784 ("hpts:%p tp:%p slot:%d > max", hpts, tp, hptsslot));
785 if ((hpts->p_hpts_active) &&
786 (hpts->p_wheel_complete == 0)) {
788 * If the pacer is processing an arc
789 * of the wheel, we need to make
790 * sure we are not inserting within
791 * that arc.
792 */
793 int distance, yet_to_run;
795 distance = hpts_slots_diff(hpts->p_runningslot, hptsslot);
796 if (hpts->p_runningslot != hpts->p_cur_slot)
797 yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
799 yet_to_run = 0; /* processing last slot */
800 KASSERT(yet_to_run <= distance, ("hpts:%p tp:%p slot:%d "
801 "distance:%d yet_to_run:%d rs:%d cs:%d", hpts, tp,
802 hptsslot, distance, yet_to_run, hpts->p_runningslot,
809 tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_diag *diag)
811 struct tcp_hpts_entry *hpts;
813 uint32_t slot_on, wheel_cts, last_slot, need_new_to = 0;
814 int32_t wheel_slot, maxslots;
815 bool need_wakeup = false;
817 INP_WLOCK_ASSERT(tptoinpcb(tp));
818 MPASS(!(tptoinpcb(tp)->inp_flags & INP_DROPPED));
819 MPASS(!tcp_in_hpts(tp));
822 * We now return the next-slot the hpts will be on, beyond its
823 * current run (if up) or where it was when it stopped if it is
824 * sleeping.
825 */
826 hpts = tcp_hpts_lock(tp);
829 memset(diag, 0, sizeof(struct hpts_diag));
830 diag->p_hpts_active = hpts->p_hpts_active;
831 diag->p_prev_slot = hpts->p_prev_slot;
832 diag->p_runningslot = hpts->p_runningslot;
833 diag->p_nxt_slot = hpts->p_nxt_slot;
834 diag->p_cur_slot = hpts->p_cur_slot;
835 diag->p_curtick = hpts->p_curtick;
836 diag->p_lasttick = hpts->p_lasttick;
837 diag->slot_req = slot;
838 diag->p_on_min_sleep = hpts->p_on_min_sleep;
839 diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
842 /* Ok we need to set it on the hpts in the current slot */
843 tp->t_hpts_request = 0;
844 if ((hpts->p_hpts_active == 0) || (hpts->p_wheel_complete)) {
846 * The hpts is sleeping; we want it to run in the next slot.
847 * Note that in this state p_prev_slot == p_cur_slot.
849 tp->t_hpts_slot = hpts_slot(hpts->p_prev_slot, 1);
850 if ((hpts->p_on_min_sleep == 0) &&
851 (hpts->p_hpts_active == 0))
854 tp->t_hpts_slot = hpts->p_runningslot;
855 if (__predict_true(tp->t_in_hpts != IHPTS_MOVING))
856 tcp_hpts_insert_internal(tp, hpts);
859 * Activate the hpts if it is sleeping and it is
860 * not on a minimum sleep time.
861 */
862 hpts->p_direct_wake = 1;
865 slot_on = hpts->p_nxt_slot;
870 /* Get the current time relative to the wheel */
871 wheel_cts = tcp_tv_to_hptstick(&tv);
872 /* Map it onto the wheel */
873 wheel_slot = tick_to_wheel(wheel_cts);
874 /* Now what's the max we can place it at? */
875 maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
877 diag->wheel_slot = wheel_slot;
878 diag->maxslots = maxslots;
879 diag->wheel_cts = wheel_cts;
882 /* The pacer is in a wheel wrap behind, yikes! */
885 * Reduce by 1 to prevent a forever loop in
886 * case something else is wrong. Note this
887 * probably does not hurt because, if it's true,
888 * the pacer is so far behind we will be
889 * > 1 second late calling anyway.
893 tp->t_hpts_slot = last_slot;
894 tp->t_hpts_request = slot;
895 } else if (maxslots >= slot) {
896 /* It all fits on the wheel */
897 tp->t_hpts_request = 0;
898 tp->t_hpts_slot = hpts_slot(wheel_slot, slot);
900 /* It does not fit */
901 tp->t_hpts_request = slot - maxslots;
902 tp->t_hpts_slot = last_slot;
905 diag->slot_remaining = tp->t_hpts_request;
906 diag->inp_hptsslot = tp->t_hpts_slot;
909 check_if_slot_would_be_wrong(hpts, tp, tp->t_hpts_slot, line);
911 if (__predict_true(tp->t_in_hpts != IHPTS_MOVING))
912 tcp_hpts_insert_internal(tp, hpts);
913 if ((hpts->p_hpts_active == 0) &&
914 (tp->t_hpts_request == 0) &&
915 (hpts->p_on_min_sleep == 0)) {
917 * The hpts is sleeping and NOT on a minimum
918 * sleep time; we need to figure out where
919 * it will wake up at and if we need to reschedule
920 * its callout.
921 */
922 uint32_t have_slept, yet_to_sleep;
924 /* Now do we need to restart the hpts's timer? */
925 have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
926 if (have_slept < hpts->p_hpts_sleep_time)
927 yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
929 /* We are overdue */
934 diag->have_slept = have_slept;
935 diag->yet_to_sleep = yet_to_sleep;
938 (yet_to_sleep > slot)) {
940 * We need to reschedule the hpts's time-out.
942 hpts->p_hpts_sleep_time = slot;
943 need_new_to = slot * HPTS_TICKS_PER_SLOT;
947 * Now how far out is the hpts sleeping? If active is 1, it's
948 * up and ticking and we do nothing; otherwise we may need to
949 * reschedule its callout if need_new_to is set from above.
952 hpts->p_direct_wake = 1;
955 diag->need_new_to = 0;
956 diag->co_ret = 0xffff0000;
958 } else if (need_new_to) {
965 while (need_new_to > HPTS_USEC_IN_SEC) {
966 tv.tv_sec++;
967 need_new_to -= HPTS_USEC_IN_SEC;
968 }
969 tv.tv_usec = need_new_to;
971 co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
972 hpts_timeout_swi, hpts, hpts->p_cpu,
973 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
975 diag->need_new_to = need_new_to;
976 diag->co_ret = co_ret;
979 slot_on = hpts->p_nxt_slot;
986 hpts_cpuid(struct tcpcb *tp, int *failed)
988 struct inpcb *inp = tptoinpcb(tp);
991 struct hpts_domain_info *di;
995 if (tp->t_flags2 & TF2_HPTS_CPU_SET) {
996 return (tp->t_hpts_cpu);
999 * If we are using the irq cpu set by LRO or
1000 * the driver then it overrides all other domains.
1002 if (tcp_use_irq_cpu) {
1003 if (tp->t_lro_cpu == HPTS_CPU_NONE) {
1007 return (tp->t_lro_cpu);
1009 /* If one is set the other must be the same */
1011 cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
1012 if (cpuid == NETISR_CPUID_NONE)
1013 return (hpts_random_cpu());
1018 * We don't have a flowid -> cpuid mapping, so cheat and just map
1019 * unknown flowids to a random hpts cpu. Not the best, but apparently better
1020 * than defaulting to swi 0.
1022 if (inp->inp_flowtype == M_HASHTYPE_NONE) {
1023 counter_u64_add(cpu_uses_random, 1);
1024 return (hpts_random_cpu());
1027 * Hash to a thread based on the flowid. If we are using numa,
1028 * then restrict the hash to the numa domain where the inp lives.
1032 if ((vm_ndomains == 1) ||
1033 (inp->inp_numa_domain == M_NODOM)) {
1035 cpuid = inp->inp_flowid % mp_ncpus;
1038 /* Hash into the cpu's that use that domain */
1039 di = &hpts_domains[inp->inp_numa_domain];
1040 cpuid = di->cpu[inp->inp_flowid % di->count];
1043 counter_u64_add(cpu_uses_flowid, 1);
1048 tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
1052 if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
1054 * Find next slot that is occupied and use that to
1055 * be the sleep time.
1057 for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
1058 if (TAILQ_EMPTY(&hpts->p_hptss[t].head) == 0) {
1061 t = (t + 1) % NUM_OF_HPTSI_SLOTS;
1063 KASSERT((i != NUM_OF_HPTSI_SLOTS), ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
1064 hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
1066 /* No one on the wheel; sleep for all but 400 slots, or sleep max */
1067 hpts->p_hpts_sleep_time = hpts_sleep_max;
1072 tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout)
1076 int32_t slots_to_run, i, error;
1077 int32_t loop_cnt = 0;
1078 int32_t did_prefetch = 0;
1079 int32_t prefetch_tp = 0;
1080 int32_t wrap_loop_cnt = 0;
1081 int32_t slot_pos_of_endpoint = 0;
1082 int32_t orig_exit_slot;
1083 int8_t completed_measure = 0, seen_endpoint = 0;
1085 HPTS_MTX_ASSERT(hpts);
1087 /* record previous info for any logging */
1088 hpts->saved_lasttick = hpts->p_lasttick;
1089 hpts->saved_curtick = hpts->p_curtick;
1090 hpts->saved_curslot = hpts->p_cur_slot;
1091 hpts->saved_prev_slot = hpts->p_prev_slot;
1093 hpts->p_lasttick = hpts->p_curtick;
1094 hpts->p_curtick = tcp_gethptstick(&tv);
1095 cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1096 orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1097 if ((hpts->p_on_queue_cnt == 0) ||
1098 (hpts->p_lasttick == hpts->p_curtick)) {
1100 * No time has yet passed,
1103 hpts->p_prev_slot = hpts->p_cur_slot;
1104 hpts->p_lasttick = hpts->p_curtick;
1108 hpts->p_wheel_complete = 0;
1109 HPTS_MTX_ASSERT(hpts);
1110 slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
1111 if (((hpts->p_curtick - hpts->p_lasttick) >
1112 ((NUM_OF_HPTSI_SLOTS-1) * HPTS_TICKS_PER_SLOT)) &&
1113 (hpts->p_on_queue_cnt != 0)) {
1115 * Wheel wrap is occurring; basically we
1116 * are behind and the distance between
1117 * runs has spread so much it has exceeded
1118 * the time on the wheel (1.024 seconds). This
1119 * is ugly and should NOT be happening. We
1120 * need to run the entire wheel. We last processed
1121 * p_prev_slot, so that needs to be the last slot
1122 * we run. The next slot after that should be our
1123 * reserved first slot for new entries, and after that starts
1124 * the running position. Now the problem is that the
1125 * reserved "not run yet" place does not exist
1126 * and there may be inps in there that need
1127 * running. We can merge those into the
1128 * first slot at the head.
1131 hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
1132 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
1134 * Adjust p_cur_slot to be where we are starting from;
1135 * hopefully we will catch up (fat chance if something
1136 * is broken this bad :( )
1138 hpts->p_cur_slot = hpts->p_prev_slot;
1140 * The next slot has guys to run too, and that would
1141 * be where we would normally start; let's move them into
1142 * the next slot (p_prev_slot + 2) so that we will
1143 * run them. The extra 10 usec of lateness (from being
1144 * put behind) does not really matter in this situation.
1146 TAILQ_FOREACH(tp, &hpts->p_hptss[hpts->p_nxt_slot].head,
1148 MPASS(tp->t_hpts_slot == hpts->p_nxt_slot);
1149 MPASS(tp->t_hpts_gencnt ==
1150 hpts->p_hptss[hpts->p_nxt_slot].gencnt);
1151 MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
1154 * Update gencnt and nextslot accordingly to match
1155 * the new location. This is safe since it takes both
1156 * the INP lock and the pacer mutex to change the
1157 * t_hpts_slot and t_hpts_gencnt.
1160 hpts->p_hptss[hpts->p_runningslot].gencnt;
1161 tp->t_hpts_slot = hpts->p_runningslot;
1163 TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot].head,
1164 &hpts->p_hptss[hpts->p_nxt_slot].head, t_hpts);
1165 hpts->p_hptss[hpts->p_runningslot].count +=
1166 hpts->p_hptss[hpts->p_nxt_slot].count;
1167 hpts->p_hptss[hpts->p_nxt_slot].count = 0;
1168 hpts->p_hptss[hpts->p_nxt_slot].gencnt++;
1169 slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
1170 counter_u64_add(wheel_wrap, 1);
1173 * Nxt slot is always one after p_runningslot though
1174 * it's not usually used unless we are doing wheel wrap.
1176 hpts->p_nxt_slot = hpts->p_prev_slot;
1177 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
1179 if (hpts->p_on_queue_cnt == 0) {
1182 for (i = 0; i < slots_to_run; i++) {
1183 struct tcpcb *tp, *ntp;
1184 TAILQ_HEAD(, tcpcb) head = TAILQ_HEAD_INITIALIZER(head);
1185 struct hptsh *hptsh;
1186 uint32_t runningslot;
1189 * Calculate our delay; if there are no extra ticks there
1190 * was no delay (i.e. if slots_to_run == 1, no delay).
1192 hpts->p_delayed_by = (slots_to_run - (i + 1)) *
1193 HPTS_TICKS_PER_SLOT;
1195 runningslot = hpts->p_runningslot;
1196 hptsh = &hpts->p_hptss[runningslot];
1197 TAILQ_SWAP(&head, &hptsh->head, tcpcb, t_hpts);
1198 hpts->p_on_queue_cnt -= hptsh->count;
1204 TAILQ_FOREACH_SAFE(tp, &head, t_hpts, ntp) {
1205 struct inpcb *inp = tptoinpcb(tp);
1210 * If we have a next tcpcb, see if we can
1211 * prefetch it. Note this may seem
1212 * "risky" since we have no locks (other
1213 * than the previous inp) and there no
1214 * assurance that ntp was not pulled while
1215 * we were processing tp and freed. If this
1216 * occurred it could mean that either:
1218 * a) Its NULL (which is fine we won't go
1219 * here) <or> b) Its valid (which is cool we
1220 * will prefetch it) <or> c) The inp got
1221 * freed back to the slab which was
1222 * reallocated. Then the piece of memory was
1223 * re-used and something else (not an
1224 * address) is in inp_ppcb. If that occurs
1225 * we don't crash, but take a TLB shootdown
1226 * performance hit (same as if it was NULL
1227 * and we tried to pre-fetch it).
1229 * Considering that the likelihood of <c> is
1230 * quite rare we will take a risk on doing
1231 * this. If performance drops after testing
1232 * we can always take this out. NB: the
1233 * kern_prefetch on amd64 actually has
1234 * protection against a bad address now via
1235 * the DMAP_() tests. This will prevent the
1236 * TLB hit, and instead if <c> occurs just
1237 * cause us to load cache with a useless
1240 * XXXGL: this comment and the prefetch action
1241 * could be outdated after tp == inp change.
1243 kern_prefetch(ntp, &prefetch_tp);
1248 if (seen_endpoint == 0) {
1250 orig_exit_slot = slot_pos_of_endpoint =
1252 } else if (completed_measure == 0) {
1253 /* Record the new position */
1254 orig_exit_slot = runningslot;
1258 if ((tp->t_flags2 & TF2_HPTS_CPU_SET) == 0) {
1264 if (__predict_false(tp->t_in_hpts == IHPTS_MOVING)) {
1265 if (tp->t_hpts_slot == -1) {
1266 tp->t_in_hpts = IHPTS_NONE;
1267 if (in_pcbrele_wlocked(inp) == false)
1271 tcp_hpts_insert_internal(tp, hpts);
1278 MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
1279 MPASS(!(inp->inp_flags & INP_DROPPED));
1280 KASSERT(runningslot == tp->t_hpts_slot,
1281 ("Hpts:%p inp:%p slot mis-aligned %u vs %u",
1282 hpts, inp, runningslot, tp->t_hpts_slot));
1284 if (tp->t_hpts_request) {
1286 * This guy is deferred out further in time
1287 * than our wheel had available on it.
1288 * Push him back on the wheel or run it
1291 uint32_t maxslots, last_slot, remaining_slots;
1293 remaining_slots = slots_to_run - (i + 1);
1294 if (tp->t_hpts_request > remaining_slots) {
1297 * How far out can we go?
1299 maxslots = max_slots_available(hpts,
1300 hpts->p_cur_slot, &last_slot);
1301 if (maxslots >= tp->t_hpts_request) {
1302 /* We can place it finally to
1303 * be processed */
1304 tp->t_hpts_slot = hpts_slot(
1305 hpts->p_runningslot,
1306 tp->t_hpts_request);
1307 tp->t_hpts_request = 0;
1309 /* Work off some more time */
1310 tp->t_hpts_slot = last_slot;
1311 tp->t_hpts_request -=
1314 tcp_hpts_insert_internal(tp, hpts);
1319 tp->t_hpts_request = 0;
1320 /* Fall through we will so do it now */
1323 tcp_hpts_release(tp);
1326 * Setup so the next time we will move to
1327 * the right CPU. This should be a rare
1328 * event. It sometimes happens when we
1329 * are the client side (usually not the
1330 * server). Somehow tcp_output() gets called
1331 * before tcp_do_segment() sets the
1332 * initial state. This means the r_cpu and
1333 * r_hpts_cpu are 0. We get on the hpts, and
1334 * then tcp_input() gets called setting up
1335 * the r_cpu to the correct value. The hpts
1336 * goes off and sees the mis-match. We
1337 * simply correct it here and the CPU will
1338 * switch to the new hpts next time the tcb
1339 * gets added to the hpts (not this one)
1344 CURVNET_SET(inp->inp_vnet);
1345 /* Let's do any logging that we might want to */
1346 if (hpts_does_tp_logging && tcp_bblogging_on(tp)) {
1347 tcp_hpts_log(hpts, tp, &tv, slots_to_run, i, from_callout);
1350 if (tp->t_fb_ptr != NULL) {
1351 kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1355 * We set TF2_HPTS_CALLS before any possible output.
1356 * The contract with the transport is that if it cares
1357 * about hpts calling it should clear the flag. That
1358 * way next time it is called it will know it is hpts.
1360 * We also only call tfb_do_queued_segments() <or>
1361 * tcp_output(). It is expected that if segments are
1362 * queued and come in that the final input mbuf will
1363 * cause a call to output if it is needed.
1365 tp->t_flags2 |= TF2_HPTS_CALLS;
1366 if ((tp->t_flags2 & TF2_SUPPORTS_MBUFQ) &&
1367 !STAILQ_EMPTY(&tp->t_inqueue)) {
1368 error = (*tp->t_fb->tfb_do_queued_segments)(tp, 0);
1370 /* The input killed the connection */
1374 error = tcp_output(tp);
1381 if (seen_endpoint) {
1383 * We now have an accurate distance between
1384 * slot_pos_of_endpoint <-> orig_exit_slot
1385 * to tell us how late we were, orig_exit_slot
1386 * is where we calculated the end of our cycle to
1387 * be when we first entered.
1389 completed_measure = 1;
1392 hpts->p_runningslot++;
1393 if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
1394 hpts->p_runningslot = 0;
1398 HPTS_MTX_ASSERT(hpts);
1399 hpts->p_delayed_by = 0;
1401 * Check to see if we took an excess amount of time and need to run
1402 * more ticks (if we did not hit ENOBUFS).
1404 hpts->p_prev_slot = hpts->p_cur_slot;
1405 hpts->p_lasttick = hpts->p_curtick;
1406 if ((from_callout == 0) || (loop_cnt > max_pacer_loops)) {
1408 * Something is seriously slow; we have
1409 * looped through processing the wheel
1410 * and by the time we cleared the
1411 * needs-to-run max_pacer_loops times
1412 * we still needed to run. That means
1413 * the system is hopelessly behind and
1414 * can never catch up :(
1416 * We will just lie to this thread
1417 * and let it think p_curtick is
1418 * correct. When it next awakens
1419 * it will find itself further behind.
1422 counter_u64_add(hpts_hopelessly_behind, 1);
1425 hpts->p_curtick = tcp_gethptstick(&tv);
1426 hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1427 if (seen_endpoint == 0) {
1428 /* We saw no endpoint but we may be looping */
1429 orig_exit_slot = hpts->p_cur_slot;
1431 if ((wrap_loop_cnt < 2) &&
1432 (hpts->p_lasttick != hpts->p_curtick)) {
1433 counter_u64_add(hpts_loops, 1);
1438 cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1440 * Set flag to tell any insert that happens from
1441 * here on that this pass over the wheel is
1442 * complete.
1443 */
1444 hpts->p_wheel_complete = 1;
1446 * Now did we spend too long running input and need to run more ticks?
1447 * Note that if wrap_loop_cnt < 2 then we should have the conditions
1448 * in the KASSERTs true. But if the wheel is behind, i.e. wrap_loop_cnt
1449 * is 2 or greater, then the conditions most likely are *not* true.
1450 * Also if we are not called from the callout, we don't run the wheel
1451 * multiple times so the slots may not align either.
1453 KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
1454 (wrap_loop_cnt >= 2) || (from_callout == 0)),
1455 ("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
1456 hpts->p_prev_slot, hpts->p_cur_slot));
1457 KASSERT(((hpts->p_lasttick == hpts->p_curtick)
1458 || (wrap_loop_cnt >= 2) || (from_callout == 0)),
1459 ("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
1460 hpts->p_lasttick, hpts->p_curtick));
1461 if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
1462 hpts->p_curtick = tcp_gethptstick(&tv);
1463 counter_u64_add(hpts_loops, 1);
1464 hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1469 tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);
1472 return(hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
1478 __tcp_set_hpts(struct tcpcb *tp, int32_t line)
1480 struct tcp_hpts_entry *hpts;
1483 INP_WLOCK_ASSERT(tptoinpcb(tp));
1485 hpts = tcp_hpts_lock(tp);
1486 if (tp->t_in_hpts == IHPTS_NONE && !(tp->t_flags2 & TF2_HPTS_CPU_SET)) {
1487 tp->t_hpts_cpu = hpts_cpuid(tp, &failed);
1489 tp->t_flags2 |= TF2_HPTS_CPU_SET;
1491 mtx_unlock(&hpts->p_mtx);
1495 __tcp_run_hpts(struct tcp_hpts_entry *hpts)
1499 if (hpts->p_hpts_active) {
1500 /* Already active */
1503 if (mtx_trylock(&hpts->p_mtx) == 0) {
1504 /* Someone else got the lock */
1507 if (hpts->p_hpts_active)
1509 hpts->syscall_cnt++;
1510 counter_u64_add(hpts_direct_call, 1);
1511 hpts->p_hpts_active = 1;
1512 ticks_ran = tcp_hptsi(hpts, 0);
1513 /* We may want to adjust the sleep values here */
1514 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1515 if (ticks_ran > ticks_indicate_less_sleep) {
1519 hpts->p_mysleep.tv_usec /= 2;
1520 if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
1521 hpts->p_mysleep.tv_usec = dynamic_min_sleep;
1522 /* Reschedule with new to value */
1523 tcp_hpts_set_max_sleep(hpts, 0);
1524 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1525 /* Validate it's in the right ranges */
1526 if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
1527 hpts->overidden_sleep = tv.tv_usec;
1528 tv.tv_usec = hpts->p_mysleep.tv_usec;
1529 } else if (tv.tv_usec > dynamic_max_sleep) {
1530 /* Let's not let sleep get above this value */
1531 hpts->overidden_sleep = tv.tv_usec;
1532 tv.tv_usec = dynamic_max_sleep;
1535 * In this mode the timer is a backstop to
1536 * all the userret/lro_flushes so we use
1537 * the dynamic value and set the on_min_sleep
1538 * flag so we will not be awoken.
1541 /* Store off to make visible the actual sleep time */
1542 hpts->sleeping = tv.tv_usec;
1543 callout_reset_sbt_on(&hpts->co, sb, 0,
1544 hpts_timeout_swi, hpts, hpts->p_cpu,
1545 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1546 } else if (ticks_ran < ticks_indicate_more_sleep) {
1547 /* For the longer sleep, don't reschedule the hpts */
1548 hpts->p_mysleep.tv_usec *= 2;
1549 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1550 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1552 hpts->p_on_min_sleep = 1;
1554 hpts->p_hpts_active = 0;
1556 HPTS_MTX_ASSERT(hpts);
1557 mtx_unlock(&hpts->p_mtx);
1560 static struct tcp_hpts_entry *
1561 tcp_choose_hpts_to_run(void)
1563 int i, oldest_idx, start, end;
1564 uint32_t cts, time_since_ran, calc;
1566 cts = tcp_get_usecs(NULL);
1568 /* Default is all one group */
1570 end = tcp_pace.rp_num_hptss;
1572 * If we have more than one L3 group figure out which one
1573 * this CPU belongs to.
1574 */
1575 if (tcp_pace.grp_cnt > 1) {
1576 for (i = 0; i < tcp_pace.grp_cnt; i++) {
1577 if (CPU_ISSET(curcpu, &tcp_pace.grps[i]->cg_mask)) {
1578 start = tcp_pace.grps[i]->cg_first;
1579 end = (tcp_pace.grps[i]->cg_last + 1);
1585 for (i = start; i < end; i++) {
1586 if (TSTMP_GT(cts, cts_last_ran[i]))
1587 calc = cts - cts_last_ran[i];
1590 if (calc > time_since_ran) {
1591 oldest_idx = i;
1592 time_since_ran = calc;
1595 if (oldest_idx >= 0)
1596 return(tcp_pace.rp_ent[oldest_idx]);
1598 return(tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
1605 static struct tcp_hpts_entry *hpts;
1606 struct epoch_tracker et;
1608 NET_EPOCH_ENTER(et);
1609 hpts = tcp_choose_hpts_to_run();
1610 __tcp_run_hpts(hpts);
1616 tcp_hpts_thread(void *ctx)
1618 struct tcp_hpts_entry *hpts;
1619 struct epoch_tracker et;
1624 hpts = (struct tcp_hpts_entry *)ctx;
1625 mtx_lock(&hpts->p_mtx);
1626 if (hpts->p_direct_wake) {
1627 /* Signaled by input or output with low occupancy count. */
1628 callout_stop(&hpts->co);
1629 counter_u64_add(hpts_direct_awakening, 1);
1631 /* Timed out, the normal case. */
1632 counter_u64_add(hpts_wake_timeout, 1);
1633 if (callout_pending(&hpts->co) ||
1634 !callout_active(&hpts->co)) {
1635 mtx_unlock(&hpts->p_mtx);
1639 callout_deactivate(&hpts->co);
1640 hpts->p_hpts_wake_scheduled = 0;
1641 NET_EPOCH_ENTER(et);
1642 if (hpts->p_hpts_active) {
1644 * We are active already. This means that a syscall
1645 * trap or LRO is running on behalf of hpts. In that case
1646 * we need to double our timeout since there seems to be
1647 * enough activity in the system that we don't need to
1648 * run as often (if we were not directly woken).
1650 if (hpts->p_direct_wake == 0) {
1651 counter_u64_add(hpts_back_tosleep, 1);
1652 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1653 hpts->p_mysleep.tv_usec *= 2;
1654 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1655 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1656 tv.tv_usec = hpts->p_mysleep.tv_usec;
1657 hpts->p_on_min_sleep = 1;
1660 * Here we have a low count on the wheel, but
1661 * somehow we still collided with one of the
1662 * connections. Let's go back to sleep for a
1663 * min sleep time, but clear the flag so we
1664 * can be awoken by an insert.
1666 hpts->p_on_min_sleep = 0;
1667 tv.tv_usec = tcp_min_hptsi_time;
1671 * Directly woken most likely to reset the
1672 * callout time.
1673 */
1675 tv.tv_usec = hpts->p_mysleep.tv_usec;
1680 hpts->p_hpts_active = 1;
1681 ticks_ran = tcp_hptsi(hpts, 1);
1683 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1684 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1685 if(hpts->p_direct_wake == 0) {
1687 * Only adjust sleep time if we were
1688 * called from the callout i.e. direct_wake == 0.
1690 if (ticks_ran < ticks_indicate_more_sleep) {
1691 hpts->p_mysleep.tv_usec *= 2;
1692 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1693 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1694 } else if (ticks_ran > ticks_indicate_less_sleep) {
1695 hpts->p_mysleep.tv_usec /= 2;
1696 if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
1697 hpts->p_mysleep.tv_usec = dynamic_min_sleep;
1700 if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
1701 hpts->overidden_sleep = tv.tv_usec;
1702 tv.tv_usec = hpts->p_mysleep.tv_usec;
1703 } else if (tv.tv_usec > dynamic_max_sleep) {
1704 /* Let's not let sleep get above this value */
1705 hpts->overidden_sleep = tv.tv_usec;
1706 tv.tv_usec = dynamic_max_sleep;
1709 * In this mode the timer is a backstop to
1710 * all the userret/lro_flushes so we use
1711 * the dynamic value and set the on_min_sleep
1712 * flag so we will not be awoken.
1714 hpts->p_on_min_sleep = 1;
1715 } else if (hpts->p_on_queue_cnt == 0) {
1717 * No one on the wheel, please wake us up
1718 * if you insert on the wheel.
1720 hpts->p_on_min_sleep = 0;
1721 hpts->overidden_sleep = 0;
1724 * We hit here when we have a low number of
1725 * clients on the wheel (our else clause).
1726 * We may need to go on min sleep; if we set
1727 * the flag we will not be awoken if someone
1728 * is inserted ahead of us. Clearing the flag
1729 * means we can be awoken. This is "old mode"
1730 * where the timer is what runs hpts mainly.
1732 if (tv.tv_usec < tcp_min_hptsi_time) {
1734 * Yes on min sleep, which means
1735 * we cannot be awoken.
1737 hpts->overidden_sleep = tv.tv_usec;
1738 tv.tv_usec = tcp_min_hptsi_time;
1739 hpts->p_on_min_sleep = 1;
1741 /* Clear the min sleep flag */
1742 hpts->overidden_sleep = 0;
1743 hpts->p_on_min_sleep = 0;
1746 HPTS_MTX_ASSERT(hpts);
1747 hpts->p_hpts_active = 0;
1749 hpts->p_direct_wake = 0;
1751 /* Store off to make visible the actual sleep time */
1752 hpts->sleeping = tv.tv_usec;
1753 callout_reset_sbt_on(&hpts->co, sb, 0,
1754 hpts_timeout_swi, hpts, hpts->p_cpu,
1755 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1757 mtx_unlock(&hpts->p_mtx);
1763 hpts_count_level(struct cpu_group *cg)
1765 int32_t count_l3, i;
1768 if (cg->cg_level == CG_SHARE_L3)
1770 /* Walk all the children looking for L3 */
1771 for (i = 0; i < cg->cg_children; i++) {
1772 count_l3 += hpts_count_level(&cg->cg_child[i]);
1778 hpts_gather_grps(struct cpu_group **grps, int32_t *at, int32_t max, struct cpu_group *cg)
1783 if (cg->cg_level == CG_SHARE_L3) {
1792 /* Walk all the children looking for L3 */
1793 for (i = 0; i < cg->cg_children; i++) {
1794 hpts_gather_grps(grps, at, max, &cg->cg_child[i]);
1799 tcp_init_hptsi(void *st)
1801 struct cpu_group *cpu_top;
1802 int32_t error __diagused;
1803 int32_t i, j, bound = 0, created = 0;
1807 struct tcp_hpts_entry *hpts;
1810 uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1814 cpu_top = smp_topo();
1818 tcp_pace.rp_num_hptss = ncpus;
1819 hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
1820 hpts_loops = counter_u64_alloc(M_WAITOK);
1821 back_tosleep = counter_u64_alloc(M_WAITOK);
1822 combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
1823 wheel_wrap = counter_u64_alloc(M_WAITOK);
1824 hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
1825 hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
1826 hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
1827 hpts_direct_call = counter_u64_alloc(M_WAITOK);
1828 cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
1829 cpu_uses_random = counter_u64_alloc(M_WAITOK);
1831 sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
1832 tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
1833 sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
1834 cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
1835 tcp_pace.grp_cnt = 0;
1836 if (cpu_top == NULL) {
1837 tcp_pace.grp_cnt = 1;
1839 /* Find out how many cache level 3 domains we have */
1841 tcp_pace.grp_cnt = hpts_count_level(cpu_top);
1842 if (tcp_pace.grp_cnt == 0) {
1843 tcp_pace.grp_cnt = 1;
1845 sz = (tcp_pace.grp_cnt * sizeof(struct cpu_group *));
1846 tcp_pace.grps = malloc(sz, M_TCPHPTS, M_WAITOK);
1847 /* Now populate the groups */
1848 if (tcp_pace.grp_cnt == 1) {
1850 * All we need is the top level; all CPUs are in
1851 * the same cache, so when we use grp[0]->cg_mask
1852 * with the cg_first <-> cg_last it will include
1853 * all CPUs in it. The level here is probably
1856 tcp_pace.grps[0] = cpu_top;
1859 * Here we must find all the level three cache domains
1860 * and set up our pointers to them.
1863 hpts_gather_grps(tcp_pace.grps, &count, tcp_pace.grp_cnt, cpu_top);
1866 asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
1867 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1868 tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
1869 M_TCPHPTS, M_WAITOK | M_ZERO);
1870 tcp_pace.rp_ent[i]->p_hptss = malloc(asz, M_TCPHPTS, M_WAITOK);
1871 hpts = tcp_pace.rp_ent[i];
1873 * Init all the hpts structures that are not specifically
1874 * zero'd by the allocations. Also attach them to the
1875 * appropriate sysctl block.
1877 mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
1878 "hpts", MTX_DEF | MTX_DUPOK);
1879 for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
1880 TAILQ_INIT(&hpts->p_hptss[j].head);
1881 hpts->p_hptss[j].count = 0;
1882 hpts->p_hptss[j].gencnt = 0;
1884 sysctl_ctx_init(&hpts->hpts_ctx);
1885 sprintf(unit, "%d", i);
1886 hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
1887 SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
1890 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1892 SYSCTL_ADD_INT(&hpts->hpts_ctx,
1893 SYSCTL_CHILDREN(hpts->hpts_root),
1894 OID_AUTO, "out_qcnt", CTLFLAG_RD,
1895 &hpts->p_on_queue_cnt, 0,
1896 "Count TCB's awaiting output processing");
1897 SYSCTL_ADD_U16(&hpts->hpts_ctx,
1898 SYSCTL_CHILDREN(hpts->hpts_root),
1899 OID_AUTO, "active", CTLFLAG_RD,
1900 &hpts->p_hpts_active, 0,
1901 "Is the hpts active");
1902 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1903 SYSCTL_CHILDREN(hpts->hpts_root),
1904 OID_AUTO, "curslot", CTLFLAG_RD,
1905 &hpts->p_cur_slot, 0,
1906 "What the current running pacers goal");
1907 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1908 SYSCTL_CHILDREN(hpts->hpts_root),
1909 OID_AUTO, "runtick", CTLFLAG_RD,
1910 &hpts->p_runningslot, 0,
1911 "What the running pacers current slot is");
1912 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1913 SYSCTL_CHILDREN(hpts->hpts_root),
1914 OID_AUTO, "curtick", CTLFLAG_RD,
1915 &hpts->p_curtick, 0,
1916 "What the running pacers last tick mapped to the wheel was");
1917 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1918 SYSCTL_CHILDREN(hpts->hpts_root),
1919 OID_AUTO, "lastran", CTLFLAG_RD,
1920 &cts_last_ran[i], 0,
1921 "The last usec tick that this hpts ran");
1922 SYSCTL_ADD_LONG(&hpts->hpts_ctx,
1923 SYSCTL_CHILDREN(hpts->hpts_root),
1924 OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
1925 &hpts->p_mysleep.tv_usec,
1926 "What the running pacers is using for p_mysleep.tv_usec");
1927 SYSCTL_ADD_U64(&hpts->hpts_ctx,
1928 SYSCTL_CHILDREN(hpts->hpts_root),
1929 OID_AUTO, "now_sleeping", CTLFLAG_RD,
1931 "What the running pacers is actually sleeping for");
1932 SYSCTL_ADD_U64(&hpts->hpts_ctx,
1933 SYSCTL_CHILDREN(hpts->hpts_root),
1934 OID_AUTO, "syscall_cnt", CTLFLAG_RD,
1935 &hpts->syscall_cnt, 0,
1936 "How many times we had syscalls on this hpts");
1938 hpts->p_hpts_sleep_time = hpts_sleep_max;
1940 hpts->p_curtick = tcp_gethptstick(&tv);
1941 cts_last_ran[i] = tcp_tv_to_usectick(&tv);
1942 hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1943 hpts->p_cpu = 0xffff;
1944 hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
1945 callout_init(&hpts->co, 1);
1947 /* Don't try to bind to NUMA domains if we don't have any */
1948 if (vm_ndomains == 1 && tcp_bind_threads == 2)
1949 tcp_bind_threads = 0;
1952 * Now let's start ithreads to handle the hptss.
1954 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1955 hpts = tcp_pace.rp_ent[i];
1958 error = swi_add(&hpts->ie, "hpts",
1959 tcp_hpts_thread, (void *)hpts,
1960 SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
1962 ("Can't add hpts:%p i:%d err:%d",
1965 hpts->p_mysleep.tv_sec = 0;
1966 hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
1967 if (tcp_bind_threads == 1) {
1968 if (intr_event_bind(hpts->ie, i) == 0)
1970 } else if (tcp_bind_threads == 2) {
1971 /* Find the group for this CPU (i) and bind into it */
1972 for (j = 0; j < tcp_pace.grp_cnt; j++) {
1973 if (CPU_ISSET(i, &tcp_pace.grps[j]->cg_mask)) {
1974 if (intr_event_bind_ithread_cpuset(hpts->ie,
1975 &tcp_pace.grps[j]->cg_mask) == 0) {
1978 domain = pc->pc_domain;
1979 count = hpts_domains[domain].count;
1980 hpts_domains[domain].cpu[count] = i;
1981 hpts_domains[domain].count++;
1988 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1989 hpts->sleeping = tv.tv_usec;
1991 callout_reset_sbt_on(&hpts->co, sb, 0,
1992 hpts_timeout_swi, hpts, hpts->p_cpu,
1993 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1996 * If we somehow have an empty domain, fall back to choosing
1997 * among all hpts threads.
1999 for (i = 0; i < vm_ndomains; i++) {
2000 if (hpts_domains[i].count == 0) {
2001 tcp_bind_threads = 0;
2005 printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
2007 tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
2009 printf("HPTS is in INVARIANT mode!!\n");
2013 SYSINIT(tcphptsi, SI_SUB_SOFTINTR, SI_ORDER_ANY, tcp_init_hptsi, NULL);
2014 MODULE_VERSION(tcphpts, 1);