2 * Copyright (c) 2016-2018 Netflix, Inc.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
30 #include "opt_inet6.h"
33 /*
34 * Some notes about usage.
36 * The tcp_hpts system is designed to provide a high precision timer
37 * system for tcp. Its main purpose is to provide a mechanism for
38 * pacing packets out onto the wire. It can be used in two ways
39 * by a given TCP stack (and those two methods can be used simultaneously).
41 * First, and probably the main way it is used by Rack and BBR, it can
42 * be used to call tcp_output() of a transport stack at some time in the future.
43 * The normal way this is done is that tcp_output() of the stack schedules
44 * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
45 * slot is the time from now that the stack wants to be called but it
46 * must be converted to tcp_hpts's notion of slot. This is done with
47 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
48 * call from the tcp_output() routine might look like:
50 * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
52 * The above would schedule tcp_output() to be called in 550 microseconds.
53 * Note that if using this mechanism the stack will want to add near
54 * its top a check to prevent unwanted calls (from user land or the
55 * arrival of incoming ACKs). So it would add something like:
57 * if (tcp_in_hpts(inp))
58 *    return;
60 * to prevent output processing until the time allotted has gone by.
61 * Of course this is a bare-bones example and the stack will probably
62 * have more considerations than just the above.
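 *
 * As an illustrative sketch (hypothetical stack code, not from this
 * file; tptoinpcb() is assumed as the tcpcb-to-inpcb accessor), the
 * top of a stack's output routine might look like:
 *
 *	static int
 *	mystack_output(struct tcpcb *tp)
 *	{
 *		struct inpcb *inp = tptoinpcb(tp);
 *
 *		if (tcp_in_hpts(inp))
 *			return (0);
 *		... build and send whatever pacing allows ...
 *		tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
 *		return (0);
 *	}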
64 * In order to run input queued segments from the HPTS context the
65 * tcp stack must define an input function for
66 * tfb_do_queued_segments(). This function understands
67 * how to dequeue an array of packets that were input and
68 * knows how to call the correct processing routine.
70 * Locking here is important as well, so most likely the
71 * stack will need to define tfb_do_segment_nounlock(),
72 * splitting tfb_do_segment() into two parts. The main processing
73 * part does not unlock the INP and returns a value of 1 or 0.
74 * It returns 0 if all is well and the lock was not released. It
75 * returns 1 if we had to destroy the TCB (a reset received, etc.).
76 * The remainder of tfb_do_segment() then becomes just a simple call
77 * to the tfb_do_segment_nounlock() function, a check of the return
78 * code, and a possible unlock.
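 *
 * A minimal sketch of that split, with hypothetical names and
 * simplified arguments (illustrative only):
 *
 *	static int
 *	mystack_do_segment_nounlock(struct tcpcb *tp, struct mbuf *m)
 *	{
 *		... process the segment; if the TCB had to be
 *		... destroyed (e.g. a reset), release the lock
 *		... and return 1 ...
 *		return (0);	... all well, INP still locked ...
 *	}
 *
 *	static void
 *	mystack_do_segment(struct tcpcb *tp, struct mbuf *m)
 *	{
 *		if (mystack_do_segment_nounlock(tp, m) == 0)
 *			INP_WUNLOCK(tptoinpcb(tp));
 *	}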
80 * The stack must also set the flag on the INP that it supports this
81 * feature i.e. INP_SUPPORTS_MBUFQ. The LRO code recognizes
82 * this flag as well and will queue packets when it is set.
83 * There are other flags as well: INP_MBUF_QUEUE_READY and
84 * INP_DONT_SACK_QUEUE. The first flag tells the LRO code
85 * that we are in the pacer for output so there is no
86 * need to wake up the hpts system to get immediate
87 * input. The second tells the LRO code that it is okay,
88 * if a SACK arrives, to still defer input and let
89 * the current hpts timer run (this is usually set when
90 * a rack timer is up so we know SACKs are happening
91 * on the connection already and don't want to wake up yet).
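 *
 * As a sketch (not code from this file), a stack that supports the
 * mbuf queue might set the flags with:
 *
 *	inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;		(at stack attach)
 *	inp->inp_flags2 |= INP_MBUF_QUEUE_READY;	(while pacing output)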
93 * There is a common function within the rack_bbr_common code,
94 * i.e. ctf_do_queued_segments(). This function
95 * knows how to take the input queue of packets from tp->t_inqueue
96 * and process them, digging out all the arguments, calling any bpf tap and
97 * calling into tfb_do_segment_nounlock(). The common
98 * function (ctf_do_queued_segments()) requires that
99 * you have defined the tfb_do_segment_nounlock() as
100 * described above.
 */
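/*
 * A hedged sketch of how a stack might wire these handlers into its
 * tcp_function_block; the initializer shown is illustrative and
 * mystack_output/mystack_do_segment_nounlock are hypothetical names:
 *
 *	static struct tcp_function_block mystack_block = {
 *		.tfb_tcp_output = mystack_output,
 *		.tfb_do_queued_segments = ctf_do_queued_segments,
 *		.tfb_do_segment_nounlock = mystack_do_segment_nounlock,
 *	};
 */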
103 #include <sys/param.h>
105 #include <sys/interrupt.h>
106 #include <sys/module.h>
107 #include <sys/kernel.h>
108 #include <sys/hhook.h>
109 #include <sys/malloc.h>
110 #include <sys/mbuf.h>
111 #include <sys/proc.h> /* for proc0 declaration */
112 #include <sys/socket.h>
113 #include <sys/socketvar.h>
114 #include <sys/sysctl.h>
115 #include <sys/systm.h>
116 #include <sys/refcount.h>
117 #include <sys/sched.h>
118 #include <sys/queue.h>
120 #include <sys/counter.h>
121 #include <sys/time.h>
122 #include <sys/kthread.h>
123 #include <sys/kern_prefetch.h>
128 #include <net/route.h>
129 #include <net/vnet.h>
132 #include <net/netisr.h>
133 #include <net/rss_config.h>
136 #define TCPSTATES /* for logging */
138 #include <netinet/in.h>
139 #include <netinet/in_kdtrace.h>
140 #include <netinet/in_pcb.h>
141 #include <netinet/ip.h>
142 #include <netinet/ip_icmp.h> /* required for icmp_var.h */
143 #include <netinet/icmp_var.h> /* for ICMP_BANDLIM */
144 #include <netinet/ip_var.h>
145 #include <netinet/ip6.h>
146 #include <netinet6/in6_pcb.h>
147 #include <netinet6/ip6_var.h>
148 #include <netinet/tcp.h>
149 #include <netinet/tcp_fsm.h>
150 #include <netinet/tcp_seq.h>
151 #include <netinet/tcp_timer.h>
152 #include <netinet/tcp_var.h>
153 #include <netinet/tcpip.h>
154 #include <netinet/cc/cc.h>
155 #include <netinet/tcp_hpts.h>
156 #include <netinet/tcp_log_buf.h>
159 #include <netinet/tcp_offload.h>
162 /*
163 * The hpts uses a 102400-slot wheel. The wheel
164 * defines the time in 10 usec increments (102400 x 10).
165 * This gives a range of 10 usec - 1.024 sec to place
166 * an entry within. If the user requests more than
167 * 1.024 seconds, a remainder is attached and the hpts,
168 * when seeing the remainder, will re-insert the
169 * inpcb forward in time from where it is until
170 * the remainder is zero.
 */
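/*
 * Worked example (illustrative): a request of 2,000,000 usec maps to
 * 200,000 slots, far more than the 102,400-slot wheel. The entry is
 * placed at the furthest slot available (roughly a full wheel out) and
 * the excess, about 97,600 slots, is carried in inp_hpts_request and
 * worked off on subsequent passes around the wheel.
 */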
173 #define NUM_OF_HPTSI_SLOTS 102400
175 /* Each hpts has its own p_mtx which is used for locking */
176 #define HPTS_MTX_ASSERT(hpts) mtx_assert(&(hpts)->p_mtx, MA_OWNED)
177 #define HPTS_LOCK(hpts) mtx_lock(&(hpts)->p_mtx)
178 #define HPTS_UNLOCK(hpts) mtx_unlock(&(hpts)->p_mtx)
179 struct tcp_hpts_entry {
180 /* Cache line 0x00 */
181 struct mtx p_mtx; /* Mutex for hpts */
182 struct timeval p_mysleep; /* Our min sleep time */
183 uint64_t syscall_cnt;
184 uint64_t sleeping; /* What the actual sleep was (if sleeping) */
185 uint16_t p_hpts_active; /* Flag that says hpts is awake */
186 uint8_t p_wheel_complete; /* have we completed the wheel arc walk? */
187 uint32_t p_curtick; /* Tick in 10 us the hpts is going to */
188 uint32_t p_runningslot; /* Current tick we are at if we are running */
189 uint32_t p_prev_slot; /* Previous slot we were on */
190 uint32_t p_cur_slot; /* Current slot in wheel hpts is draining */
191 uint32_t p_nxt_slot; /* The next slot outside the current range of
192 * slots that the hpts is running on. */
193 int32_t p_on_queue_cnt; /* Count on queue in this hpts */
194 uint32_t p_lasttick; /* Last tick before the current one */
195 uint8_t p_direct_wake :1, /* boolean */
196 p_on_min_sleep:1, /* boolean */
197 p_hpts_wake_scheduled:1, /* boolean */
198 p_avail:5;
199 uint8_t p_fill[3]; /* Fill to 32 bits */
200 /* Cache line 0x40 */
201 struct hptsh {
202 TAILQ_HEAD(, inpcb) head;
203 uint32_t count;
204 uint32_t gencnt;
205 } *p_hptss; /* Hptsi wheel */
206 uint32_t p_hpts_sleep_time; /* Current sleep interval having a max
207 * of 255 ticks */
208 uint32_t overidden_sleep; /* what was overridden by min-sleep, for logging */
209 uint32_t saved_lasttick; /* for logging */
210 uint32_t saved_curtick; /* for logging */
211 uint32_t saved_curslot; /* for logging */
212 uint32_t saved_prev_slot; /* for logging */
213 uint32_t p_delayed_by; /* How much were we delayed by */
214 /* Cache line 0x80 */
215 struct sysctl_ctx_list hpts_ctx;
216 struct sysctl_oid *hpts_root;
217 struct intr_event *ie;
219 uint16_t p_num; /* The hpts number, one per cpu */
220 uint16_t p_cpu; /* The hpts CPU */
221 /* There is extra space in here */
222 /* Cache line 0x100 */
223 struct callout co __aligned(CACHE_LINE_SIZE);
224 } __aligned(CACHE_LINE_SIZE);
226 static struct tcp_hptsi {
227 struct cpu_group **grps;
228 struct tcp_hpts_entry **rp_ent; /* Array of hptss */
229 uint32_t *cts_last_ran;
230 uint32_t grp_cnt; /* Number of L3 cache groups */
231 uint32_t rp_num_hptss; /* Number of hpts threads */
232 } tcp_pace;
234 MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
235 #ifdef RSS
236 static int tcp_bind_threads = 1;
237 #else
238 static int tcp_bind_threads = 2;
239 #endif
240 static int tcp_use_irq_cpu = 0;
241 static uint32_t *cts_last_ran;
242 static int hpts_does_tp_logging = 0;
244 static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout);
245 static void tcp_hpts_thread(void *ctx);
246 static void tcp_init_hptsi(void *st);
248 int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
249 static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
250 static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
251 static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;
254 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
255 "TCP Hpts controls");
256 SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
257 "TCP Hpts statistics");
259 #define timersub(tvp, uvp, vvp) \
260 do { \
261 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
262 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
263 if ((vvp)->tv_usec < 0) { \
264 (vvp)->tv_sec--; \
265 (vvp)->tv_usec += 1000000; \
266 } \
267 } while (0)
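/*
 * Example use of timersub() (illustrative; slept_at is a hypothetical
 * timeval captured before going to sleep):
 *
 *	struct timeval now, delta;
 *
 *	microuptime(&now);
 *	timersub(&now, &slept_at, &delta);
 */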
269 static int32_t tcp_hpts_precision = 120;
271 static struct hpts_domain_info {
272 int count;
273 int cpu[MAXCPU];
274 } hpts_domains[MAXMEMDOM];
282 counter_u64_t hpts_hopelessly_behind;
284 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
285 &hpts_hopelessly_behind,
286 "Number of times hpts could not catch up and was behind hopelessly");
288 counter_u64_t hpts_loops;
290 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
291 &hpts_loops, "Number of times hpts had to loop to catch up");
293 counter_u64_t back_tosleep;
295 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
296 &back_tosleep, "Number of times hpts found no tcbs");
298 counter_u64_t combined_wheel_wrap;
300 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
301 &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
303 counter_u64_t wheel_wrap;
305 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
306 &wheel_wrap, "Number of times the pacer fell a full wheel behind and had to run the entire wheel");
308 counter_u64_t hpts_direct_call;
309 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
310 &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");
312 counter_u64_t hpts_wake_timeout;
314 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
315 &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");
317 counter_u64_t hpts_direct_awakening;
319 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
320 &hpts_direct_awakening, "Number of times hpts threads were awakened directly (not via the callout expiring)");
322 counter_u64_t hpts_back_tosleep;
324 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
325 &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep, finding no work");
327 counter_u64_t cpu_uses_flowid;
328 counter_u64_t cpu_uses_random;
330 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
331 &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
332 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
333 &cpu_uses_random, "Number of times when setting cpuid we used a random value");
335 TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
336 TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
337 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
338 &tcp_bind_threads, 2,
339 "Thread Binding tunable");
340 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
342 "Use of irq CPU tunable");
343 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
344 &tcp_hpts_precision, 120,
345 "Value for PRE() precision of callout");
346 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
348 "How many connections (below) make us use the callout based mechanism");
349 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
350 &hpts_does_tp_logging, 0,
351 "Do we add to any tp that has logging on pacer logs");
352 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
353 &dynamic_min_sleep, 250,
354 "What is the dynamic minsleep value?");
355 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
356 &dynamic_max_sleep, 5000,
357 "What is the dynamic maxsleep value?");
359 static int32_t max_pacer_loops = 10;
360 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
361 &max_pacer_loops, 10,
362 "What is the maximum number of times the pacer will loop trying to catch up");
364 #define HPTS_MAX_SLEEP_ALLOWED (NUM_OF_HPTSI_SLOTS/2)
366 static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;
369 sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
374 new = hpts_sleep_max;
375 error = sysctl_handle_int(oidp, &new, 0, req);
376 if (error == 0 && req->newptr) {
377 if ((new < (dynamic_min_sleep/HPTS_TICKS_PER_SLOT)) ||
378 (new > HPTS_MAX_SLEEP_ALLOWED))
379 error = EINVAL;
380 else
381 hpts_sleep_max = new;
387 sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
392 new = tcp_min_hptsi_time;
393 error = sysctl_handle_int(oidp, &new, 0, req);
394 if (error == 0 && req->newptr) {
395 if (new < LOWEST_SLEEP_ALLOWED)
396 error = EINVAL;
397 else
398 tcp_min_hptsi_time = new;
403 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
404 CTLTYPE_UINT | CTLFLAG_RW,
406 &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
407 "Maximum time hpts will sleep in slots");
409 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
410 CTLTYPE_UINT | CTLFLAG_RW,
411 &tcp_min_hptsi_time, 0,
412 &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
413 "The minimum time the hpts must sleep before processing more slots");
415 static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
416 static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
417 static int tcp_hpts_no_wake_over_thresh = 1;
419 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
420 &ticks_indicate_more_sleep, 0,
421 "If we only process this many or less on a timeout, we need longer sleep on the next callout");
422 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
423 &ticks_indicate_less_sleep, 0,
424 "If we process this many or more on a timeout, we need less sleep on the next callout");
425 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
426 &tcp_hpts_no_wake_over_thresh, 0,
427 "When we are over the threshold on the pacer do we prohibit wakeups?");
430 tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
431 int slots_to_run, int idx, int from_callout)
433 union tcp_log_stackspecific log;
436 * 64 bit - delRate, rttProp, bw_inuse
438 * 8 bit - bbr_state, bbr_substate, inhpts;
440 memset(&log.u_bbr, 0, sizeof(log.u_bbr));
441 log.u_bbr.flex1 = hpts->p_nxt_slot;
442 log.u_bbr.flex2 = hpts->p_cur_slot;
443 log.u_bbr.flex3 = hpts->p_prev_slot;
444 log.u_bbr.flex4 = idx;
445 log.u_bbr.flex5 = hpts->p_curtick;
446 log.u_bbr.flex6 = hpts->p_on_queue_cnt;
447 log.u_bbr.flex7 = hpts->p_cpu;
448 log.u_bbr.flex8 = (uint8_t)from_callout;
449 log.u_bbr.inflight = slots_to_run;
450 log.u_bbr.applimited = hpts->overidden_sleep;
451 log.u_bbr.delivered = hpts->saved_curtick;
452 log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
453 log.u_bbr.epoch = hpts->saved_curslot;
454 log.u_bbr.lt_epoch = hpts->saved_prev_slot;
455 log.u_bbr.pkts_out = hpts->p_delayed_by;
456 log.u_bbr.lost = hpts->p_hpts_sleep_time;
457 log.u_bbr.pacing_gain = hpts->p_cpu;
458 log.u_bbr.pkt_epoch = hpts->p_runningslot;
459 log.u_bbr.use_lt_bw = 1;
460 TCP_LOG_EVENTP(tp, NULL,
461 &tptosocket(tp)->so_rcv,
462 &tptosocket(tp)->so_snd,
468 tcp_wakehpts(struct tcp_hpts_entry *hpts)
470 HPTS_MTX_ASSERT(hpts);
472 if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
473 hpts->p_direct_wake = 0;
476 if (hpts->p_hpts_wake_scheduled == 0) {
477 hpts->p_hpts_wake_scheduled = 1;
478 swi_sched(hpts->ie_cookie, 0);
483 hpts_timeout_swi(void *arg)
485 struct tcp_hpts_entry *hpts;
487 hpts = (struct tcp_hpts_entry *)arg;
488 swi_sched(hpts->ie_cookie, 0);
492 inp_hpts_insert(struct inpcb *inp, struct tcp_hpts_entry *hpts)
496 INP_WLOCK_ASSERT(inp);
497 HPTS_MTX_ASSERT(hpts);
498 MPASS(hpts->p_cpu == inp->inp_hpts_cpu);
499 MPASS(!(inp->inp_flags & INP_DROPPED));
501 hptsh = &hpts->p_hptss[inp->inp_hptsslot];
503 if (inp->inp_in_hpts == IHPTS_NONE) {
504 inp->inp_in_hpts = IHPTS_ONQUEUE;
506 } else if (inp->inp_in_hpts == IHPTS_MOVING) {
507 inp->inp_in_hpts = IHPTS_ONQUEUE;
509 MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
510 inp->inp_hpts_gencnt = hptsh->gencnt;
512 TAILQ_INSERT_TAIL(&hptsh->head, inp, inp_hpts);
514 hpts->p_on_queue_cnt++;
517 static struct tcp_hpts_entry *
518 tcp_hpts_lock(struct inpcb *inp)
520 struct tcp_hpts_entry *hpts;
522 INP_LOCK_ASSERT(inp);
524 hpts = tcp_pace.rp_ent[inp->inp_hpts_cpu];
531 inp_hpts_release(struct inpcb *inp)
533 bool released __diagused;
535 inp->inp_in_hpts = IHPTS_NONE;
536 released = in_pcbrele_wlocked(inp);
537 MPASS(released == false);
540 /*
541 * Called normally with the INP write-locked, but it
542 * does not matter; the hpts lock is the key,
543 * and the lock order allows us to hold the
544 * INP lock and then get the hpts lock.
545 */
547 tcp_hpts_remove(struct inpcb *inp)
549 struct tcp_hpts_entry *hpts;
552 INP_WLOCK_ASSERT(inp);
554 hpts = tcp_hpts_lock(inp);
555 if (inp->inp_in_hpts == IHPTS_ONQUEUE) {
556 hptsh = &hpts->p_hptss[inp->inp_hptsslot];
557 inp->inp_hpts_request = 0;
558 if (__predict_true(inp->inp_hpts_gencnt == hptsh->gencnt)) {
559 TAILQ_REMOVE(&hptsh->head, inp, inp_hpts);
560 MPASS(hptsh->count > 0);
562 MPASS(hpts->p_on_queue_cnt > 0);
563 hpts->p_on_queue_cnt--;
564 inp_hpts_release(inp);
567 * tcp_hptsi() now owns the TAILQ head of this inp.
568 * Can't TAILQ_REMOVE, just mark it.
573 TAILQ_FOREACH(tmp, &hptsh->head, inp_hpts)
576 inp->inp_in_hpts = IHPTS_MOVING;
577 inp->inp_hptsslot = -1;
579 } else if (inp->inp_in_hpts == IHPTS_MOVING) {
581 * Handle a special race condition:
582 * tcp_hptsi() moves inpcb to detached tailq
583 * tcp_hpts_remove() marks as IHPTS_MOVING, slot = -1
584 * tcp_hpts_insert() sets slot to a meaningful value
585 * tcp_hpts_remove() again (we are here!), then in_pcbdrop()
586 * tcp_hptsi() finds pcb with meaningful slot and INP_DROPPED
588 inp->inp_hptsslot = -1;
594 tcp_in_hpts(struct inpcb *inp)
597 return (inp->inp_in_hpts == IHPTS_ONQUEUE);
601 hpts_slot(uint32_t wheel_slot, uint32_t plus)
603 /*
604 * Given a slot on the wheel, what slot
605 * is that plus 'plus' slots out?
606 */
607 KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
608 return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
612 tick_to_wheel(uint32_t cts_in_wticks)
614 /*
615 * Given a timestamp in ticks (so by
616 * default to get it to real time one
617 * would multiply by 10, i.e. the number
618 * of ticks in a slot) map it to our limited
619 * space wheel.
620 */
621 return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
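/*
 * Example (illustrative): with the 102400-slot wheel, a tick count of
 * 204805 maps to slot 204805 % 102400 == 5; the mapping wraps every
 * 1.024 seconds worth of 10 usec ticks.
 */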
625 hpts_slots_diff(int prev_slot, int slot_now)
627 /*
628 * Given two slots that are someplace
629 * on our wheel, how far apart are they?
630 */
631 if (slot_now > prev_slot)
632 return (slot_now - prev_slot);
633 else if (slot_now == prev_slot)
635 * Special case, same means we can go all of our
636 * wheel less one slot.
638 return (NUM_OF_HPTSI_SLOTS - 1);
640 return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
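/*
 * Worked examples (illustrative): hpts_slots_diff(10, 25) == 15;
 * across the wrap, hpts_slots_diff(102398, 5) == (102400 - 102398) + 5
 * == 7; and hpts_slots_diff(7, 7) == 102399, a full wheel less one.
 */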
643 /*
644 * Given a slot on the wheel that is the current time
645 * mapped to the wheel (wheel_slot), what is the maximum
646 * distance forward that can be obtained without
647 * wrapping past either prev_slot or running_slot
648 * depending on the hpts state? Also if passed
649 * a uint32_t *, fill it with the slot location.
651 * Note if you do not give this function the current
652 * time (that you think it is) mapped to the wheel slot
653 * then the results will not be what you expect and
654 * could lead to invalid inserts.
655 */
656 static inline int32_t
657 max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
659 uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;
661 if ((hpts->p_hpts_active == 1) &&
662 (hpts->p_wheel_complete == 0)) {
663 end_slot = hpts->p_runningslot;
664 /* Back up one tick */
666 end_slot = NUM_OF_HPTSI_SLOTS - 1;
670 *target_slot = end_slot;
671 } else {
672 /*
673 * For the case where we are
674 * not active, or we have
675 * completed the pass over
676 * the wheel, we can use the
677 * prev slot and subtract one from it. This puts us
678 * as far out as possible on the wheel.
679 */
680 end_slot = hpts->p_prev_slot;
682 end_slot = NUM_OF_HPTSI_SLOTS - 1;
686 *target_slot = end_slot;
687 /*
688 * Now we have close to the full wheel left minus the
689 * time it has been since the pacer went to sleep. Note
690 * that wheel_slot, passed in, should be the current time
691 * from the perspective of the caller, mapped to the wheel.
692 */
693 if (hpts->p_prev_slot != wheel_slot)
694 dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
698 * dis_to_travel in this case is the space from when the
699 * pacer stopped (p_prev_slot) and where our wheel_slot
700 * is now. To know how many slots we can put it in we
701 * subtract from the wheel size. We would not want
702 * to place something after p_prev_slot or it will
703 * be run too soon.
705 return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
708 * So how many slots are open between p_runningslot -> p_cur_slot
709 * that is what is currently un-available for insertion. Special
710 * case when we are at the last slot, this gets 1, so that
711 * the answer to how many slots are available is all but 1.
713 if (hpts->p_runningslot == hpts->p_cur_slot)
716 dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
718 * How long has the pacer been running?
720 if (hpts->p_cur_slot != wheel_slot) {
721 /* The pacer is a bit late */
722 pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
724 /* The pacer is right on time, now == pacers start time */
728 * To get the number left we can insert into we simply
729 * subtract the distance the pacer has to run from how
730 * many slots there are.
732 avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
734 * Now how many of those we will eat due to the pacer's
735 * time (p_cur_slot) of start being behind the
736 * real time (wheel_slot)?
738 if (avail_on_wheel <= pacer_to_now) {
739 /*
740 * Wheel wrap; we can't fit on the wheel, that
741 * is unusual, the system must be way overloaded!
742 * Insert into the assured slot, and return the
743 * special "slot" value.
744 */
745 counter_u64_add(combined_wheel_wrap, 1);
746 *target_slot = hpts->p_nxt_slot;
749 /*
750 * We know how many slots are open
751 * on the wheel (the reverse of what
752 * is left to run). Take away the time
753 * the pacer started to now (wheel_slot)
754 * and that tells you how many slots are
755 * open that can be inserted into that won't
756 * be touched by the pacer until later.
757 */
758 return (avail_on_wheel - pacer_to_now);
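/*
 * Worked example (illustrative): suppose the pacer is asleep with
 * p_prev_slot == 100 and the caller's current time maps to
 * wheel_slot == 110. Then *target_slot is set to 99 (one before
 * p_prev_slot), dis_to_travel is 10, and the function returns
 * 102400 - 10 == 102390 slots available for insertion.
 */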
765 check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t inp_hptsslot, int line)
768 * Sanity checks for the pacer with invariants
771 KASSERT(inp_hptsslot < NUM_OF_HPTSI_SLOTS,
772 ("hpts:%p inp:%p slot:%d > max",
773 hpts, inp, inp_hptsslot));
774 if ((hpts->p_hpts_active) &&
775 (hpts->p_wheel_complete == 0)) {
776 /*
777 * If the pacer is processing an arc
778 * of the wheel, we need to make
779 * sure we are not inserting within
780 * that arc.
781 */
782 int distance, yet_to_run;
784 distance = hpts_slots_diff(hpts->p_runningslot, inp_hptsslot);
785 if (hpts->p_runningslot != hpts->p_cur_slot)
786 yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
788 yet_to_run = 0; /* processing last slot */
789 KASSERT(yet_to_run <= distance,
790 ("hpts:%p inp:%p slot:%d distance:%d yet_to_run:%d rs:%d cs:%d",
791 hpts, inp, inp_hptsslot,
792 distance, yet_to_run,
793 hpts->p_runningslot, hpts->p_cur_slot));
799 tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag)
801 struct tcp_hpts_entry *hpts;
803 uint32_t slot_on, wheel_cts, last_slot, need_new_to = 0;
804 int32_t wheel_slot, maxslots;
805 bool need_wakeup = false;
807 INP_WLOCK_ASSERT(inp);
808 MPASS(!tcp_in_hpts(inp));
809 MPASS(!(inp->inp_flags & INP_DROPPED));
811 /*
812 * We now return the next-slot the hpts will be on, beyond its
813 * current run (if up) or where it was when it stopped, if it is
814 * sleeping.
815 */
816 hpts = tcp_hpts_lock(inp);
819 memset(diag, 0, sizeof(struct hpts_diag));
820 diag->p_hpts_active = hpts->p_hpts_active;
821 diag->p_prev_slot = hpts->p_prev_slot;
822 diag->p_runningslot = hpts->p_runningslot;
823 diag->p_nxt_slot = hpts->p_nxt_slot;
824 diag->p_cur_slot = hpts->p_cur_slot;
825 diag->p_curtick = hpts->p_curtick;
826 diag->p_lasttick = hpts->p_lasttick;
827 diag->slot_req = slot;
828 diag->p_on_min_sleep = hpts->p_on_min_sleep;
829 diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
832 /* Ok we need to set it on the hpts in the current slot */
833 inp->inp_hpts_request = 0;
834 if ((hpts->p_hpts_active == 0) || (hpts->p_wheel_complete)) {
835 /*
836 * A sleeping hpts; we want it in the next slot to run.
837 * Note that in this state p_prev_slot == p_cur_slot.
838 */
839 inp->inp_hptsslot = hpts_slot(hpts->p_prev_slot, 1);
840 if ((hpts->p_on_min_sleep == 0) &&
841 (hpts->p_hpts_active == 0))
842 need_wakeup = true;
843 } else
844 inp->inp_hptsslot = hpts->p_runningslot;
845 if (__predict_true(inp->inp_in_hpts != IHPTS_MOVING))
846 inp_hpts_insert(inp, hpts);
847 if (need_wakeup) {
848 /*
849 * Activate the hpts if it is sleeping and its
850 * timeout is not 1.
851 */
852 hpts->p_direct_wake = 1;
855 slot_on = hpts->p_nxt_slot;
860 /* Get the current time relative to the wheel */
861 wheel_cts = tcp_tv_to_hptstick(&tv);
862 /* Map it onto the wheel */
863 wheel_slot = tick_to_wheel(wheel_cts);
864 /* Now what's the max we can place it at? */
865 maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
867 diag->wheel_slot = wheel_slot;
868 diag->maxslots = maxslots;
869 diag->wheel_cts = wheel_cts;
872 /* The pacer is in a wheel wrap behind, yikes! */
873 if (slot > 1) {
874 /*
875 * Reduce by 1 to prevent a forever loop in
876 * case something else is wrong. Note this
877 * probably does not hurt because, if the pacer
878 * truly is that far behind, we will be
879 * > 1 second late calling anyway.
880 */
881 slot--;
882 }
883 inp->inp_hptsslot = last_slot;
884 inp->inp_hpts_request = slot;
885 } else if (maxslots >= slot) {
886 /* It all fits on the wheel */
887 inp->inp_hpts_request = 0;
888 inp->inp_hptsslot = hpts_slot(wheel_slot, slot);
890 /* It does not fit */
891 inp->inp_hpts_request = slot - maxslots;
892 inp->inp_hptsslot = last_slot;
895 diag->slot_remaining = inp->inp_hpts_request;
896 diag->inp_hptsslot = inp->inp_hptsslot;
899 check_if_slot_would_be_wrong(hpts, inp, inp->inp_hptsslot, line);
901 if (__predict_true(inp->inp_in_hpts != IHPTS_MOVING))
902 inp_hpts_insert(inp, hpts);
903 if ((hpts->p_hpts_active == 0) &&
904 (inp->inp_hpts_request == 0) &&
905 (hpts->p_on_min_sleep == 0)) {
906 /*
907 * The hpts is sleeping and NOT on a minimum
908 * sleep time; we need to figure out where
909 * it will wake up at, and if we need to reschedule
910 * its time-out.
911 */
912 uint32_t have_slept, yet_to_sleep;
914 /* Now do we need to restart the hpts's timer? */
915 have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
916 if (have_slept < hpts->p_hpts_sleep_time)
917 yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
918 else {
919 /* We are over-due */
920 yet_to_sleep = 0;
921 need_wakeup = true;
922 }
924 diag->have_slept = have_slept;
925 diag->yet_to_sleep = yet_to_sleep;
928 (yet_to_sleep > slot)) {
929 /*
930 * We need to reschedule the hpts's time-out.
931 */
932 hpts->p_hpts_sleep_time = slot;
933 need_new_to = slot * HPTS_TICKS_PER_SLOT;
937 * Now how far is the hpts sleeping to? if active is 1, its
938 * up and ticking we do nothing, otherwise we may need to
939 * reschedule its callout if need_new_to is set from above.
942 hpts->p_direct_wake = 1;
945 diag->need_new_to = 0;
946 diag->co_ret = 0xffff0000;
948 } else if (need_new_to) {
955 while (need_new_to > HPTS_USEC_IN_SEC) {
956 tv.tv_sec++;
957 need_new_to -= HPTS_USEC_IN_SEC;
958 }
959 tv.tv_usec = need_new_to;
961 co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
962 hpts_timeout_swi, hpts, hpts->p_cpu,
963 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
965 diag->need_new_to = need_new_to;
966 diag->co_ret = co_ret;
969 slot_on = hpts->p_nxt_slot;
976 hpts_random_cpu(struct inpcb *inp){
978 * No flow type set; distribute the load randomly.
984 * Shortcut if it is already set. XXXGL: does it happen?
986 if (inp->inp_hpts_cpu_set) {
987 return (inp->inp_hpts_cpu);
988 }
989 /* Nothing set, use a random number */
990 ran = arc4random();
991 cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
992 return (cpuid);
996 hpts_cpuid(struct inpcb *inp, int *failed)
1000 struct hpts_domain_info *di;
1004 if (inp->inp_hpts_cpu_set) {
1005 return (inp->inp_hpts_cpu);
1008 * If we are using the irq cpu set by LRO or
1009 * the driver then it overrides all other domains.
1011 if (tcp_use_irq_cpu) {
1012 if (inp->inp_irq_cpu_set == 0) {
1013 *failed = 1;
1014 return (0);
1015 }
1016 return(inp->inp_irq_cpu);
1018 /* If one is set the other must be the same */
1020 cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
1021 if (cpuid == NETISR_CPUID_NONE)
1022 return (hpts_random_cpu(inp));
1027 * We don't have a flowid -> cpuid mapping, so cheat and just map
1028 * unknown cpuids to curcpu. Not the best, but apparently better
1029 * than defaulting to swi 0.
1031 if (inp->inp_flowtype == M_HASHTYPE_NONE) {
1032 counter_u64_add(cpu_uses_random, 1);
1033 return (hpts_random_cpu(inp));
1036 * Hash to a thread based on the flowid. If we are using numa,
1037 * then restrict the hash to the numa domain where the inp lives.
1041 if ((vm_ndomains == 1) ||
1042 (inp->inp_numa_domain == M_NODOM)) {
1044 cpuid = inp->inp_flowid % mp_ncpus;
1047 /* Hash into the cpu's that use that domain */
1048 di = &hpts_domains[inp->inp_numa_domain];
1049 cpuid = di->cpu[inp->inp_flowid % di->count];
1052 counter_u64_add(cpu_uses_flowid, 1);
1057 tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
1061 if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
1063 * Find next slot that is occupied and use that to
1064 * be the sleep time.
1066 for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
1067 if (TAILQ_EMPTY(&hpts->p_hptss[t].head) == 0) {
1068 break;
1069 }
1070 t = (t + 1) % NUM_OF_HPTSI_SLOTS;
1072 KASSERT((i != NUM_OF_HPTSI_SLOTS), ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
1073 hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
1075 /* No one on the wheel; sleep for all but 400 slots, or sleep max */
1076 hpts->p_hpts_sleep_time = hpts_sleep_max;
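/*
 * Example (illustrative): if the nearest occupied slot is found at
 * loop index i == 4 (five slots beyond p_cur_slot), the hpts sleeps
 * min(5, hpts_sleep_max) slots, i.e. 50 usec at 10 usec per slot.
 */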
1081 tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout)
1086 int32_t slots_to_run, i, error;
1087 int32_t loop_cnt = 0;
1088 int32_t did_prefetch = 0;
1089 int32_t prefetch_ninp = 0;
1090 int32_t prefetch_tp = 0;
1091 int32_t wrap_loop_cnt = 0;
1092 int32_t slot_pos_of_endpoint = 0;
1093 int32_t orig_exit_slot;
1094 int8_t completed_measure = 0, seen_endpoint = 0;
1096 HPTS_MTX_ASSERT(hpts);
1098 /* record previous info for any logging */
1099 hpts->saved_lasttick = hpts->p_lasttick;
1100 hpts->saved_curtick = hpts->p_curtick;
1101 hpts->saved_curslot = hpts->p_cur_slot;
1102 hpts->saved_prev_slot = hpts->p_prev_slot;
1104 hpts->p_lasttick = hpts->p_curtick;
1105 hpts->p_curtick = tcp_gethptstick(&tv);
1106 cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1107 orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1108 if ((hpts->p_on_queue_cnt == 0) ||
1109 (hpts->p_lasttick == hpts->p_curtick)) {
1110 /*
1111 * No time has yet passed,
1112 * or nothing to do.
1113 */
1114 hpts->p_prev_slot = hpts->p_cur_slot;
1115 hpts->p_lasttick = hpts->p_curtick;
1119 hpts->p_wheel_complete = 0;
1120 HPTS_MTX_ASSERT(hpts);
1121 slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
1122 if (((hpts->p_curtick - hpts->p_lasttick) >
1123 ((NUM_OF_HPTSI_SLOTS-1) * HPTS_TICKS_PER_SLOT)) &&
1124 (hpts->p_on_queue_cnt != 0)) {
1125 /*
1126 * Wheel wrap is occurring; basically we
1127 * are behind and the distance between
1128 * runs has spread so much it has exceeded
1129 * the time on the wheel (1.024 seconds). This
1130 * is ugly and should NOT be happening. We
1131 * need to run the entire wheel. We last processed
1132 * p_prev_slot, so that needs to be the last slot
1133 * we run. The next slot after that should be our
1134 * reserved first slot for new, and then starts
1135 * the running position. Now the problem is the
1136 * reserved "not yet to run" place does not exist
1137 * and there may be inp's in there that need
1138 * running. We can merge those into the
1139 * first slot at the head.
1140 */
1142 hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
1143 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
1144 /*
1145 * Adjust p_cur_slot to be where we are starting from;
1146 * hopefully we will catch up (fat chance if something
1147 * is broken this bad :( )
1148 */
1149 hpts->p_cur_slot = hpts->p_prev_slot;
1150 /*
1151 * The next slot has guys to run too, and that would
1152 * be where we would normally start; let's move them into
1153 * the next slot (p_prev_slot + 2) so that we will
1154 * run them. The extra 10 usec of lateness (by being
1155 * put behind) does not really matter in this situation.
1156 */
1157 TAILQ_FOREACH(inp, &hpts->p_hptss[hpts->p_nxt_slot].head,
1159 MPASS(inp->inp_hptsslot == hpts->p_nxt_slot);
1160 MPASS(inp->inp_hpts_gencnt ==
1161 hpts->p_hptss[hpts->p_nxt_slot].gencnt);
1162 MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
1165 * Update gencnt and nextslot accordingly to match
1166 * the new location. This is safe since it takes both
1167 * the INP lock and the pacer mutex to change the
1168 * inp_hptsslot and inp_hpts_gencnt.
1170 inp->inp_hpts_gencnt =
1171 hpts->p_hptss[hpts->p_runningslot].gencnt;
1172 inp->inp_hptsslot = hpts->p_runningslot;
1174 TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot].head,
1175 &hpts->p_hptss[hpts->p_nxt_slot].head, inp_hpts);
1176 hpts->p_hptss[hpts->p_runningslot].count +=
1177 hpts->p_hptss[hpts->p_nxt_slot].count;
1178 hpts->p_hptss[hpts->p_nxt_slot].count = 0;
1179 hpts->p_hptss[hpts->p_nxt_slot].gencnt++;
1180 slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
1181 counter_u64_add(wheel_wrap, 1);
1182 } else {
1183 /*
1184 * Nxt slot is always one after p_runningslot, though
1185 * it's not usually used unless we are doing wheel wrap.
1186 */
1187 hpts->p_nxt_slot = hpts->p_prev_slot;
1188 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
1190 if (hpts->p_on_queue_cnt == 0) {
1193 for (i = 0; i < slots_to_run; i++) {
1194 struct inpcb *inp, *ninp;
1195 TAILQ_HEAD(, inpcb) head = TAILQ_HEAD_INITIALIZER(head);
1196 struct hptsh *hptsh;
1197 uint32_t runningslot;
1199 /*
1200 * Calculate our delay; if there are no extra ticks there
1201 * was not any delay (i.e. if slots_to_run == 1, no delay).
1202 */
1203 hpts->p_delayed_by = (slots_to_run - (i + 1)) *
1204 HPTS_TICKS_PER_SLOT;
1206 runningslot = hpts->p_runningslot;
1207 hptsh = &hpts->p_hptss[runningslot];
1208 TAILQ_SWAP(&head, &hptsh->head, inpcb, inp_hpts);
1209 hpts->p_on_queue_cnt -= hptsh->count;
1215 TAILQ_FOREACH_SAFE(inp, &head, inp_hpts, ninp) {
1219 /* We prefetch the next inp if possible */
1220 kern_prefetch(ninp, &prefetch_ninp);
1225 if (seen_endpoint == 0) {
1226 seen_endpoint = 1;
1227 orig_exit_slot = slot_pos_of_endpoint =
1228 runningslot;
1229 } else if (completed_measure == 0) {
1230 /* Record the new position */
1231 orig_exit_slot = runningslot;
1235 if (inp->inp_hpts_cpu_set == 0) {
1241 if (__predict_false(inp->inp_in_hpts == IHPTS_MOVING)) {
1242 if (inp->inp_hptsslot == -1) {
1243 inp->inp_in_hpts = IHPTS_NONE;
1244 if (in_pcbrele_wlocked(inp) == false)
1245 INP_WUNLOCK(inp);
1246 } else {
1248 inp_hpts_insert(inp, hpts);
1249 INP_WUNLOCK(inp);
1250 }
1251 continue;
1252 }
1255 MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
1256 MPASS(!(inp->inp_flags & INP_DROPPED));
1257 KASSERT(runningslot == inp->inp_hptsslot,
1258 ("Hpts:%p inp:%p slot mis-aligned %u vs %u",
1259 hpts, inp, runningslot, inp->inp_hptsslot));
1261 if (inp->inp_hpts_request) {
1262 /*
1263 * This guy is deferred out further in time
1264 * than our wheel had available on it.
1265 * Push him back on the wheel or run it,
1266 * depending.
1267 */
1268 uint32_t maxslots, last_slot, remaining_slots;
1270 remaining_slots = slots_to_run - (i + 1);
1271 if (inp->inp_hpts_request > remaining_slots) {
1274 * How far out can we go?
1276 maxslots = max_slots_available(hpts,
1277 hpts->p_cur_slot, &last_slot);
1278 if (maxslots >= inp->inp_hpts_request) {
1279 /* We can place it finally to
1280 * be processed. */
1281 inp->inp_hptsslot = hpts_slot(
1282 hpts->p_runningslot,
1283 inp->inp_hpts_request);
1284 inp->inp_hpts_request = 0;
1286 /* Work off some more time */
1287 inp->inp_hptsslot = last_slot;
1288 inp->inp_hpts_request -=
1289 maxslots;
1291 inp_hpts_insert(inp, hpts);
1296 inp->inp_hpts_request = 0;
1297 /* Fall through we will so do it now */
1300 inp_hpts_release(inp);
1301 tp = intotcpcb(inp);
1304 /*
1305 * Setup so the next time we will move to
1306 * the right CPU. This should be a rare
1307 * event. It will sometimes happen when we
1308 * are the client side (usually not the
1309 * server). Somehow tcp_output() gets called
1310 * before the tcp_do_segment() sets the
1311 * initial state. This means the r_cpu and
1312 * r_hpts_cpu are 0. We get on the hpts, and
1313 * then tcp_input() gets called setting up
1314 * the r_cpu to the correct value. The hpts
1315 * goes off and sees the mis-match. We
1316 * simply correct it here and the CPU will
1317 * switch to the new hpts next time the tcb
1318 * gets added to the hpts (not this one).
1319 */
1323 CURVNET_SET(inp->inp_vnet);
1324 /* Lets do any logging that we might want to */
1325 if (hpts_does_tp_logging && tcp_bblogging_on(tp)) {
1326 tcp_hpts_log(hpts, tp, &tv, slots_to_run, i, from_callout);
1329 if (tp->t_fb_ptr != NULL) {
1330 kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1333 /*
1334 * We set inp_hpts_calls to 1 before any possible output.
1335 * The contract with the transport is that if it cares about
1336 * hpts calling, it should clear the flag. That way the next time
1337 * it is called it will know it is hpts.
1338 *
1339 * We also only call tfb_do_queued_segments() <or> tcp_output();
1340 * it is expected that if segments are queued and come in,
1341 * the final input mbuf will cause a call to output if it is needed.
1342 */
1343 inp->inp_hpts_calls = 1;
1344 if ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) &&
1345 !STAILQ_EMPTY(&tp->t_inqueue)) {
1346 error = (*tp->t_fb->tfb_do_queued_segments)(tp, 0);
1348 /* The input killed the connection */
1352 error = tcp_output(tp);
1358 * If we have a nxt inp, see if we can
1359 * prefetch it. Note this may seem
1360 * "risky" since we have no locks (other
1361 * than the previous inp) and there no
1362 * assurance that ninp was not pulled while
1363 * we were processing inp and freed. If this
1364 * occurred it could mean that either:
1366 * a) Its NULL (which is fine we won't go
1367 * here) <or> b) Its valid (which is cool we
1368 * will prefetch it) <or> c) The inp got
1369 * freed back to the slab which was
1370 * reallocated. Then the piece of memory was
1371 * re-used and something else (not an
1372 * address) is in inp_ppcb. If that occurs
1373 * we don't crash, but take a TLB shootdown
1374 * performance hit (same as if it was NULL
1375 * and we tried to pre-fetch it).
1377 * Considering that the likelihood of <c> is
1378 * quite rare we will take a risk on doing
1379 * this. If performance drops after testing
1380 * we can always take this out. NB: the
1381 * kern_prefetch on amd64 actually has
1382 * protection against a bad address now via
1383 * the DMAP_() tests. This will prevent the
1384 * TLB hit, and instead if <c> occurs just
1385 * cause us to load cache with a useless
1388 * XXXGL: with tcpcb == inpcb, I'm unsure this
1389 * prefetch is still correct and useful.
1391 kern_prefetch(ninp, &prefetch_tp);
1398 if (seen_endpoint) {
1399 /*
1400 * We now have an accurate distance between
1401 * slot_pos_of_endpoint <-> orig_exit_slot
1402 * to tell us how late we were; orig_exit_slot
1403 * is where we calculated the end of our cycle to
1404 * be when we first entered.
1405 */
1406 completed_measure = 1;
1409 hpts->p_runningslot++;
1410 if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
1411 hpts->p_runningslot = 0;
1415 HPTS_MTX_ASSERT(hpts);
1416 hpts->p_delayed_by = 0;
1417 /*
1418 * Check to see if we took an excess amount of time and need to run
1419 * more ticks (if we did not hit ENOBUFS).
1420 */
1421 hpts->p_prev_slot = hpts->p_cur_slot;
1422 hpts->p_lasttick = hpts->p_curtick;
1423 if ((from_callout == 0) || (loop_cnt > max_pacer_loops)) {
1424 /*
1425 * Something is seriously slow; we have
1426 * looped through processing the wheel
1427 * and by the time we cleared the
1428 * backlog, having run max_pacer_loops times,
1429 * we still needed to run. That means
1430 * the system is hopelessly behind and
1431 * can never catch up :(
1433 * We will just lie to this thread
1434 * and let it think p_curtick is
1435 * correct. When it next awakens
1436 * it will find itself further behind.
1437 */
1439 counter_u64_add(hpts_hopelessly_behind, 1);
1442 hpts->p_curtick = tcp_gethptstick(&tv);
1443 hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1444 if (seen_endpoint == 0) {
1445 /* We saw no endpoint but we may be looping */
1446 orig_exit_slot = hpts->p_cur_slot;
1448 if ((wrap_loop_cnt < 2) &&
1449 (hpts->p_lasttick != hpts->p_curtick)) {
1450 counter_u64_add(hpts_loops, 1);
1455 cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1457 * Set flag to tell that we are done for
1458 * any slot input that happens during
1459 * input.
1460 */
1461 hpts->p_wheel_complete = 1;
1462 /*
1463 * Now did we spend too long running input and need to run more ticks?
1464 * Note that if wrap_loop_cnt < 2 then we should have the conditions
1465 * in the KASSERTs true. But if the wheel is behind, i.e. wrap_loop_cnt
1466 * is 2 or more, then the conditions most likely are *not* true.
1467 * Also if we are not called from the callout, we don't run the wheel
1468 * multiple times, so the slots may not align either.
1469 */
1470 KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
1471 (wrap_loop_cnt >= 2) || (from_callout == 0)),
1472 ("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
1473 hpts->p_prev_slot, hpts->p_cur_slot));
1474 KASSERT(((hpts->p_lasttick == hpts->p_curtick)
1475 || (wrap_loop_cnt >= 2) || (from_callout == 0)),
1476 ("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
1477 hpts->p_lasttick, hpts->p_curtick));
1478 if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
1479 hpts->p_curtick = tcp_gethptstick(&tv);
1480 counter_u64_add(hpts_loops, 1);
1481 hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1486 tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);
1489 return(hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
1495 __tcp_set_hpts(struct inpcb *inp, int32_t line)
1497 struct tcp_hpts_entry *hpts;
1500 INP_WLOCK_ASSERT(inp);
1501 hpts = tcp_hpts_lock(inp);
1502 if ((inp->inp_in_hpts == 0) &&
1503 (inp->inp_hpts_cpu_set == 0)) {
1504 inp->inp_hpts_cpu = hpts_cpuid(inp, &failed);
1506 inp->inp_hpts_cpu_set = 1;
1508 mtx_unlock(&hpts->p_mtx);
1512 __tcp_run_hpts(struct tcp_hpts_entry *hpts)
1516 if (hpts->p_hpts_active) {
1517 /* Already active */
1518 return;
1519 }
1520 if (mtx_trylock(&hpts->p_mtx) == 0) {
1521 /* Someone else got the lock */
1522 return;
1523 }
1524 if (hpts->p_hpts_active)
1526 hpts->syscall_cnt++;
1527 counter_u64_add(hpts_direct_call, 1);
1528 hpts->p_hpts_active = 1;
1529 ticks_ran = tcp_hptsi(hpts, 0);
1530 /* We may want to adjust the sleep values here */
1531 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1532 if (ticks_ran > ticks_indicate_less_sleep) {
1536 hpts->p_mysleep.tv_usec /= 2;
1537 if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
1538 hpts->p_mysleep.tv_usec = dynamic_min_sleep;
1539 /* Reschedule with new to value */
1540 tcp_hpts_set_max_sleep(hpts, 0);
1541 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1542 /* Validate its in the right ranges */
1543 if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
1544 hpts->overidden_sleep = tv.tv_usec;
1545 tv.tv_usec = hpts->p_mysleep.tv_usec;
1546 } else if (tv.tv_usec > dynamic_max_sleep) {
1547 /* Lets not let sleep get above this value */
1548 hpts->overidden_sleep = tv.tv_usec;
1549 tv.tv_usec = dynamic_max_sleep;
1552 * In this mode the timer is a backstop to
1553 * all the userret/lro_flushes so we use
1554 * the dynamic value and set the on_min_sleep
1555 * flag so we will not be awoken.
1558 /* Store off to make visible the actual sleep time */
1559 hpts->sleeping = tv.tv_usec;
1560 callout_reset_sbt_on(&hpts->co, sb, 0,
1561 hpts_timeout_swi, hpts, hpts->p_cpu,
1562 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1563 } else if (ticks_ran < ticks_indicate_more_sleep) {
1564 /* For the further sleep, don't reschedule hpts */
1565 hpts->p_mysleep.tv_usec *= 2;
1566 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1567 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1569 hpts->p_on_min_sleep = 1;
1571 hpts->p_hpts_active = 0;
1573 HPTS_MTX_ASSERT(hpts);
1574 mtx_unlock(&hpts->p_mtx);
1577 static struct tcp_hpts_entry *
1578 tcp_choose_hpts_to_run(void)
1580 int i, oldest_idx, start, end;
1581 uint32_t cts, time_since_ran, calc;
1583 cts = tcp_get_usecs(NULL);
1584 oldest_idx = -1; time_since_ran = 0;
1585 /* Default is all one group */
1586 start = 0;
1587 end = tcp_pace.rp_num_hptss;
1589 * If we have more than one L3 group figure out which one
1592 if (tcp_pace.grp_cnt > 1) {
1593 for (i = 0; i < tcp_pace.grp_cnt; i++) {
1594 if (CPU_ISSET(curcpu, &tcp_pace.grps[i]->cg_mask)) {
1595 start = tcp_pace.grps[i]->cg_first;
1596 end = (tcp_pace.grps[i]->cg_last + 1);
1602 for (i = start; i < end; i++) {
1603 if (TSTMP_GT(cts, cts_last_ran[i]))
1604 calc = cts - cts_last_ran[i];
1605 else
1606 calc = 0;
1607 if (calc > time_since_ran) {
1608 oldest_idx = i;
1609 time_since_ran = calc;
1610 }
1612 if (oldest_idx >= 0)
1613 return(tcp_pace.rp_ent[oldest_idx]);
1615 return(tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
1619 void
1620 tcp_run_hpts(void)
1621 {
1622 struct tcp_hpts_entry *hpts;
1623 struct epoch_tracker et;
1625 NET_EPOCH_ENTER(et);
1626 hpts = tcp_choose_hpts_to_run();
1627 __tcp_run_hpts(hpts);
1628 NET_EPOCH_EXIT(et);
1629 }
1633 tcp_hpts_thread(void *ctx)
1635 struct tcp_hpts_entry *hpts;
1636 struct epoch_tracker et;
1641 hpts = (struct tcp_hpts_entry *)ctx;
1642 mtx_lock(&hpts->p_mtx);
1643 if (hpts->p_direct_wake) {
1644 /* Signaled by input or output with low occupancy count. */
1645 callout_stop(&hpts->co);
1646 counter_u64_add(hpts_direct_awakening, 1);
1648 /* Timed out, the normal case. */
1649 counter_u64_add(hpts_wake_timeout, 1);
1650 if (callout_pending(&hpts->co) ||
1651 !callout_active(&hpts->co)) {
1652 mtx_unlock(&hpts->p_mtx);
1656 callout_deactivate(&hpts->co);
1657 hpts->p_hpts_wake_scheduled = 0;
1658 NET_EPOCH_ENTER(et);
1659 if (hpts->p_hpts_active) {
1661 * We are active already. This means that a syscall
1662 * trap or LRO is running on behalf of hpts. In that case
1663 * we need to double our timeout since there seems to be
1664 * enough activity in the system that we don't need to
1665 * run as often (if we were not directly woken).
1667 if (hpts->p_direct_wake == 0) {
1668 counter_u64_add(hpts_back_tosleep, 1);
1669 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1670 hpts->p_mysleep.tv_usec *= 2;
1671 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1672 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1673 tv.tv_usec = hpts->p_mysleep.tv_usec;
1674 hpts->p_on_min_sleep = 1;
1677 * Here we have low count on the wheel, but
1678 * somehow we still collided with one of the
1679 * connections. Let's go back to sleep for a
1680 * min sleep time, but clear the flag so we
1681 * can be awoken by insert.
1683 hpts->p_on_min_sleep = 0;
1684 tv.tv_usec = tcp_min_hptsi_time;
1688 * Directly woken, most likely to reset the
1689 * callout time.
1692 tv.tv_usec = hpts->p_mysleep.tv_usec;
1697 hpts->p_hpts_active = 1;
1698 ticks_ran = tcp_hptsi(hpts, 1);
1700 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1701 if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1702 if (hpts->p_direct_wake == 0) {
1704 * Only adjust sleep time if we were
1705 * called from the callout i.e. direct_wake == 0.
1707 if (ticks_ran < ticks_indicate_more_sleep) {
1708 hpts->p_mysleep.tv_usec *= 2;
1709 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1710 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1711 } else if (ticks_ran > ticks_indicate_less_sleep) {
1712 hpts->p_mysleep.tv_usec /= 2;
1713 if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
1714 hpts->p_mysleep.tv_usec = dynamic_min_sleep;
1717 if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
1718 hpts->overidden_sleep = tv.tv_usec;
1719 tv.tv_usec = hpts->p_mysleep.tv_usec;
1720 } else if (tv.tv_usec > dynamic_max_sleep) {
1721 /* Lets not let sleep get above this value */
1722 hpts->overidden_sleep = tv.tv_usec;
1723 tv.tv_usec = dynamic_max_sleep;
1726 * In this mode the timer is a backstop to
1727 * all the userret/lro_flushes so we use
1728 * the dynamic value and set the on_min_sleep
1729 * flag so we will not be awoken.
1731 hpts->p_on_min_sleep = 1;
1732 } else if (hpts->p_on_queue_cnt == 0) {
1734 * No one on the wheel, please wake us up
1735 * if you insert on the wheel.
1737 hpts->p_on_min_sleep = 0;
1738 hpts->overidden_sleep = 0;
1741 * We hit here when we have a low number of
1742 * clients on the wheel (our else clause).
1743 * We may need to go on min sleep, if we set
1744 * the flag we will not be awoken if someone
1745 * is inserted ahead of us. Clearing the flag
1746 * means we can be awoken. This is "old mode"
1747 * where the timer is what runs hpts mainly.
1749 if (tv.tv_usec < tcp_min_hptsi_time) {
1751 * Yes on min sleep, which means
1752 * we cannot be awoken.
1754 hpts->overidden_sleep = tv.tv_usec;
1755 tv.tv_usec = tcp_min_hptsi_time;
1756 hpts->p_on_min_sleep = 1;
1758 /* Clear the min sleep flag */
1759 hpts->overidden_sleep = 0;
1760 hpts->p_on_min_sleep = 0;
1763 HPTS_MTX_ASSERT(hpts);
1764 hpts->p_hpts_active = 0;
1766 hpts->p_direct_wake = 0;
1768 /* Store off to make visible the actual sleep time */
1769 hpts->sleeping = tv.tv_usec;
1770 callout_reset_sbt_on(&hpts->co, sb, 0,
1771 hpts_timeout_swi, hpts, hpts->p_cpu,
1772 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1774 mtx_unlock(&hpts->p_mtx);
1780 hpts_count_level(struct cpu_group *cg)
1782 int32_t count_l3, i;
1784 count_l3 = 0;
1785 if (cg->cg_level == CG_SHARE_L3)
1786 return (1);
1787 /* Walk all the children looking for L3 */
1788 for (i = 0; i < cg->cg_children; i++) {
1789 count_l3 += hpts_count_level(&cg->cg_child[i]);
1790 }
1791 return (count_l3);
1795 hpts_gather_grps(struct cpu_group **grps, int32_t *at, int32_t max, struct cpu_group *cg)
1800 if (cg->cg_level == CG_SHARE_L3) {
1801 /* Record this L3 group, if there is room */
1802 if (*at < max)
1803 grps[(*at)++] = cg;
1804 return;
1805 }
1809 /* Walk all the children looking for L3 */
1810 for (i = 0; i < cg->cg_children; i++) {
1811 hpts_gather_grps(grps, at, max, &cg->cg_child[i]);
1816 tcp_init_hptsi(void *st)
1818 struct cpu_group *cpu_top;
1819 int32_t error __diagused;
1820 int32_t i, j, bound = 0, created = 0;
1824 struct tcp_hpts_entry *hpts;
1827 uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1831 cpu_top = smp_topo();
1835 tcp_pace.rp_num_hptss = ncpus;
1836 hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
1837 hpts_loops = counter_u64_alloc(M_WAITOK);
1838 back_tosleep = counter_u64_alloc(M_WAITOK);
1839 combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
1840 wheel_wrap = counter_u64_alloc(M_WAITOK);
1841 hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
1842 hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
1843 hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
1844 hpts_direct_call = counter_u64_alloc(M_WAITOK);
1845 cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
1846 cpu_uses_random = counter_u64_alloc(M_WAITOK);
1848 sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
1849 tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
1850 sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
1851 cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
1852 tcp_pace.grp_cnt = 0;
1853 if (cpu_top == NULL) {
1854 tcp_pace.grp_cnt = 1;
1855 } else {
1856 /* Find out how many cache level 3 domains we have */
1858 tcp_pace.grp_cnt = hpts_count_level(cpu_top);
1859 if (tcp_pace.grp_cnt == 0)
1860 tcp_pace.grp_cnt = 1;
1861 }
1862 sz = (tcp_pace.grp_cnt * sizeof(struct cpu_group *));
1863 tcp_pace.grps = malloc(sz, M_TCPHPTS, M_WAITOK);
1864 /* Now populate the groups */
1865 if (tcp_pace.grp_cnt == 1) {
1867 * All we need is the top level all cpu's are in
1868 * the same cache so when we use grp[0]->cg_mask
1869 * with the cg_first <-> cg_last it will include
1870 * all cpu's in it. The level here is probably
1871 * zero, which is ok.
1872 */
1873 tcp_pace.grps[0] = cpu_top;
1876 * Here we must find all the level three cache domains
1877 * and setup our pointers to them.
1879 count = 0;
1880 hpts_gather_grps(tcp_pace.grps, &count, tcp_pace.grp_cnt, cpu_top);
1883 asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
1884 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1885 tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
1886 M_TCPHPTS, M_WAITOK | M_ZERO);
1887 tcp_pace.rp_ent[i]->p_hptss = malloc(asz, M_TCPHPTS, M_WAITOK);
1888 hpts = tcp_pace.rp_ent[i];
1890 * Init all the hpts structures that are not specifically
1891 * zero'd by the allocations. Also lets attach them to the
1892 * appropriate sysctl block as well.
1894 mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
1895 "hpts", MTX_DEF | MTX_DUPOK);
1896 for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
1897 TAILQ_INIT(&hpts->p_hptss[j].head);
1898 hpts->p_hptss[j].count = 0;
1899 hpts->p_hptss[j].gencnt = 0;
1901 sysctl_ctx_init(&hpts->hpts_ctx);
1902 sprintf(unit, "%d", i);
1903 hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
1904 SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
1907 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
1909 SYSCTL_ADD_INT(&hpts->hpts_ctx,
1910 SYSCTL_CHILDREN(hpts->hpts_root),
1911 OID_AUTO, "out_qcnt", CTLFLAG_RD,
1912 &hpts->p_on_queue_cnt, 0,
1913 "Count TCB's awaiting output processing");
1914 SYSCTL_ADD_U16(&hpts->hpts_ctx,
1915 SYSCTL_CHILDREN(hpts->hpts_root),
1916 OID_AUTO, "active", CTLFLAG_RD,
1917 &hpts->p_hpts_active, 0,
1918 "Is the hpts active");
1919 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1920 SYSCTL_CHILDREN(hpts->hpts_root),
1921 OID_AUTO, "curslot", CTLFLAG_RD,
1922 &hpts->p_cur_slot, 0,
1923 "What the current running pacers goal");
1924 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1925 SYSCTL_CHILDREN(hpts->hpts_root),
1926 OID_AUTO, "runtick", CTLFLAG_RD,
1927 &hpts->p_runningslot, 0,
1928 "What the running pacers current slot is");
1929 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1930 SYSCTL_CHILDREN(hpts->hpts_root),
1931 OID_AUTO, "curtick", CTLFLAG_RD,
1932 &hpts->p_curtick, 0,
1933 "What the running pacers last tick mapped to the wheel was");
1934 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1935 SYSCTL_CHILDREN(hpts->hpts_root),
1936 OID_AUTO, "lastran", CTLFLAG_RD,
1937 &cts_last_ran[i], 0,
1938 "The last usec tick that this hpts ran");
1939 SYSCTL_ADD_LONG(&hpts->hpts_ctx,
1940 SYSCTL_CHILDREN(hpts->hpts_root),
1941 OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
1942 &hpts->p_mysleep.tv_usec,
1943 "What the running pacers is using for p_mysleep.tv_usec");
1944 SYSCTL_ADD_U64(&hpts->hpts_ctx,
1945 SYSCTL_CHILDREN(hpts->hpts_root),
1946 OID_AUTO, "now_sleeping", CTLFLAG_RD,
1948 "What the running pacers is actually sleeping for");
1949 SYSCTL_ADD_U64(&hpts->hpts_ctx,
1950 SYSCTL_CHILDREN(hpts->hpts_root),
1951 OID_AUTO, "syscall_cnt", CTLFLAG_RD,
1952 &hpts->syscall_cnt, 0,
1953 "How many times we had syscalls on this hpts");
1955 hpts->p_hpts_sleep_time = hpts_sleep_max;
1957 hpts->p_curtick = tcp_gethptstick(&tv);
1958 cts_last_ran[i] = tcp_tv_to_usectick(&tv);
1959 hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1960 hpts->p_cpu = 0xffff;
1961 hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
1962 callout_init(&hpts->co, 1);
1964 /* Don't try to bind to NUMA domains if we don't have any */
1965 if (vm_ndomains == 1 && tcp_bind_threads == 2)
1966 tcp_bind_threads = 0;
1969 * Now lets start ithreads to handle the hptss.
1971 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1972 hpts = tcp_pace.rp_ent[i];
1975 error = swi_add(&hpts->ie, "hpts",
1976 tcp_hpts_thread, (void *)hpts,
1977 SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
1979 ("Can't add hpts:%p i:%d err:%d",
1982 hpts->p_mysleep.tv_sec = 0;
1983 hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
1984 if (tcp_bind_threads == 1) {
1985 if (intr_event_bind(hpts->ie, i) == 0)
1986 bound++;
1987 } else if (tcp_bind_threads == 2) {
1988 /* Find the group for this CPU (i) and bind into it */
1989 for (j = 0; j < tcp_pace.grp_cnt; j++) {
1990 if (CPU_ISSET(i, &tcp_pace.grps[j]->cg_mask)) {
1991 if (intr_event_bind_ithread_cpuset(hpts->ie,
1992 &tcp_pace.grps[j]->cg_mask) == 0) {
1993 bound++;
1994 pc = pcpu_find(i);
1995 domain = pc->pc_domain;
1996 count = hpts_domains[domain].count;
1997 hpts_domains[domain].cpu[count] = i;
1998 hpts_domains[domain].count++;
2005 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
2006 hpts->sleeping = tv.tv_usec;
2008 callout_reset_sbt_on(&hpts->co, sb, 0,
2009 hpts_timeout_swi, hpts, hpts->p_cpu,
2010 (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
2013 * If we somehow have an empty domain, fall back to choosing
2014 * among all hpts threads.
2016 for (i = 0; i < vm_ndomains; i++) {
2017 if (hpts_domains[i].count == 0) {
2018 tcp_bind_threads = 0;
2022 printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
2023 created, bound,
2024 tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
2026 printf("HPTS is in INVARIANT mode!!\n");
2030 SYSINIT(tcphptsi, SI_SUB_SOFTINTR, SI_ORDER_ANY, tcp_init_hptsi, NULL);
2031 MODULE_VERSION(tcphpts, 1);