/*-
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include "opt_tcpdebug.h"

/*
 * Some notes about usage.
 *
 * The tcp_hpts system is designed to provide a high precision timer
 * system for tcp. Its main purpose is to provide a mechanism for
 * pacing packets out onto the wire. It can be used in two ways
 * by a given TCP stack (and those two methods can be used simultaneously).
 *
 * First, and probably the main way it is used by Rack and BBR, it can
 * be used to call tcp_output() of a transport stack at some time in the future.
 * The normal way this is done is that tcp_output() of the stack schedules
 * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
 * slot is the time from now that the stack wants to be called but it
 * must be converted to tcp_hpts's notion of slot. This is done with
 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
 * call from the tcp_output() routine might look like:
 *
 *	tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
 *
 * The above would schedule tcp_output() to be called in 550 microseconds.
 * Note that if using this mechanism the stack will want to add, near
 * its top, a check to prevent unwanted calls (from user land or the
 * arrival of incoming ACKs). So it would add something like:
 *
 *	if (tcp_in_hpts(inp))
 *		return;
 *
 * to prevent output processing until the time allotted has gone by.
 * Of course this is a bare-bones example and the stack will probably
 * have more considerations than just the above.
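 *
 * As a purely illustrative sketch (the names stack_output() and
 * stack_pacing_delay() are hypothetical, not taken from Rack or BBR),
 * the guarded output path described above might be shaped like:
 *
 *	static int
 *	stack_output(struct tcpcb *tp)
 *	{
 *		if (tcp_in_hpts(tp->t_inpcb))
 *			return (0);	// the pacer will call us back later
 *		// ... send whatever cwnd/pacing currently allows ...
 *		tcp_hpts_insert(tp,
 *		    HPTS_USEC_TO_SLOTS(stack_pacing_delay(tp)));
 *		return (0);
 *	}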
 *
 * In order to run input queued segments from the HPTS context the
 * tcp stack must define an input function for
 * tfb_do_queued_segments(). This function understands
 * how to dequeue an array of packets that were input and
 * knows how to call the correct processing routine.
 *
 * Locking in this is important as well so most likely the
 * stack will need to define the tfb_do_segment_nounlock(),
 * splitting tfb_do_segment() into two parts. The main processing
 * part does not unlock the INP and returns a value of 1 or 0.
 * It returns 0 if all is well and the lock was not released. It
 * returns 1 if we had to destroy the TCB (a reset received etc).
 * The remains of tfb_do_segment() then become just a simple call
 * to the tfb_do_segment_nounlock() function that checks the return
 * code and possibly unlocks.
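 *
 * A minimal sketch of that split, with hypothetical stack function
 * names (stack_do_segment() / stack_do_segment_nounlock()) used only
 * for illustration:
 *
 *	static void
 *	stack_do_segment(struct mbuf *m, struct tcphdr *th,
 *	    struct socket *so, struct tcpcb *tp, int drop_hdrlen,
 *	    int tlen, uint8_t iptos)
 *	{
 *		// Returns 1 if the TCB was destroyed and the INP
 *		// lock already released, 0 otherwise.
 *		if (stack_do_segment_nounlock(m, th, so, tp,
 *		    drop_hdrlen, tlen, iptos) == 0)
 *			INP_WUNLOCK(tp->t_inpcb);
 *	}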
 *
 * The stack must also set the flag on the INP that it supports this
 * feature i.e. INP_SUPPORTS_MBUFQ. The LRO code recognizes
 * this flag as well and will queue packets when it is set.
 * There are other flags as well, INP_MBUF_QUEUE_READY and
 * INP_DONT_SACK_QUEUE. The first flag tells the LRO code
 * that we are in the pacer for output so there is no
 * need to wake up the hpts system to get immediate
 * input. The second tells the LRO code that it is okay,
 * if a SACK arrives, to still defer input and let
 * the current hpts timer run (this is usually set when
 * a rack timer is up so we know SACKs are happening
 * on the connection already and we don't want to wake up yet).
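 *
 * For illustration only (the flags live in inp_flags2, as checked
 * elsewhere in this file; exactly when a stack sets them is stack
 * policy and is not shown here):
 *
 *	inp->inp_flags2 |= INP_SUPPORTS_MBUFQ;	// at connection setup
 *	// while a pacing timer is already scheduled for output:
 *	inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
 *	// while a rack timer is running and SACK arrivals may wait:
 *	inp->inp_flags2 |= INP_DONT_SACK_QUEUE;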
 *
 * There is a common function within the rack_bbr_common code
 * version i.e. ctf_do_queued_segments(). This function
 * knows how to take the input queue of packets from
 * tp->t_in_pkts and process them, digging out
 * all the arguments, calling any bpf tap and
 * calling into tfb_do_segment_nounlock(). The common
 * function (ctf_do_queued_segments()) requires that
 * you have defined the tfb_do_segment_nounlock() as
 * described above.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <sys/time.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <vm/uma.h>
#include <vm/vm.h>

#include <net/route.h>
#include <net/vnet.h>

#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>

#ifdef tcpdebug
#include <netinet/tcp_debug.h>
#endif	/* tcpdebug */
#ifdef tcp_offload
#include <netinet/tcp_offload.h>
#endif

/*
 * The hpts uses a 102400 slot wheel. The wheel
 * defines the time in 10 usec increments (102400 x 10).
 * This gives a range of 10usec - 1024ms to place
 * an entry within. If the user requests more than
 * 1.024 seconds, a remainder is attached and the hpts
 * when seeing the remainder will re-insert the
 * inpcb forward in time from where it is until
 * the remainder is zero.
 */
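
/*
 * A worked example of the remainder mechanism described above (the
 * numbers follow from the 102400 x 10usec geometry, not from extra
 * code): a request of 2.5 seconds is 250000 slots of 10 usec. Only
 * about 102399 slots fit on the wheel, so the entry is placed as far
 * out as possible and the remaining ~147601 slots are kept as the
 * remainder; each time the wheel reaches the entry it is pushed
 * forward again until the remainder hits zero and the connection
 * finally runs.
 */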
#define NUM_OF_HPTSI_SLOTS 102400

/* Each hpts has its own p_mtx which is used for locking */
#define HPTS_MTX_ASSERT(hpts)	mtx_assert(&(hpts)->p_mtx, MA_OWNED)
#define HPTS_LOCK(hpts)		mtx_lock(&(hpts)->p_mtx)
#define HPTS_UNLOCK(hpts)	mtx_unlock(&(hpts)->p_mtx)
struct tcp_hpts_entry {
    /* Cache line 0x00 */
    struct mtx p_mtx;		/* Mutex for hpts */
    struct timeval p_mysleep;	/* Our min sleep time */
    uint64_t syscall_cnt;
    uint64_t sleeping;		/* What the actual sleep was (if sleeping) */
    uint16_t p_hpts_active;	/* Flag that says hpts is awake */
    uint8_t p_wheel_complete;	/* have we completed the wheel arc walk? */
    uint32_t p_curtick;		/* Tick in 10 us the hpts is going to */
    uint32_t p_runningslot;	/* Current tick we are at if we are running */
    uint32_t p_prev_slot;	/* Previous slot we were on */
    uint32_t p_cur_slot;	/* Current slot in wheel hpts is draining */
    uint32_t p_nxt_slot;	/* The next slot outside the current range of
				 * slots that the hpts is running on. */
    int32_t p_on_queue_cnt;	/* Count on queue in this hpts */
    uint32_t p_lasttick;	/* Last tick before the current one */
    uint8_t p_direct_wake :1,	/* boolean */
	p_on_min_sleep:1,	/* boolean */
	p_hpts_wake_scheduled:1, /* boolean */
	p_avail:5;
    uint8_t p_fill[3];		/* Fill to 32 bits */
    /* Cache line 0x40 */
    struct hptsh {
	TAILQ_HEAD(, inpcb)	head;
	uint32_t		count;
	uint32_t		gencnt;
    } *p_hptss;			/* Hptsi wheel */
    uint32_t p_hpts_sleep_time;	/* Current sleep interval having a max
				 * of 255ms */
    uint32_t overidden_sleep;	/* what was overridden by min-sleep for logging */
    uint32_t saved_lasttick;	/* for logging */
    uint32_t saved_curtick;	/* for logging */
    uint32_t saved_curslot;	/* for logging */
    uint32_t saved_prev_slot;	/* for logging */
    uint32_t p_delayed_by;	/* How much were we delayed by */
    /* Cache line 0x80 */
    struct sysctl_ctx_list hpts_ctx;
    struct sysctl_oid *hpts_root;
    struct intr_event *ie;
    void *ie_cookie;
    uint16_t p_num;		/* The hpts number, one per cpu */
    uint16_t p_cpu;		/* The hpts CPU */
    /* There is extra space in here */
    /* Cache line 0x100 */
    struct callout co __aligned(CACHE_LINE_SIZE);
} __aligned(CACHE_LINE_SIZE);

static struct tcp_hptsi {
    struct cpu_group **grps;
    struct tcp_hpts_entry **rp_ent;	/* Array of hptss */
    uint32_t *cts_last_ran;
    uint32_t grp_cnt;
    uint32_t rp_num_hptss;	/* Number of hpts threads */
} tcp_pace;

MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
#ifdef RSS
static int tcp_bind_threads = 1;
#else
static int tcp_bind_threads = 2;
#endif
static int tcp_use_irq_cpu = 0;
static uint32_t *cts_last_ran;
static int hpts_does_tp_logging = 0;

static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout);
static void tcp_hpts_thread(void *ctx);
static void tcp_init_hptsi(void *st);

int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Hpts controls");
SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TCP Hpts statistics");

#define timersub(tvp, uvp, vvp)					\
    do {							\
	(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;		\
	(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;	\
	if ((vvp)->tv_usec < 0) {				\
	    (vvp)->tv_sec--;					\
	    (vvp)->tv_usec += 1000000;				\
	}							\
    } while (0)

static int32_t tcp_hpts_precision = 120;

static struct hpts_domain_info {
    int count;
    int cpu[MAXCPU];
} hpts_domains[MAXMEMDOM];

counter_u64_t hpts_hopelessly_behind;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
    &hpts_hopelessly_behind,
    "Number of times hpts could not catch up and was behind hopelessly");

counter_u64_t hpts_loops;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
    &hpts_loops, "Number of times hpts had to loop to catch up");

counter_u64_t back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
    &back_tosleep, "Number of times hpts found no tcbs");

counter_u64_t combined_wheel_wrap;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
    &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");

counter_u64_t wheel_wrap;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
    &wheel_wrap, "Number of times the wheel lagged enough that the pacer had to run the entire wheel");

counter_u64_t hpts_direct_call;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
    &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");

counter_u64_t hpts_wake_timeout;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
    &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");

counter_u64_t hpts_direct_awakening;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
    &hpts_direct_awakening, "Number of times hpts threads woke up via a direct wakeup");

counter_u64_t hpts_back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
    &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep with no work");

counter_u64_t cpu_uses_flowid;
counter_u64_t cpu_uses_random;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
    &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
    &cpu_uses_random, "Number of times when setting cpuid we used a random value");

TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
    &tcp_bind_threads, 2,
    "Thread Binding tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
    &tcp_use_irq_cpu, 0,
    "Use of irq CPU tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
    &tcp_hpts_precision, 120,
    "Value for PRE() precision of callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
    &conn_cnt_thresh, 0,
    "How many connections (below) make us use the callout based mechanism");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
    &hpts_does_tp_logging, 0,
    "Do we add to any tp that has logging on pacer logs");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
    &dynamic_min_sleep, 250,
    "What is the dynamic minsleep value?");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
    &dynamic_max_sleep, 5000,
    "What is the dynamic maxsleep value?");

static int32_t max_pacer_loops = 10;
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
    &max_pacer_loops, 10,
    "What is the maximum number of times the pacer will loop trying to catch up");

#define HPTS_MAX_SLEEP_ALLOWED (NUM_OF_HPTSI_SLOTS/2)

static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;

static int
sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
{
    int error;
    uint32_t new;

    new = hpts_sleep_max;
    error = sysctl_handle_int(oidp, &new, 0, req);
    if (error == 0 && req->newptr) {
	if ((new < (dynamic_min_sleep/HPTS_TICKS_PER_SLOT)) ||
	    (new > HPTS_MAX_SLEEP_ALLOWED))
	    error = EINVAL;
	else
	    hpts_sleep_max = new;
    }
    return (error);
}

static int
sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
{
    int error;
    uint32_t new;

    new = tcp_min_hptsi_time;
    error = sysctl_handle_int(oidp, &new, 0, req);
    if (error == 0 && req->newptr) {
	if (new < LOWEST_SLEEP_ALLOWED)
	    error = EINVAL;
	else
	    tcp_min_hptsi_time = new;
    }
    return (error);
}

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
    CTLTYPE_UINT | CTLFLAG_RW,
    &hpts_sleep_max, 0,
    &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
    "Maximum time hpts will sleep in slots");

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
    CTLTYPE_UINT | CTLFLAG_RW,
    &tcp_min_hptsi_time, 0,
    &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
    "The minimum time the hpts must sleep before processing more slots");

static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
static int tcp_hpts_no_wake_over_thresh = 1;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
    &ticks_indicate_more_sleep, 0,
    "If we only process this many or fewer on a timeout, we need longer sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
    &ticks_indicate_less_sleep, 0,
    "If we process this many or more on a timeout, we need less sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
    &tcp_hpts_no_wake_over_thresh, 0,
    "When we are over the threshold on the pacer do we prohibit wakeups?");

static void
tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
    int slots_to_run, int idx, int from_callout)
{
    union tcp_log_stackspecific log;

    /*
     * Unused logs are
     * 64 bit - delRate, rttProp, bw_inuse
     *  8 bit - bbr_state, bbr_substate, inhpts;
     */
    memset(&log.u_bbr, 0, sizeof(log.u_bbr));
    log.u_bbr.flex1 = hpts->p_nxt_slot;
    log.u_bbr.flex2 = hpts->p_cur_slot;
    log.u_bbr.flex3 = hpts->p_prev_slot;
    log.u_bbr.flex4 = idx;
    log.u_bbr.flex5 = hpts->p_curtick;
    log.u_bbr.flex6 = hpts->p_on_queue_cnt;
    log.u_bbr.flex7 = hpts->p_cpu;
    log.u_bbr.flex8 = (uint8_t)from_callout;
    log.u_bbr.inflight = slots_to_run;
    log.u_bbr.applimited = hpts->overidden_sleep;
    log.u_bbr.delivered = hpts->saved_curtick;
    log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
    log.u_bbr.epoch = hpts->saved_curslot;
    log.u_bbr.lt_epoch = hpts->saved_prev_slot;
    log.u_bbr.pkts_out = hpts->p_delayed_by;
    log.u_bbr.lost = hpts->p_hpts_sleep_time;
    log.u_bbr.pacing_gain = hpts->p_cpu;
    log.u_bbr.pkt_epoch = hpts->p_runningslot;
    log.u_bbr.use_lt_bw = 1;
    TCP_LOG_EVENTP(tp, NULL,
	&tp->t_inpcb->inp_socket->so_rcv,
	&tp->t_inpcb->inp_socket->so_snd,
	TCP_LOG_HPTS, 0,
	0, &log, false, tv);
}

static inline void
tcp_wakehpts(struct tcp_hpts_entry *hpts)
{
    HPTS_MTX_ASSERT(hpts);

    if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
	hpts->p_direct_wake = 0;
	return;
    }
    if (hpts->p_hpts_wake_scheduled == 0) {
	hpts->p_hpts_wake_scheduled = 1;
	swi_sched(hpts->ie_cookie, 0);
    }
}

static void
hpts_timeout_swi(void *arg)
{
    struct tcp_hpts_entry *hpts;

    hpts = (struct tcp_hpts_entry *)arg;
    swi_sched(hpts->ie_cookie, 0);
}

static void
inp_hpts_insert(struct inpcb *inp, struct tcp_hpts_entry *hpts)
{
    struct hptsh *hptsh;

    INP_WLOCK_ASSERT(inp);
    HPTS_MTX_ASSERT(hpts);
    MPASS(hpts->p_cpu == inp->inp_hpts_cpu);
    MPASS(!(inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)));

    hptsh = &hpts->p_hptss[inp->inp_hptsslot];

    if (inp->inp_in_hpts == IHPTS_NONE) {
	inp->inp_in_hpts = IHPTS_ONQUEUE;
	in_pcbref(inp);
    } else if (inp->inp_in_hpts == IHPTS_MOVING) {
	inp->inp_in_hpts = IHPTS_ONQUEUE;
    } else
	MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
    inp->inp_hpts_gencnt = hptsh->gencnt;

    TAILQ_INSERT_TAIL(&hptsh->head, inp, inp_hpts);
    hptsh->count++;
    hpts->p_on_queue_cnt++;
}

static struct tcp_hpts_entry *
tcp_hpts_lock(struct inpcb *inp)
{
    struct tcp_hpts_entry *hpts;

    INP_LOCK_ASSERT(inp);

    hpts = tcp_pace.rp_ent[inp->inp_hpts_cpu];
    HPTS_LOCK(hpts);

    return (hpts);
}

static void
inp_hpts_release(struct inpcb *inp)
{
    bool released __diagused;

    inp->inp_in_hpts = IHPTS_NONE;
    released = in_pcbrele_wlocked(inp);
    MPASS(released == false);
}

/*
 * Called normally with the INP_LOCKED but it
 * does not matter, the hpts lock is the key
 * but the lock order allows us to hold the
 * INP lock and then get the hpts lock.
 */
void
tcp_hpts_remove(struct inpcb *inp)
{
    struct tcp_hpts_entry *hpts;
    struct hptsh *hptsh;

    INP_WLOCK_ASSERT(inp);

    hpts = tcp_hpts_lock(inp);
    if (inp->inp_in_hpts == IHPTS_ONQUEUE) {
	hptsh = &hpts->p_hptss[inp->inp_hptsslot];
	inp->inp_hpts_request = 0;
	if (__predict_true(inp->inp_hpts_gencnt == hptsh->gencnt)) {
	    TAILQ_REMOVE(&hptsh->head, inp, inp_hpts);
	    MPASS(hptsh->count > 0);
	    hptsh->count--;
	    MPASS(hpts->p_on_queue_cnt > 0);
	    hpts->p_on_queue_cnt--;
	    inp_hpts_release(inp);
	} else {
	    /*
	     * tcp_hptsi() now owns the TAILQ head of this inp.
	     * Can't TAILQ_REMOVE, just mark it.
	     */
#ifdef INVARIANTS
	    struct inpcb *tmp;

	    TAILQ_FOREACH(tmp, &hptsh->head, inp_hpts)
		MPASS(tmp != inp);
#endif
	    inp->inp_in_hpts = IHPTS_MOVING;
	    inp->inp_hptsslot = -1;
	}
    } else if (inp->inp_in_hpts == IHPTS_MOVING) {
	/*
	 * Handle a special race condition:
	 * tcp_hptsi() moves inpcb to detached tailq
	 * tcp_hpts_remove() marks as IHPTS_MOVING, slot = -1
	 * tcp_hpts_insert() sets slot to a meaningful value
	 * tcp_hpts_remove() again (we are here!), then in_pcbdrop()
	 * tcp_hptsi() finds pcb with meaningful slot and INP_DROPPED
	 */
	inp->inp_hptsslot = -1;
    }
    HPTS_UNLOCK(hpts);
}

bool
tcp_in_hpts(struct inpcb *inp)
{

    return (inp->inp_in_hpts == IHPTS_ONQUEUE);
}

static inline uint32_t
hpts_slot(uint32_t wheel_slot, uint32_t plus)
{
    /*
     * Given a slot on the wheel, what slot
     * is that plus ticks out?
     */
    KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
    return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
}

static inline int
tick_to_wheel(uint32_t cts_in_wticks)
{
    /*
     * Given a timestamp in wheel ticks (10 usec units; multiply by
     * 10, the number of usecs in a slot, to get back to real time),
     * map it onto our limited-space wheel.
     */
    return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
}

static inline int
hpts_slots_diff(int prev_slot, int slot_now)
{
    /*
     * Given two slots that are somewhere
     * on our wheel, how far apart are they?
     */
    if (slot_now > prev_slot)
	return (slot_now - prev_slot);
    else if (slot_now == prev_slot)
	/*
	 * Special case, same means we can go all of our
	 * wheel less one slot.
	 */
	return (NUM_OF_HPTSI_SLOTS - 1);
    else
	return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
}
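
/*
 * A small worked example of the two helpers above (illustrative
 * arithmetic only, following from NUM_OF_HPTSI_SLOTS == 102400):
 * starting at slot 102398, hpts_slot(102398, 5) wraps around to
 * (102398 + 5) % 102400 == 3. Going the other way,
 * hpts_slots_diff(102398, 3) sees slot_now < prev_slot and returns
 * (102400 - 102398) + 3 == 5, the same distance.
 */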

/*
 * Given a slot on the wheel that is the current time
 * mapped to the wheel (wheel_slot), what is the maximum
 * distance forward that can be obtained without
 * wrapping past either prev_slot or running_slot
 * depending on the hpts state? Also if passed
 * a uint32_t *, fill it with the slot location.
 *
 * Note if you do not give this function the current
 * time (that you think it is) mapped to the wheel slot
 * then the results will not be what you expect and
 * could lead to invalid inserts.
 */
static inline int32_t
max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
{
    uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;

    if ((hpts->p_hpts_active == 1) &&
	(hpts->p_wheel_complete == 0)) {
	end_slot = hpts->p_runningslot;
	/* Back up one tick */
	if (end_slot == 0)
	    end_slot = NUM_OF_HPTSI_SLOTS - 1;
	else
	    end_slot--;
	if (target_slot)
	    *target_slot = end_slot;
    } else {
	/*
	 * For the case where we are
	 * not active, or we have
	 * completed the pass over
	 * the wheel, we can use the
	 * prev tick and subtract one from it. This puts us
	 * as far out as possible on the wheel.
	 */
	end_slot = hpts->p_prev_slot;
	if (end_slot == 0)
	    end_slot = NUM_OF_HPTSI_SLOTS - 1;
	else
	    end_slot--;
	if (target_slot)
	    *target_slot = end_slot;
	/*
	 * Now we have close to the full wheel left minus the
	 * time it has been since the pacer went to sleep. Note
	 * that wheel_slot, passed in, should be the current time
	 * from the perspective of the caller, mapped to the wheel.
	 */
	if (hpts->p_prev_slot != wheel_slot)
	    dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
	else
	    dis_to_travel = 1;
	/*
	 * dis_to_travel in this case is the space from when the
	 * pacer stopped (p_prev_slot) and where our wheel_slot
	 * is now. To know how many slots we can put it in we
	 * subtract from the wheel size. We would not want
	 * to place something after p_prev_slot or it will
	 * get run too soon.
	 */
	return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
    }
    /*
     * So how many slots are open between p_runningslot -> p_cur_slot?
     * That is what is currently un-available for insertion. Special
     * case when we are at the last slot, this gets 1, so that
     * the answer to how many slots are available is all but 1.
     */
    if (hpts->p_runningslot == hpts->p_cur_slot)
	dis_to_travel = 1;
    else
	dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
    /*
     * How long has the pacer been running?
     */
    if (hpts->p_cur_slot != wheel_slot) {
	/* The pacer is a bit late */
	pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
    } else {
	/* The pacer is right on time, now == pacers start time */
	pacer_to_now = 0;
    }
    /*
     * To get the number left we can insert into we simply
     * subtract the distance the pacer has to run from how
     * many slots there are.
     */
    avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
    /*
     * Now how many of those we will eat due to the pacer's
     * time (p_cur_slot) of start being behind the
     * real time (wheel_slot)?
     */
    if (avail_on_wheel <= pacer_to_now) {
	/*
	 * Wheel wrap, we can't fit on the wheel, that
	 * is unusual the system must be way overloaded!
	 * Insert into the assured slot, and return special
	 * "0".
	 */
	counter_u64_add(combined_wheel_wrap, 1);
	*target_slot = hpts->p_nxt_slot;
	return (0);
    } else {
	/*
	 * We know how many slots are open
	 * on the wheel (the reverse of what
	 * is left to run). Take away the time
	 * the pacer started to now (wheel_slot)
	 * and that tells you how many slots are
	 * open that can be inserted into that won't
	 * be touched by the pacer until later.
	 */
	return (avail_on_wheel - pacer_to_now);
    }
}
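
/*
 * A worked example of max_slots_available() (illustrative numbers
 * only): suppose the pacer is asleep with p_prev_slot == 1000 and the
 * caller's current time maps to wheel_slot == 1010. The inactive
 * branch above sets *target_slot to 999 (one before where the pacer
 * stopped), computes dis_to_travel = 10, and reports
 * 102400 - 10 == 102390 usable slots.
 */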

#ifdef INVARIANTS
static void
check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t inp_hptsslot, int line)
{
    /*
     * Sanity checks for the pacer with invariants
     * on insert.
     */
    KASSERT(inp_hptsslot < NUM_OF_HPTSI_SLOTS,
	("hpts:%p inp:%p slot:%d > max",
	 hpts, inp, inp_hptsslot));
    if ((hpts->p_hpts_active) &&
	(hpts->p_wheel_complete == 0)) {
	/*
	 * If the pacer is processing an arc
	 * of the wheel, we need to make
	 * sure we are not inserting within
	 * that arc.
	 */
	int distance, yet_to_run;

	distance = hpts_slots_diff(hpts->p_runningslot, inp_hptsslot);
	if (hpts->p_runningslot != hpts->p_cur_slot)
	    yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
	else
	    yet_to_run = 0;	/* processing last slot */
	KASSERT(yet_to_run <= distance,
	    ("hpts:%p inp:%p slot:%d distance:%d yet_to_run:%d rs:%d cs:%d",
	     hpts, inp, inp_hptsslot,
	     distance, yet_to_run,
	     hpts->p_runningslot, hpts->p_cur_slot));
    }
}
#endif

uint32_t
tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag)
{
    struct tcp_hpts_entry *hpts;
    struct timeval tv;
    uint32_t slot_on, wheel_cts, last_slot, need_new_to = 0;
    int32_t wheel_slot, maxslots;
    bool need_wakeup = false;

    INP_WLOCK_ASSERT(inp);
    MPASS(!tcp_in_hpts(inp));
    MPASS(!(inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)));

    /*
     * We now return the next-slot the hpts will be on, beyond its
     * current run (if up) or where it was when it stopped if it is
     * sleeping.
     */
    hpts = tcp_hpts_lock(inp);
    microuptime(&tv);
    if (diag) {
	memset(diag, 0, sizeof(struct hpts_diag));
	diag->p_hpts_active = hpts->p_hpts_active;
	diag->p_prev_slot = hpts->p_prev_slot;
	diag->p_runningslot = hpts->p_runningslot;
	diag->p_nxt_slot = hpts->p_nxt_slot;
	diag->p_cur_slot = hpts->p_cur_slot;
	diag->p_curtick = hpts->p_curtick;
	diag->p_lasttick = hpts->p_lasttick;
	diag->slot_req = slot;
	diag->p_on_min_sleep = hpts->p_on_min_sleep;
	diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
    }
    if (slot == 0) {
	/* Ok we need to set it on the hpts in the current slot */
	inp->inp_hpts_request = 0;
	if ((hpts->p_hpts_active == 0) || (hpts->p_wheel_complete)) {
	    /*
	     * A sleeping hpts we want in next slot to run
	     * note that in this state p_prev_slot == p_cur_slot
	     */
	    inp->inp_hptsslot = hpts_slot(hpts->p_prev_slot, 1);
	    if ((hpts->p_on_min_sleep == 0) &&
		(hpts->p_hpts_active == 0))
		need_wakeup = true;
	} else
	    inp->inp_hptsslot = hpts->p_runningslot;
	if (__predict_true(inp->inp_in_hpts != IHPTS_MOVING))
	    inp_hpts_insert(inp, hpts);
	if (need_wakeup) {
	    /*
	     * Activate the hpts if it is sleeping and its
	     * timeout is not 1.
	     */
	    hpts->p_direct_wake = 1;
	    tcp_wakehpts(hpts);
	}
	slot_on = hpts->p_nxt_slot;
	HPTS_UNLOCK(hpts);

	return (slot_on);
    }
    /* Get the current time relative to the wheel */
    wheel_cts = tcp_tv_to_hptstick(&tv);
    /* Map it onto the wheel */
    wheel_slot = tick_to_wheel(wheel_cts);
    /* Now what's the max we can place it at? */
    maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
    if (diag) {
	diag->wheel_slot = wheel_slot;
	diag->maxslots = maxslots;
	diag->wheel_cts = wheel_cts;
    }
    if (maxslots == 0) {
	/* The pacer is in a wheel wrap behind, yikes! */
	if (slot > 1) {
	    /*
	     * Reduce by 1 to prevent a forever loop in
	     * case something else is wrong. Note this
	     * probably does not hurt because the pacer
	     * if its true is so far behind we will be
	     * > 1 second late calling anyway.
	     */
	    slot--;
	}
	inp->inp_hptsslot = last_slot;
	inp->inp_hpts_request = slot;
    } else if (maxslots >= slot) {
	/* It all fits on the wheel */
	inp->inp_hpts_request = 0;
	inp->inp_hptsslot = hpts_slot(wheel_slot, slot);
    } else {
	/* It does not fit */
	inp->inp_hpts_request = slot - maxslots;
	inp->inp_hptsslot = last_slot;
    }
    if (diag) {
	diag->slot_remaining = inp->inp_hpts_request;
	diag->inp_hptsslot = inp->inp_hptsslot;
    }
#ifdef INVARIANTS
    check_if_slot_would_be_wrong(hpts, inp, inp->inp_hptsslot, line);
#endif
    if (__predict_true(inp->inp_in_hpts != IHPTS_MOVING))
	inp_hpts_insert(inp, hpts);
    if ((hpts->p_hpts_active == 0) &&
	(inp->inp_hpts_request == 0) &&
	(hpts->p_on_min_sleep == 0)) {
	/*
	 * The hpts is sleeping and NOT on a minimum
	 * sleep time, we need to figure out where
	 * it will wake up at and if we need to reschedule
	 * its time-out.
	 */
	uint32_t have_slept, yet_to_sleep;

	/* Now do we need to restart the hpts's timer? */
	have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
	if (have_slept < hpts->p_hpts_sleep_time)
	    yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
	else {
	    /* We are over-due */
	    yet_to_sleep = 0;
	    need_wakeup = true;
	}
	if (diag) {
	    diag->have_slept = have_slept;
	    diag->yet_to_sleep = yet_to_sleep;
	}
	if (need_wakeup ||
	    (yet_to_sleep > slot)) {
	    /*
	     * We need to reschedule the hpts's time-out.
	     */
	    hpts->p_hpts_sleep_time = slot;
	    need_new_to = slot * HPTS_TICKS_PER_SLOT;
	}
    }
    /*
     * Now how far is the hpts sleeping to? if active is 1, its
     * up and ticking we do nothing, otherwise we may need to
     * reschedule its callout if need_new_to is set from above.
     */
    if (need_wakeup) {
	hpts->p_direct_wake = 1;
	tcp_wakehpts(hpts);
	if (diag) {
	    diag->need_new_to = 0;
	    diag->co_ret = 0xffff0000;
	}
    } else if (need_new_to) {
	int32_t co_ret;
	sbintime_t sb;

	tv.tv_sec = 0;
	tv.tv_usec = 0;
	while (need_new_to > HPTS_USEC_IN_SEC) {
	    tv.tv_sec++;
	    need_new_to -= HPTS_USEC_IN_SEC;
	}
	tv.tv_usec = need_new_to;
	sb = tvtosbt(tv);
	co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
	    hpts_timeout_swi, hpts, hpts->p_cpu,
	    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	if (diag) {
	    diag->need_new_to = need_new_to;
	    diag->co_ret = co_ret;
	}
    }
    slot_on = hpts->p_nxt_slot;
    HPTS_UNLOCK(hpts);

    return (slot_on);
}

static uint16_t
hpts_random_cpu(struct inpcb *inp)
{
    /*
     * No flow type set, distribute the load randomly.
     */
    uint16_t cpuid;
    uint32_t ran;

    /*
     * Shortcut if it is already set. XXXGL: does it happen?
     */
    if (inp->inp_hpts_cpu_set) {
	return (inp->inp_hpts_cpu);
    }
    /* Nothing set, use a random number */
    ran = arc4random();
    cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
    return (cpuid);
}

static uint16_t
hpts_cpuid(struct inpcb *inp, int *failed)
{
    u_int cpuid;
#ifdef NUMA
    struct hpts_domain_info *di;
#endif

    *failed = 0;
    if (inp->inp_hpts_cpu_set) {
	return (inp->inp_hpts_cpu);
    }
    /*
     * If we are using the irq cpu set by LRO or
     * the driver then it overrides all other domains.
     */
    if (tcp_use_irq_cpu) {
	if (inp->inp_irq_cpu_set == 0) {
	    *failed = 1;
	    return (0);
	}
	return (inp->inp_irq_cpu);
    }
    /* If one is set the other must be the same */
#ifdef RSS
    cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
    if (cpuid == NETISR_CPUID_NONE)
	return (hpts_random_cpu(inp));
    else
	return (cpuid);
#endif
    /*
     * We don't have a flowid -> cpuid mapping, so cheat and just map
     * unknown cpuids to curcpu. Not the best, but apparently better
     * than defaulting to swi 0.
     */
    if (inp->inp_flowtype == M_HASHTYPE_NONE) {
	counter_u64_add(cpu_uses_random, 1);
	return (hpts_random_cpu(inp));
    }
    /*
     * Hash to a thread based on the flowid. If we are using numa,
     * then restrict the hash to the numa domain where the inp lives.
     */
#ifdef NUMA
    if ((vm_ndomains == 1) ||
	(inp->inp_numa_domain == M_NODOM)) {
#endif
	cpuid = inp->inp_flowid % mp_ncpus;
#ifdef NUMA
    } else {
	/* Hash into the cpu's that use that domain */
	di = &hpts_domains[inp->inp_numa_domain];
	cpuid = di->cpu[inp->inp_flowid % di->count];
    }
#endif
    counter_u64_add(cpu_uses_flowid, 1);
    return (cpuid);
}

#ifdef not_longer_used_gleb
static void
tcp_drop_in_pkts(struct tcpcb *tp)
{
    struct mbuf *m, *n;

    /* Free any queued-but-unprocessed input segments. */
    m = tp->t_in_pkt;
    if (m)
	n = m->m_nextpkt;
    else
	n = NULL;
    tp->t_in_pkt = NULL;
    while (m) {
	m_freem(m);
	m = n;
	if (m)
	    n = m->m_nextpkt;
    }
}
#endif

static void
tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
{
    uint32_t t = 0, i;

    if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
	/*
	 * Find the next slot that is occupied and use that to
	 * be the sleep time.
	 */
	for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
	    if (TAILQ_EMPTY(&hpts->p_hptss[t].head) == 0) {
		break;
	    }
	    t = (t + 1) % NUM_OF_HPTSI_SLOTS;
	}
	KASSERT((i != NUM_OF_HPTSI_SLOTS), ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
	hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
    } else {
	/* No one on the wheel, sleep for all but 400 slots, or sleep max */
	hpts->p_hpts_sleep_time = hpts_sleep_max;
    }
}

static int32_t
tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout)
{
    struct tcpcb *tp;
    struct timeval tv;
    uint64_t total_slots_processed = 0;
    int32_t slots_to_run, i, error;
    int32_t paced_cnt = 0;
    int32_t loop_cnt = 0;
    int32_t did_prefetch = 0;
    int32_t prefetch_ninp = 0;
    int32_t prefetch_tp = 0;
    int32_t wrap_loop_cnt = 0;
    int32_t slot_pos_of_endpoint = 0;
    int32_t orig_exit_slot;
    int8_t completed_measure = 0, seen_endpoint = 0;

    HPTS_MTX_ASSERT(hpts);
    NET_EPOCH_ASSERT();
    /* record previous info for any logging */
    hpts->saved_lasttick = hpts->p_lasttick;
    hpts->saved_curtick = hpts->p_curtick;
    hpts->saved_curslot = hpts->p_cur_slot;
    hpts->saved_prev_slot = hpts->p_prev_slot;

    hpts->p_lasttick = hpts->p_curtick;
    hpts->p_curtick = tcp_gethptstick(&tv);
    cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
    orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
    if ((hpts->p_on_queue_cnt == 0) ||
	(hpts->p_lasttick == hpts->p_curtick)) {
	/*
	 * No time has yet passed, or nothing is on the
	 * wheel; it is still the same tick, nothing to run.
	 */
	hpts->p_prev_slot = hpts->p_cur_slot;
	hpts->p_lasttick = hpts->p_curtick;
	goto no_run;
    }
again:
    hpts->p_wheel_complete = 0;
    HPTS_MTX_ASSERT(hpts);
    slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
    if (((hpts->p_curtick - hpts->p_lasttick) >
	 ((NUM_OF_HPTSI_SLOTS-1) * HPTS_TICKS_PER_SLOT)) &&
	(hpts->p_on_queue_cnt != 0)) {
	/*
	 * Wheel wrap is occurring, basically we
	 * are behind and the distance between
	 * runs has spread so much it has exceeded
	 * the time on the wheel (1.024 seconds). This
	 * is ugly and should NOT be happening. We
	 * need to run the entire wheel. We last processed
	 * p_prev_slot, so that needs to be the last slot
	 * we run. The next slot after that should be our
	 * reserved first slot for new, and then starts
	 * the running position. Now the problem is the
	 * reserved "not to yet" place does not exist
	 * and there may be inp's in there that need
	 * running. We can merge those into the
	 * first slot at the head.
	 */
	wrap_loop_cnt++;
	hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
	hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
	/*
	 * Adjust p_cur_slot to be where we are starting from
	 * hopefully we will catch up (fat chance if something
	 * is broken this bad :( )
	 */
	hpts->p_cur_slot = hpts->p_prev_slot;
	/*
	 * The next slot has guys to run too, and that would
	 * be where we would normally start, lets move them into
	 * the next slot (p_prev_slot + 2) so that we will
	 * run them, the extra 10usecs of lateness (by being
	 * put behind) does not really matter in this situation.
	 */
	TAILQ_FOREACH(inp, &hpts->p_hptss[hpts->p_nxt_slot].head,
	    inp_hpts) {
	    MPASS(inp->inp_hptsslot == hpts->p_nxt_slot);
	    MPASS(inp->inp_hpts_gencnt ==
		hpts->p_hptss[hpts->p_nxt_slot].gencnt);
	    MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);

	    /*
	     * Update gencnt and nextslot accordingly to match
	     * the new location. This is safe since it takes both
	     * the INP lock and the pacer mutex to change the
	     * inp_hptsslot and inp_hpts_gencnt.
	     */
	    inp->inp_hpts_gencnt =
		hpts->p_hptss[hpts->p_runningslot].gencnt;
	    inp->inp_hptsslot = hpts->p_runningslot;
	}
	TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot].head,
	    &hpts->p_hptss[hpts->p_nxt_slot].head, inp_hpts);
	hpts->p_hptss[hpts->p_runningslot].count +=
	    hpts->p_hptss[hpts->p_nxt_slot].count;
	hpts->p_hptss[hpts->p_nxt_slot].count = 0;
	hpts->p_hptss[hpts->p_nxt_slot].gencnt++;
	slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
	counter_u64_add(wheel_wrap, 1);
    } else {
	/*
	 * Nxt slot is always one after p_runningslot though
	 * its not used usually unless we are doing wheel wrap.
	 */
	hpts->p_nxt_slot = hpts->p_prev_slot;
	hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
    }
    if (hpts->p_on_queue_cnt == 0) {
	goto no_one;
    }
    for (i = 0; i < slots_to_run; i++) {
	struct inpcb *inp, *ninp;
	TAILQ_HEAD(, inpcb) head = TAILQ_HEAD_INITIALIZER(head);
	struct hptsh *hptsh;
	uint32_t runningslot;

	/*
	 * Calculate our delay, if there are no extra ticks there
	 * was not any (i.e. if slots_to_run == 1, no delay).
	 */
	hpts->p_delayed_by = (slots_to_run - (i + 1)) *
	    HPTS_TICKS_PER_SLOT;

	runningslot = hpts->p_runningslot;
	hptsh = &hpts->p_hptss[runningslot];
	TAILQ_SWAP(&head, &hptsh->head, inpcb, inp_hpts);
	hpts->p_on_queue_cnt -= hptsh->count;
	hptsh->count = 0;
	hptsh->gencnt++;

	TAILQ_FOREACH_SAFE(inp, &head, inp_hpts, ninp) {
	    bool set_cpu;

	    if (ninp != NULL) {
		/* We prefetch the next inp if possible */
		kern_prefetch(ninp, &prefetch_ninp);
		prefetch_ninp = 1;
	    }

	    /* For debugging */
	    if (seen_endpoint == 0) {
		seen_endpoint = 1;
		orig_exit_slot = slot_pos_of_endpoint =
		    runningslot;
	    } else if (completed_measure == 0) {
		/* Record the new position */
		orig_exit_slot = runningslot;
	    }
	    total_slots_processed++;
	    paced_cnt++;

	    INP_WLOCK(inp);
	    if (inp->inp_hpts_cpu_set == 0) {
		set_cpu = true;
	    } else {
		set_cpu = false;
	    }

	    if (__predict_false(inp->inp_in_hpts == IHPTS_MOVING)) {
		if (inp->inp_hptsslot == -1) {
		    inp->inp_in_hpts = IHPTS_NONE;
		    if (in_pcbrele_wlocked(inp) == false)
			INP_WUNLOCK(inp);
		} else {
		    inp_hpts_insert(inp, hpts);
		    INP_WUNLOCK(inp);
		}
		continue;
	    }

	    MPASS(inp->inp_in_hpts == IHPTS_ONQUEUE);
	    MPASS(!(inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)));
	    KASSERT(runningslot == inp->inp_hptsslot,
		("Hpts:%p inp:%p slot mis-aligned %u vs %u",
		 hpts, inp, runningslot, inp->inp_hptsslot));

	    if (inp->inp_hpts_request) {
		/*
		 * This guy is deferred out further in time
		 * than our wheel had available on it.
		 * Push him back on the wheel or run it
		 * depending.
		 */
		uint32_t maxslots, last_slot, remaining_slots;

		remaining_slots = slots_to_run - (i + 1);
		if (inp->inp_hpts_request > remaining_slots) {
		    HPTS_MTX_ASSERT(hpts);
		    /*
		     * How far out can we go?
		     */
		    maxslots = max_slots_available(hpts,
			hpts->p_cur_slot, &last_slot);
		    if (maxslots >= inp->inp_hpts_request) {
			/* We can place it finally to
			 * be processed. */
			inp->inp_hptsslot = hpts_slot(
			    hpts->p_runningslot,
			    inp->inp_hpts_request);
			inp->inp_hpts_request = 0;
		    } else {
			/* Work off some more time */
			inp->inp_hptsslot = last_slot;
			inp->inp_hpts_request -=
			    maxslots;
		    }
		    inp_hpts_insert(inp, hpts);
		    INP_WUNLOCK(inp);
		    continue;
		}
		inp->inp_hpts_request = 0;
		/* Fall through, we will do it now */
	    }

	    inp_hpts_release(inp);
	    tp = intotcpcb(inp);
	    MPASS(tp);
	    if (set_cpu) {
		/*
		 * Setup so the next time we will move to
		 * the right CPU. This should be a rare
		 * event. It will sometimes happen when we
		 * are the client side (usually not the
		 * server). Somehow tcp_output() gets called
		 * before the tcp_do_segment() sets the
		 * initial state. This means the r_cpu and
		 * r_hpts_cpu is 0. We get on the hpts, and
		 * then tcp_input() gets called setting up
		 * the r_cpu to the correct value. The hpts
		 * goes off and sees the mis-match. We
		 * simply correct it here and the CPU will
		 * switch to the new hpts next time the tcb
		 * gets added to the hpts (not this one)
		 * :-)
		 */
		tcp_set_hpts(inp);
	    }
	    CURVNET_SET(inp->inp_vnet);
	    /* Lets do any logging that we might want to */
	    if (hpts_does_tp_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
		tcp_hpts_log(hpts, tp, &tv, slots_to_run, i, from_callout);
	    }

	    if (tp->t_fb_ptr != NULL) {
		kern_prefetch(tp->t_fb_ptr, &did_prefetch);
		did_prefetch = 1;
	    }
	    if ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) && tp->t_in_pkt) {
		error = (*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0);
		if (error) {
		    /* The input killed the connection */
		    goto skip_pacing;
		}
	    }
	    inp->inp_hpts_calls = 1;
	    error = tcp_output(tp);
	    if (error < 0)
		goto skip_pacing;
	    inp->inp_hpts_calls = 0;
	    if (ninp && ninp->inp_ppcb) {
		/*
		 * If we have a nxt inp, see if we can
		 * prefetch its ppcb. Note this may seem
		 * "risky" since we have no locks (other
		 * than the previous inp) and there is no
		 * assurance that ninp was not pulled while
		 * we were processing inp and freed. If this
		 * occurred it could mean that either:
		 *
		 * a) Its NULL (which is fine we won't go
		 * here) <or> b) Its valid (which is cool we
		 * will prefetch it) <or> c) The inp got
		 * freed back to the slab which was
		 * reallocated. Then the piece of memory was
		 * re-used and something else (not an
		 * address) is in inp_ppcb. If that occurs
		 * we don't crash, but take a TLB shootdown
		 * performance hit (same as if it was NULL
		 * and we tried to pre-fetch it).
		 *
		 * Considering that the likelihood of <c> is
		 * quite rare we will take a risk on doing
		 * this. If performance drops after testing
		 * we can always take this out. NB: the
		 * kern_prefetch on amd64 actually has
		 * protection against a bad address now via
		 * the DMAP_() tests. This will prevent the
		 * TLB hit, and instead if <c> occurs just
		 * cause us to load cache with a useless
		 * address.
		 */
		kern_prefetch(ninp->inp_ppcb, &prefetch_tp);
		prefetch_tp = 1;
	    }
	    INP_WUNLOCK(inp);
	skip_pacing:
	    CURVNET_RESTORE();
	}
	if (seen_endpoint) {
	    /*
	     * We now have an accurate distance between
	     * slot_pos_of_endpoint <-> orig_exit_slot
	     * to tell us how late we were, orig_exit_slot
	     * is where we calculated the end of our cycle to
	     * be when we first entered.
	     */
	    completed_measure = 1;
	}
	hpts->p_runningslot++;
	if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
	    hpts->p_runningslot = 0;
	}
    }
no_one:
    HPTS_MTX_ASSERT(hpts);
    hpts->p_delayed_by = 0;
    /*
     * Check to see if we took an excess amount of time and need to run
     * more ticks (if we did not hit ENOBUFS).
     */
    hpts->p_prev_slot = hpts->p_cur_slot;
    hpts->p_lasttick = hpts->p_curtick;
    if ((from_callout == 0) || (loop_cnt > max_pacer_loops)) {
	/*
	 * Something is seriously slow; we have
	 * looped through processing the wheel
	 * and, by the time we had cleared the
	 * needs-to-run max_pacer_loops times,
	 * we still needed to run. That means
	 * the system is hopelessly behind and
	 * can never catch up :(
	 *
	 * We will just lie to this thread
	 * and let it think p_curtick is
	 * correct. When it next awakens
	 * it will find itself further behind.
	 */
	if (from_callout)
	    counter_u64_add(hpts_hopelessly_behind, 1);
	goto no_run;
    }
    hpts->p_curtick = tcp_gethptstick(&tv);
    hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
    if (seen_endpoint == 0) {
	/* We saw no endpoint but we may be looping */
	orig_exit_slot = hpts->p_cur_slot;
    }
    if ((wrap_loop_cnt < 2) &&
	(hpts->p_lasttick != hpts->p_curtick)) {
	counter_u64_add(hpts_loops, 1);
	loop_cnt++;
	goto again;
    }
no_run:
    cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
    /*
     * Set flag to tell that we are done for
     * any slot input that happens during
     * this run.
     */
    hpts->p_wheel_complete = 1;
    /*
     * Now did we spend too long running input and need to run more ticks?
     * Note that if wrap_loop_cnt < 2 then we should have the conditions
     * in the KASSERT's true. But if the wheel is behind i.e. wrap_loop_cnt
     * is greater than 2, then the conditions most likely are *not* true.
     * Also if we are called not from the callout, we don't run the wheel
     * multiple times so the slots may not align either.
     */
    KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
	     (wrap_loop_cnt >= 2) || (from_callout == 0)),
	("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
	 hpts->p_prev_slot, hpts->p_cur_slot));
    KASSERT(((hpts->p_lasttick == hpts->p_curtick)
	     || (wrap_loop_cnt >= 2) || (from_callout == 0)),
	("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
	 hpts->p_lasttick, hpts->p_curtick));
    if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
	hpts->p_curtick = tcp_gethptstick(&tv);
	counter_u64_add(hpts_loops, 1);
	hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
	goto again;
    }

    if (from_callout)
	tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);

    return (hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
}

void
__tcp_set_hpts(struct inpcb *inp, int32_t line)
{
    struct tcp_hpts_entry *hpts;
    int failed;

    INP_WLOCK_ASSERT(inp);

    hpts = tcp_hpts_lock(inp);
    if ((inp->inp_in_hpts == 0) &&
	(inp->inp_hpts_cpu_set == 0)) {
	inp->inp_hpts_cpu = hpts_cpuid(inp, &failed);
	if (failed == 0)
	    inp->inp_hpts_cpu_set = 1;
    }
    mtx_unlock(&hpts->p_mtx);
}

static void
__tcp_run_hpts(struct tcp_hpts_entry *hpts)
{
    int ticks_ran;

    if (hpts->p_hpts_active) {
	/* Already active */
	return;
    }
    if (mtx_trylock(&hpts->p_mtx) == 0) {
	/* Someone else got the lock */
	return;
    }
    if (hpts->p_hpts_active)
	goto out_with_mtx;
    hpts->syscall_cnt++;
    counter_u64_add(hpts_direct_call, 1);
    hpts->p_hpts_active = 1;
    ticks_ran = tcp_hptsi(hpts, 0);
    /* We may want to adjust the sleep values here */
    if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
	if (ticks_ran > ticks_indicate_less_sleep) {
	    struct timeval tv;
	    sbintime_t sb;

	    hpts->p_mysleep.tv_usec /= 2;
	    if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
		hpts->p_mysleep.tv_usec = dynamic_min_sleep;
	    /* Reschedule with new to value */
	    tcp_hpts_set_max_sleep(hpts, 0);
	    tv.tv_sec = 0;
	    tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
	    /* Validate it is in the right ranges */
	    if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
		hpts->overidden_sleep = tv.tv_usec;
		tv.tv_usec = hpts->p_mysleep.tv_usec;
	    } else if (tv.tv_usec > dynamic_max_sleep) {
		/* Lets not let sleep get above this value */
		hpts->overidden_sleep = tv.tv_usec;
		tv.tv_usec = dynamic_max_sleep;
	    }
	    /*
	     * In this mode the timer is a backstop to
	     * all the userret/lro_flushes so we use
	     * the dynamic value and set the on_min_sleep
	     * flag so we will not be awoken.
	     */
	    hpts->p_on_min_sleep = 1;
	    /* Store off to make visible the actual sleep time */
	    hpts->sleeping = tv.tv_usec;
	    sb = tvtosbt(tv);
	    callout_reset_sbt_on(&hpts->co, sb, 0,
		hpts_timeout_swi, hpts, hpts->p_cpu,
		(C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	} else if (ticks_ran < ticks_indicate_more_sleep) {
	    /* For the further sleep, don't reschedule hpts */
	    hpts->p_mysleep.tv_usec *= 2;
	    if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
		hpts->p_mysleep.tv_usec = dynamic_max_sleep;
	}
	hpts->p_on_min_sleep = 1;
    }
    hpts->p_hpts_active = 0;
out_with_mtx:
    HPTS_MTX_ASSERT(hpts);
    mtx_unlock(&hpts->p_mtx);
}

static struct tcp_hpts_entry *
tcp_choose_hpts_to_run(void)
{
    int i, oldest_idx, start, end;
    uint32_t cts, time_since_ran, calc;

    cts = tcp_get_usecs(NULL);
    time_since_ran = 0;
    /* Default is all one group */
    start = 0;
    end = tcp_pace.rp_num_hptss;
    /*
     * If we have more than one L3 group figure out which one
     * this CPU is in.
     */
    if (tcp_pace.grp_cnt > 1) {
	for (i = 0; i < tcp_pace.grp_cnt; i++) {
	    if (CPU_ISSET(curcpu, &tcp_pace.grps[i]->cg_mask)) {
		start = tcp_pace.grps[i]->cg_first;
		end = (tcp_pace.grps[i]->cg_last + 1);
		break;
	    }
	}
    }
    oldest_idx = -1;
    for (i = start; i < end; i++) {
	if (TSTMP_GT(cts, cts_last_ran[i]))
	    calc = cts - cts_last_ran[i];
	else
	    calc = 0;
	if (calc > time_since_ran) {
	    oldest_idx = i;
	    time_since_ran = calc;
	}
    }
    if (oldest_idx >= 0)
	return (tcp_pace.rp_ent[oldest_idx]);
    else
	return (tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
}

void
tcp_run_hpts(void)
{
    static struct tcp_hpts_entry *hpts;
    struct epoch_tracker et;

    NET_EPOCH_ENTER(et);
    hpts = tcp_choose_hpts_to_run();
    __tcp_run_hpts(hpts);
    NET_EPOCH_EXIT(et);
}

static void
tcp_hpts_thread(void *ctx)
{
    struct tcp_hpts_entry *hpts;
    struct epoch_tracker et;
    struct timeval tv;
    sbintime_t sb;
    int ticks_ran;

    hpts = (struct tcp_hpts_entry *)ctx;
    mtx_lock(&hpts->p_mtx);
    if (hpts->p_direct_wake) {
	/* Signaled by input or output with low occupancy count. */
	callout_stop(&hpts->co);
	counter_u64_add(hpts_direct_awakening, 1);
    } else {
	/* Timed out, the normal case. */
	counter_u64_add(hpts_wake_timeout, 1);
	if (callout_pending(&hpts->co) ||
	    !callout_active(&hpts->co)) {
	    mtx_unlock(&hpts->p_mtx);
	    return;
	}
    }
    callout_deactivate(&hpts->co);
    hpts->p_hpts_wake_scheduled = 0;
    NET_EPOCH_ENTER(et);
    if (hpts->p_hpts_active) {
	/*
	 * We are active already. This means that a syscall
	 * trap or LRO is running on behalf of hpts. In that case
	 * we need to double our timeout since there seems to be
	 * enough activity in the system that we don't need to
	 * run as often (if we were not directly woken).
	 */
	tv.tv_sec = 0;
	if (hpts->p_direct_wake == 0) {
	    counter_u64_add(hpts_back_tosleep, 1);
	    if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
		hpts->p_mysleep.tv_usec *= 2;
		if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
		    hpts->p_mysleep.tv_usec = dynamic_max_sleep;
		tv.tv_usec = hpts->p_mysleep.tv_usec;
		hpts->p_on_min_sleep = 1;
	    } else {
		/*
		 * Here we have a low count on the wheel, but
		 * somehow we still collided with one of the
		 * connections. Lets go back to sleep for a
		 * min sleep time, but clear the flag so we
		 * can be awoken by insert.
		 */
		hpts->p_on_min_sleep = 0;
		tv.tv_usec = tcp_min_hptsi_time;
	    }
	} else {
	    /*
	     * Directly woken, most likely to reset the
	     * callout time.
	     */
	    tv.tv_usec = hpts->p_mysleep.tv_usec;
	}
	goto back_to_sleep;
    }
    hpts->sleeping = 0;
    hpts->p_hpts_active = 1;
    ticks_ran = tcp_hptsi(hpts, 1);
    tv.tv_sec = 0;
    tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
    if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
	if (hpts->p_direct_wake == 0) {
	    /*
	     * Only adjust sleep time if we were
	     * called from the callout i.e. direct_wake == 0.
	     */
	    if (ticks_ran < ticks_indicate_more_sleep) {
		hpts->p_mysleep.tv_usec *= 2;
		if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
		    hpts->p_mysleep.tv_usec = dynamic_max_sleep;
	    } else if (ticks_ran > ticks_indicate_less_sleep) {
		hpts->p_mysleep.tv_usec /= 2;
		if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
		    hpts->p_mysleep.tv_usec = dynamic_min_sleep;
	    }
	}
	if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
	    hpts->overidden_sleep = tv.tv_usec;
	    tv.tv_usec = hpts->p_mysleep.tv_usec;
	} else if (tv.tv_usec > dynamic_max_sleep) {
	    /* Lets not let sleep get above this value */
	    hpts->overidden_sleep = tv.tv_usec;
	    tv.tv_usec = dynamic_max_sleep;
	}
	/*
	 * In this mode the timer is a backstop to
	 * all the userret/lro_flushes so we use
	 * the dynamic value and set the on_min_sleep
	 * flag so we will not be awoken.
	 */
	hpts->p_on_min_sleep = 1;
    } else if (hpts->p_on_queue_cnt == 0) {
	/*
	 * No one on the wheel, please wake us up
	 * if you insert on the wheel.
	 */
	hpts->p_on_min_sleep = 0;
	hpts->overidden_sleep = 0;
    } else {
	/*
	 * We hit here when we have a low number of
	 * clients on the wheel (our else clause).
	 * We may need to go on min sleep, if we set
	 * the flag we will not be awoken if someone
	 * is inserted ahead of us. Clearing the flag
	 * means we can be awoken. This is "old mode"
	 * where the timer is what runs hpts mainly.
	 */
	if (tv.tv_usec < tcp_min_hptsi_time) {
	    /*
	     * Yes on min sleep, which means
	     * we cannot be awoken.
	     */
	    hpts->overidden_sleep = tv.tv_usec;
	    tv.tv_usec = tcp_min_hptsi_time;
	    hpts->p_on_min_sleep = 1;
	} else {
	    /* Clear the min sleep flag */
	    hpts->overidden_sleep = 0;
	    hpts->p_on_min_sleep = 0;
	}
    }
    HPTS_MTX_ASSERT(hpts);
    hpts->p_hpts_active = 0;
back_to_sleep:
    hpts->p_direct_wake = 0;
    sb = tvtosbt(tv);
    /* Store off to make visible the actual sleep time */
    hpts->sleeping = tv.tv_usec;
    callout_reset_sbt_on(&hpts->co, sb, 0,
	hpts_timeout_swi, hpts, hpts->p_cpu,
	(C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
    NET_EPOCH_EXIT(et);
    mtx_unlock(&hpts->p_mtx);
}

#undef timersub

static int32_t
hpts_count_level(struct cpu_group *cg)
{
    int32_t count_l3, i;

    count_l3 = 0;
    if (cg->cg_level == CG_SHARE_L3)
	count_l3++;
    /* Walk all the children looking for L3 */
    for (i = 0; i < cg->cg_children; i++) {
	count_l3 += hpts_count_level(&cg->cg_child[i]);
    }
    return (count_l3);
}

static void
hpts_gather_grps(struct cpu_group **grps, int32_t *at, int32_t max, struct cpu_group *cg)
{
    int32_t idx, i;

    idx = *at;
    if (cg->cg_level == CG_SHARE_L3) {
	if (idx == max)
	    return;
	grps[idx] = cg;
	*at = idx + 1;
	return;
    }
    /* Walk all the children looking for L3 */
    for (i = 0; i < cg->cg_children; i++) {
	hpts_gather_grps(grps, at, max, &cg->cg_child[i]);
    }
}

static void
tcp_init_hptsi(void *st)
{
    struct cpu_group *cpu_top;
    int32_t error __diagused;
    int32_t i, j, bound = 0, created = 0;
    size_t sz, asz;
    struct timeval tv;
    sbintime_t sb;
    struct tcp_hpts_entry *hpts;
    struct pcpu *pc;
    char unit[16];
    uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
    int count, domain;

#ifdef SMP
    cpu_top = smp_topo();
#else
    cpu_top = NULL;
#endif
    tcp_pace.rp_num_hptss = ncpus;
    hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
    hpts_loops = counter_u64_alloc(M_WAITOK);
    back_tosleep = counter_u64_alloc(M_WAITOK);
    combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
    wheel_wrap = counter_u64_alloc(M_WAITOK);
    hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
    hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
    hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
    hpts_direct_call = counter_u64_alloc(M_WAITOK);
    cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
    cpu_uses_random = counter_u64_alloc(M_WAITOK);

    sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
    tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
    sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
    cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
    tcp_pace.grp_cnt = 0;
    if (cpu_top == NULL) {
	tcp_pace.grp_cnt = 1;
    } else {
	/* Find out how many cache level 3 domains we have */
	tcp_pace.grp_cnt = hpts_count_level(cpu_top);
	if (tcp_pace.grp_cnt == 0) {
	    tcp_pace.grp_cnt = 1;
	}
    }
    sz = (tcp_pace.grp_cnt * sizeof(struct cpu_group *));
    tcp_pace.grps = malloc(sz, M_TCPHPTS, M_WAITOK);
    /* Now populate the groups */
    if (tcp_pace.grp_cnt == 1) {
	/*
	 * All we need is the top level, all cpu's are in
	 * the same cache so when we use grp[0]->cg_mask
	 * with the cg_first <-> cg_last it will include
	 * all cpu's in it. The level here is probably
	 * zero which is ok.
	 */
	tcp_pace.grps[0] = cpu_top;
    } else {
	/*
	 * Here we must find all the level three cache domains
	 * and setup our pointers to them.
	 */
	count = 0;
	hpts_gather_grps(tcp_pace.grps, &count, tcp_pace.grp_cnt, cpu_top);
    }
    asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
    for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
	tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
	    M_TCPHPTS, M_WAITOK | M_ZERO);
	tcp_pace.rp_ent[i]->p_hptss = malloc(asz, M_TCPHPTS, M_WAITOK);
	hpts = tcp_pace.rp_ent[i];
	/*
	 * Init all the hpts structures that are not specifically
	 * zero'd by the allocations. Also lets attach them to the
	 * appropriate sysctl block as well.
	 */
	mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
	    "hpts", MTX_DEF | MTX_DUPOK);
	for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
	    TAILQ_INIT(&hpts->p_hptss[j].head);
	    hpts->p_hptss[j].count = 0;
	    hpts->p_hptss[j].gencnt = 0;
	}
	sysctl_ctx_init(&hpts->hpts_ctx);
	sprintf(unit, "%d", i);
	hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
	    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
	    OID_AUTO,
	    unit,
	    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
	    "");
	SYSCTL_ADD_INT(&hpts->hpts_ctx,
	    SYSCTL_CHILDREN(hpts->hpts_root),
	    OID_AUTO, "out_qcnt", CTLFLAG_RD,
	    &hpts->p_on_queue_cnt, 0,
	    "Count TCB's awaiting output processing");
	SYSCTL_ADD_U16(&hpts->hpts_ctx,
	    SYSCTL_CHILDREN(hpts->hpts_root),
	    OID_AUTO, "active", CTLFLAG_RD,
	    &hpts->p_hpts_active, 0,
	    "Is the hpts active");
	SYSCTL_ADD_UINT(&hpts->hpts_ctx,
	    SYSCTL_CHILDREN(hpts->hpts_root),
	    OID_AUTO, "curslot", CTLFLAG_RD,
	    &hpts->p_cur_slot, 0,
	    "What the current running pacer's goal is");
	SYSCTL_ADD_UINT(&hpts->hpts_ctx,
	    SYSCTL_CHILDREN(hpts->hpts_root),
	    OID_AUTO, "runtick", CTLFLAG_RD,
	    &hpts->p_runningslot, 0,
	    "What the running pacer's current slot is");
	SYSCTL_ADD_UINT(&hpts->hpts_ctx,
	    SYSCTL_CHILDREN(hpts->hpts_root),
	    OID_AUTO, "curtick", CTLFLAG_RD,
	    &hpts->p_curtick, 0,
	    "What the running pacer's last tick mapped to the wheel was");
	SYSCTL_ADD_UINT(&hpts->hpts_ctx,
	    SYSCTL_CHILDREN(hpts->hpts_root),
	    OID_AUTO, "lastran", CTLFLAG_RD,
	    &cts_last_ran[i], 0,
	    "The last usec tick that this hpts ran");
	SYSCTL_ADD_LONG(&hpts->hpts_ctx,
	    SYSCTL_CHILDREN(hpts->hpts_root),
	    OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
	    &hpts->p_mysleep.tv_usec,
	    "What the running pacer is using for p_mysleep.tv_usec");
	SYSCTL_ADD_U64(&hpts->hpts_ctx,
	    SYSCTL_CHILDREN(hpts->hpts_root),
	    OID_AUTO, "now_sleeping", CTLFLAG_RD,
	    &hpts->sleeping, 0,
	    "What the running pacer is actually sleeping for");
	SYSCTL_ADD_U64(&hpts->hpts_ctx,
	    SYSCTL_CHILDREN(hpts->hpts_root),
	    OID_AUTO, "syscall_cnt", CTLFLAG_RD,
	    &hpts->syscall_cnt, 0,
	    "How many times we had syscalls on this hpts");

	hpts->p_hpts_sleep_time = hpts_sleep_max;
	hpts->p_num = i;
	hpts->p_curtick = tcp_gethptstick(&tv);
	cts_last_ran[i] = tcp_tv_to_usectick(&tv);
	hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
	hpts->p_cpu = 0xffff;
	hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
	callout_init(&hpts->co, 1);
    }
    /* Don't try to bind to NUMA domains if we don't have any */
    if (vm_ndomains == 1 && tcp_bind_threads == 2)
	tcp_bind_threads = 0;

    /*
     * Now lets start ithreads to handle the hptss.
     */
    for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
	hpts = tcp_pace.rp_ent[i];
	hpts->p_cpu = i;

	error = swi_add(&hpts->ie, "hpts",
	    tcp_hpts_thread, (void *)hpts,
	    SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
	KASSERT(error == 0,
	    ("Can't add hpts:%p i:%d err:%d",
	     hpts, i, error));
	created++;
	hpts->p_mysleep.tv_sec = 0;
	hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
	if (tcp_bind_threads == 1) {
	    if (intr_event_bind(hpts->ie, i) == 0)
		bound++;
	} else if (tcp_bind_threads == 2) {
	    /* Find the group for this CPU (i) and bind into it */
	    for (j = 0; j < tcp_pace.grp_cnt; j++) {
		if (CPU_ISSET(i, &tcp_pace.grps[j]->cg_mask)) {
		    if (intr_event_bind_ithread_cpuset(hpts->ie,
			&tcp_pace.grps[j]->cg_mask) == 0) {
			bound++;
			pc = pcpu_find(i);
			domain = pc->pc_domain;
			count = hpts_domains[domain].count;
			hpts_domains[domain].cpu[count] = i;
			hpts_domains[domain].count++;
			break;
		    }
		}
	    }
	}
	tv.tv_sec = 0;
	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
	hpts->sleeping = tv.tv_usec;
	sb = tvtosbt(tv);
	callout_reset_sbt_on(&hpts->co, sb, 0,
	    hpts_timeout_swi, hpts, hpts->p_cpu,
	    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
    }
    /*
     * If we somehow have an empty domain, fall back to choosing
     * among all hpts threads.
     */
    for (i = 0; i < vm_ndomains; i++) {
	if (hpts_domains[i].count == 0) {
	    tcp_bind_threads = 0;
	    break;
	}
    }
    printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
	created, bound,
	tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
#ifdef INVARIANTS
    printf("HPTS is in INVARIANT mode!!\n");
#endif
}

SYSINIT(tcphptsi, SI_SUB_SOFTINTR, SI_ORDER_ANY, tcp_init_hptsi, NULL);
MODULE_VERSION(tcphpts, 1);