/*
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include "opt_tcpdebug.h"
/*
 * Some notes about usage.
 *
 * The tcp_hpts system is designed to provide a high precision timer
 * system for tcp. Its main purpose is to provide a mechanism for
 * pacing packets out onto the wire. It can be used in two ways
 * by a given TCP stack (and those two methods can be used simultaneously).
 *
 * First, and probably the main thing it's used for by Rack and BBR, it can
 * be used to call tcp_output() of a transport stack at some time in the future.
 * The normal way this is done is that tcp_output() of the stack schedules
 * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
 * slot is the time from now that the stack wants to be called but it
 * must be converted to tcp_hpts's notion of slot. This is done with
 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
 * call from the tcp_output() routine might look like:
 *
 *	tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
 *
 * The above would schedule tcp_output() to be called in 550 useconds.
 * Note that if using this mechanism the stack will want to add a check
 * near its top to prevent unwanted calls (from user land or the
 * arrival of incoming ACKs). So it would add something like:
 *
 *	if (inp->inp_in_hpts)
 *		return;
 *
 * to prevent output processing until the time allotted has gone by.
 * Of course this is a bare-bones example and the stack will probably
 * have more considerations than just the above.
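 *
 * As a slightly fuller sketch (hypothetical stack code; the function
 * name and the "..." bodies below are illustrative only and not part
 * of this file -- note the insert wrapper takes the inpcb, per
 * __tcp_hpts_insert() further down), a paced output routine could be
 * shaped like:
 *
 *	static int
 *	example_stack_output(struct tcpcb *tp)
 *	{
 *		struct inpcb *inp = tp->t_inpcb;
 *
 *		if (inp->inp_in_hpts)
 *			return (0);	(pacer timer still pending)
 *		... send what the pacing budget allows right now ...
 *		tcp_hpts_insert(inp, HPTS_USEC_TO_SLOTS(550));
 *		return (0);
 *	}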
 *
 * Now the tcp_hpts system will call tcp_output in one of two forms:
 * it will first check to see if the stack has defined a
 * tfb_tcp_output_wtime() function; if so, that is the routine it
 * will call. If that function is not defined then it will call the
 * tfb_tcp_output() function. The only difference between these
 * two calls is that the former passes the time in to the function
 * so the function does not have to access the time (which tcp_hpts
 * already has). What these functions do is of course totally up
 * to the individual tcp stack.
 *
 * Now the second function (actually two functions I guess :D)
 * the tcp_hpts system provides is the ability to either abort
 * a connection (later) or process input on a connection.
 * Why would you want to do this? To keep processor locality.
 *
 * So in order to use the input redirection function the
 * stack changes its tcp_do_segment() routine to, instead
 * of processing the data, call the function:
 *
 *	tcp_queue_pkt_to_input()
 *
 * You will note that the arguments to this function look
 * a lot like tcp_do_segment()'s arguments. This function
 * will assure that the tcp_hpts system will
 * call the function tfb_tcp_hpts_do_segment() from the
 * correct CPU. Note that multiple calls can get pushed
 * into the tcp_hpts system; this will be indicated by
 * the next-to-last argument to tfb_tcp_hpts_do_segment()
 * (nxt_pkt). If nxt_pkt is a 1 then another packet is
 * coming. If nxt_pkt is a 0 then this is the last call
 * that the tcp_hpts system has available for the tcp stack.
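 *
 * A minimal sketch of such a handler (the name is hypothetical and
 * the argument list is abbreviated with "..."):
 *
 *	void
 *	example_hpts_do_segment(struct mbuf *m, struct tcphdr *th,
 *	    ..., int32_t nxt_pkt, struct timeval *tv)
 *	{
 *		... normal per-segment processing here ...
 *		if (nxt_pkt == 0) {
 *			... burst is over, no more queued segments;
 *			    finish deferred work (e.g. ACK/output) ...
 *		}
 *	}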
 *
 * The other point of the input system is to be able to safely
 * drop a tcp connection without worrying about the recursive
 * locking that may be occurring on the INP_WLOCK. So if
 * a stack wants to drop a connection it calls:
 *
 *	tcp_set_inp_to_drop(tp, ETIMEDOUT)
 *
 * to schedule the tcp_hpts system to call
 *
 *	tcp_drop(tp, drop_reason)
 *
 * at a future point. This is quite handy to prevent locking
 * issues when dropping connections.
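 *
 * For example (a sketch; the error predicate is hypothetical), a
 * stack that hits a fatal condition deep in its output path, while
 * already holding the INP_WLOCK, could defer the teardown:
 *
 *	if (example_fatal_error(tp)) {
 *		tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT);
 *		return;		(hpts calls tcp_drop() for us later)
 *	}
 */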
#include <sys/param.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <sys/time.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif				/* tcpdebug */
#include <netinet/tcp_offload.h>

MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
static int tcp_bind_threads = 1;
#else
static int tcp_bind_threads = 2;
#endif
TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);

static uint32_t tcp_hpts_logging_size = DEFAULT_HPTS_LOG;

TUNABLE_INT("net.inet.tcp.hpts_logging_sz", &tcp_hpts_logging_size);

static struct tcp_hptsi tcp_pace;

static void tcp_wakehpts(struct tcp_hpts_entry *p);
static void tcp_wakeinput(struct tcp_hpts_entry *p);
static void tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv);
static void tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick);
static void tcp_hpts_thread(void *ctx);
static void tcp_init_hptsi(void *st);

int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int32_t tcp_hpts_callout_skip_swi = 0;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW, 0, "TCP Hpts controls");
#define	timersub(tvp, uvp, vvp)					\
	do {							\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;	\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
		if ((vvp)->tv_usec < 0) {			\
			(vvp)->tv_sec--;			\
			(vvp)->tv_usec += 1000000;		\
		}						\
	} while (0)
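
/*
 * E.g. timersub(&now, &start, &delta) leaves now - start in delta,
 * normalizing tv_usec into [0, 1000000).
 */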
static int32_t logging_on = 0;
static int32_t hpts_sleep_max = (NUM_OF_HPTSI_SLOTS - 2);
static int32_t tcp_hpts_precision = 120;

struct hpts_domain_info {
	int count;
	int cpu[MAXCPU];
};

struct hpts_domain_info hpts_domains[MAXMEMDOM];
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
    &tcp_hpts_precision, 120,
    "Value for PRE() precision of callout");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
    &logging_on, 0,
    "Turn on logging if compiled in");

counter_u64_t hpts_loops;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, loops, CTLFLAG_RD,
    &hpts_loops, "Number of times hpts had to loop to catch up");

counter_u64_t back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
    &back_tosleep, "Number of times hpts found no tcbs");

static int32_t in_newts_every_tcb = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tsperpcb, CTLFLAG_RW,
    &in_newts_every_tcb, 0,
    "Do we have a new cts every tcb we process for input");
static int32_t in_ts_percision = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tspercision, CTLFLAG_RW,
    &in_ts_percision, 0,
    "Do we use a precise timestamp for clients on input");
static int32_t out_newts_every_tcb = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tsperpcb, CTLFLAG_RW,
    &out_newts_every_tcb, 0,
    "Do we have a new cts every tcb we process for output");
static int32_t out_ts_percision = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tspercision, CTLFLAG_RW,
    &out_ts_percision, 0,
    "Do we use a precise timestamp for every output cts");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, maxsleep, CTLFLAG_RW,
    &hpts_sleep_max, 0,
    "The maximum time the hpts will sleep <1 - 254>");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, minsleep, CTLFLAG_RW,
    &tcp_min_hptsi_time, 0,
    "The minimum time the hpts must sleep before processing more slots");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, skip_swi, CTLFLAG_RW,
    &tcp_hpts_callout_skip_swi, 0,
    "Do we have the callout call directly to the hpts?");
static void
__tcp_hpts_log_it(struct tcp_hpts_entry *hpts, struct inpcb *inp, int event, uint32_t slot,
    uint32_t ticknow, int32_t line)
{
	struct hpts_log *pl;

	HPTS_MTX_ASSERT(hpts);
	if (hpts->p_log == NULL)
		return;
	pl = &hpts->p_log[hpts->p_log_at];
	hpts->p_log_at++;
	if (hpts->p_log_at >= hpts->p_logsize) {
		hpts->p_log_at = 0;
		hpts->p_log_wrapped = 1;
	}
	pl->t_paceslot = inp->inp_hptsslot;
	pl->t_hptsreq = inp->inp_hpts_request;
	pl->p_onhpts = inp->inp_in_hpts;
	pl->p_oninput = inp->inp_in_input;
	pl->cts = tcp_get_usecs(NULL);
	pl->p_curtick = hpts->p_curtick;
	pl->p_prevtick = hpts->p_prevtick;
	pl->p_on_queue_cnt = hpts->p_on_queue_cnt;
	pl->ticknow = ticknow;
	pl->p_nxt_slot = hpts->p_nxt_slot;
	pl->p_cur_slot = hpts->p_cur_slot;
	pl->p_hpts_sleep_time = hpts->p_hpts_sleep_time;
	pl->p_flags = (hpts->p_cpu & 0x7f);
	pl->p_flags <<= 7;
	pl->p_flags |= (hpts->p_num & 0x7f);
	if (hpts->p_hpts_active) {
		pl->p_flags |= HPTS_HPTS_ACTIVE;
	}
}

#define tcp_hpts_log_it(a, b, c, d, e) __tcp_hpts_log_it(a, b, c, d, e, __LINE__)
static void
hpts_timeout_swi(void *arg)
{
	struct tcp_hpts_entry *hpts;

	hpts = (struct tcp_hpts_entry *)arg;
	swi_sched(hpts->ie_cookie, 0);
}

static void
hpts_timeout_dir(void *arg)
{
	tcp_hpts_thread(arg);
}
static void
hpts_sane_pace_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int clear)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_hpts_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_hpts == 0) {
		/* We are not on the hpts? */
		panic("%s: hpts:%p inp:%p not on the hpts?", __FUNCTION__, hpts, inp);
	}
	if (TAILQ_EMPTY(head) &&
	    (hpts->p_on_queue_cnt != 0)) {
		/* We should not be empty with a queue count */
		panic("%s hpts:%p hpts bucket empty but cnt:%d",
		    __FUNCTION__, hpts, hpts->p_on_queue_cnt);
	}
#endif
	TAILQ_REMOVE(head, inp, inp_hpts);
	hpts->p_on_queue_cnt--;
	if (hpts->p_on_queue_cnt < 0) {
		/* Count should not go negative .. */
#ifdef INVARIANTS
		panic("Hpts goes negative inp:%p hpts:%p",
		    inp, hpts);
#endif
		hpts->p_on_queue_cnt = 0;
	}
	if (clear) {
		inp->inp_hpts_request = 0;
		inp->inp_in_hpts = 0;
	}
}
static void
hpts_sane_pace_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int line, int noref)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_hpts_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if ((noref == 0) && (inp->inp_in_hpts == 1)) {
		/* We are already on the hpts? */
		panic("%s: hpts:%p inp:%p already on the hpts?", __FUNCTION__, hpts, inp);
	}
#endif
	TAILQ_INSERT_TAIL(head, inp, inp_hpts);
	inp->inp_in_hpts = 1;
	hpts->p_on_queue_cnt++;
	if (noref == 0) {
		in_pcbref(inp);
	}
}
static void
hpts_sane_input_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, int clear)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_input_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_input == 0) {
		/* We are not on the input hpts? */
		panic("%s: hpts:%p inp:%p not on the input hpts?", __FUNCTION__, hpts, inp);
	}
#endif
	TAILQ_REMOVE(&hpts->p_input, inp, inp_input);
	hpts->p_on_inqueue_cnt--;
	if (hpts->p_on_inqueue_cnt < 0) {
#ifdef INVARIANTS
		panic("Hpts in goes negative inp:%p hpts:%p",
		    inp, hpts);
#endif
		hpts->p_on_inqueue_cnt = 0;
	}
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		/* We should not be empty with a queue count */
		panic("%s hpts:%p in_hpts input empty but cnt:%d",
		    __FUNCTION__, hpts, hpts->p_on_inqueue_cnt);
	}
#endif
	if (clear)
		inp->inp_in_input = 0;
}
static void
hpts_sane_input_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, int line)
{
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_input_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_input == 1) {
		/* We are already on the input hpts? */
		panic("%s: hpts:%p inp:%p already on the input hpts?", __FUNCTION__, hpts, inp);
	}
#endif
	TAILQ_INSERT_TAIL(&hpts->p_input, inp, inp_input);
	inp->inp_in_input = 1;
	hpts->p_on_inqueue_cnt++;
	in_pcbref(inp);
}
static int
sysctl_tcp_hpts_log(SYSCTL_HANDLER_ARGS)
{
	struct tcp_hpts_entry *hpts;
	size_t sz;
	int32_t logging_was, i;
	int32_t error = 0;

	/*
	 * HACK: Turn off logging so no locks are required; this really
	 * needs a memory barrier :)
	 */
	logging_was = logging_on;
	logging_on = 0;
	sz = 0;
	/* Calculate the maximum size we may output */
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		hpts = tcp_pace.rp_ent[i];
		if (hpts->p_log == NULL)
			continue;
		sz += (sizeof(struct hpts_log) * hpts->p_logsize);
	}
	error = SYSCTL_OUT(req, 0, sz);
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		hpts = tcp_pace.rp_ent[i];
		if (hpts->p_log == NULL)
			continue;
		if (hpts->p_log_wrapped)
			sz = (sizeof(struct hpts_log) * hpts->p_logsize);
		else
			sz = (sizeof(struct hpts_log) * hpts->p_log_at);
		error = SYSCTL_OUT(req, hpts->p_log, sz);
	}
	logging_on = logging_was;
	return (error);
}

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, log, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hpts_log, "A", "tcp hptsi log");
static void
tcp_wakehpts(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);
	swi_sched(hpts->ie_cookie, 0);
	if (hpts->p_hpts_active == 2) {
		/* Rare sleeping on a ENOBUF */
		wakeup_one(hpts);
	}
}

static void
tcp_wakeinput(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);
	swi_sched(hpts->ie_cookie, 0);
	if (hpts->p_hpts_active == 2) {
		/* Rare sleeping on a ENOBUF */
		wakeup_one(hpts);
	}
}
struct tcp_hpts_entry *
tcp_cur_hpts(struct inpcb *inp)
{
	int32_t hpts_num;
	struct tcp_hpts_entry *hpts;

	hpts_num = inp->inp_hpts_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
	return (hpts);
}
struct tcp_hpts_entry *
tcp_hpts_lock(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;
	int32_t hpts_num;

again:
	hpts_num = inp->inp_hpts_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx)) {
		panic("Hpts:%p owns mtx prior-to lock line:%d",
		    hpts, __LINE__);
	}
#endif
	mtx_lock(&hpts->p_mtx);
	if (hpts_num != inp->inp_hpts_cpu) {
		mtx_unlock(&hpts->p_mtx);
		goto again;
	}
	return (hpts);
}
struct tcp_hpts_entry *
tcp_input_lock(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;
	int32_t hpts_num;

again:
	hpts_num = inp->inp_input_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
#ifdef INVARIANTS
	if (mtx_owned(&hpts->p_mtx)) {
		panic("Hpts:%p owns mtx prior-to lock line:%d",
		    hpts, __LINE__);
	}
#endif
	mtx_lock(&hpts->p_mtx);
	if (hpts_num != inp->inp_input_cpu) {
		mtx_unlock(&hpts->p_mtx);
		goto again;
	}
	return (hpts);
}
static void
tcp_remove_hpts_ref(struct inpcb *inp, struct tcp_hpts_entry *hpts, int line)
{
	int32_t add_freed;

	if (inp->inp_flags2 & INP_FREED) {
		/*
		 * Need to play a special trick so that in_pcbrele_wlocked
		 * does not return 1 when it really should have returned 0.
		 */
		add_freed = 1;
		inp->inp_flags2 &= ~INP_FREED;
	} else {
		add_freed = 0;
	}
#ifndef INP_REF_DEBUG
	if (in_pcbrele_wlocked(inp)) {
		/*
		 * This should not happen. We have the inpcb referred to by
		 * the main socket (why we are called) and the hpts. It
		 * should always return 0.
		 */
		panic("inpcb:%p release ret 1",
		    inp);
	}
#else
	if (__in_pcbrele_wlocked(inp, line)) {
		/*
		 * This should not happen. We have the inpcb referred to by
		 * the main socket (why we are called) and the hpts. It
		 * should always return 0.
		 */
		panic("inpcb:%p release ret 1",
		    inp);
	}
#endif
	if (add_freed) {
		inp->inp_flags2 |= INP_FREED;
	}
}
static void
tcp_hpts_remove_locked_output(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
{
	if (inp->inp_in_hpts) {
		hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], 1);
		tcp_remove_hpts_ref(inp, hpts, line);
	}
}

static void
tcp_hpts_remove_locked_input(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
{
	HPTS_MTX_ASSERT(hpts);
	if (inp->inp_in_input) {
		hpts_sane_input_remove(hpts, inp, 1);
		tcp_remove_hpts_ref(inp, hpts, line);
	}
}
/*
 * Called normally with the INP_LOCKED but it
 * does not matter, the hpts lock is the key
 * but the lock order allows us to hold the
 * INP lock and then get the hpts lock.
 *
 * Valid values in the flags are
 * HPTS_REMOVE_OUTPUT - remove from the output of the hpts.
 * HPTS_REMOVE_INPUT - remove from the input of the hpts.
 * Note that you can OR both values together and get two
 * actions.
 */
void
__tcp_hpts_remove(struct inpcb *inp, int32_t flags, int32_t line)
{
	struct tcp_hpts_entry *hpts;

	INP_WLOCK_ASSERT(inp);
	if (flags & HPTS_REMOVE_OUTPUT) {
		hpts = tcp_hpts_lock(inp);
		tcp_hpts_remove_locked_output(hpts, inp, flags, line);
		mtx_unlock(&hpts->p_mtx);
	}
	if (flags & HPTS_REMOVE_INPUT) {
		hpts = tcp_input_lock(inp);
		tcp_hpts_remove_locked_input(hpts, inp, flags, line);
		mtx_unlock(&hpts->p_mtx);
	}
}
static uint32_t
hpts_tick(struct tcp_hpts_entry *hpts, int32_t plus)
{
	return ((hpts->p_prevtick + plus) % NUM_OF_HPTSI_SLOTS);
}
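
/*
 * Illustrative example (assuming the 102400-slot wheel implied by the
 * "1024000 usecs" comment further down): with p_prevtick at 102398,
 * hpts_tick(hpts, 3) returns (102398 + 3) % 102400 = 1, i.e. the
 * wheel wraps.
 */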
static int32_t
tcp_queue_to_hpts_immediate_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line, int32_t noref)
{
	int32_t need_wake = 0;
	uint32_t ticknow = 0;

	HPTS_MTX_ASSERT(hpts);
	if (inp->inp_in_hpts == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		if (hpts->p_hpts_active == 0) {
			/* A sleeping hpts we want in next slot to run */
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, 0,
				    hpts_tick(hpts, 1));
			}
			inp->inp_hptsslot = hpts_tick(hpts, 1);
			inp->inp_hpts_request = 0;
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEP_BEFORE, 1, ticknow);
			}
			need_wake = 1;
		} else if ((void *)inp == hpts->p_inp) {
			/*
			 * We can't allow you to go into the same slot we
			 * are in. We must put you out.
			 */
			inp->inp_hptsslot = hpts->p_nxt_slot;
		} else
			inp->inp_hptsslot = hpts->p_cur_slot;
		hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
		inp->inp_hpts_request = 0;
		if (logging_on) {
			tcp_hpts_log_it(hpts, inp, HPTSLOG_IMMEDIATE, 0, 0);
		}
		if (need_wake) {
			/*
			 * Activate the hpts if it is sleeping and its
			 * timeout is not yet running.
			 */
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_HPTS, 0, ticknow);
			}
			hpts->p_direct_wake = 1;
			tcp_wakehpts(hpts);
		}
	}
	return (need_wake);
}
int32_t
__tcp_queue_to_hpts_immediate(struct inpcb *inp, int32_t line)
{
	int32_t ret;
	struct tcp_hpts_entry *hpts;

	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	ret = tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
	mtx_unlock(&hpts->p_mtx);
	return (ret);
}
static int32_t
tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t slot, uint32_t cts, int32_t line,
    struct hpts_diag *diag, int32_t noref)
{
	int32_t need_new_to = 0;
	int32_t need_wakeup = 0;
	uint32_t largest_slot;
	uint32_t ticknow = 0;
	uint32_t slot_calc;

	HPTS_MTX_ASSERT(hpts);
	if (diag) {
		memset(diag, 0, sizeof(struct hpts_diag));
		diag->p_hpts_active = hpts->p_hpts_active;
		diag->p_nxt_slot = hpts->p_nxt_slot;
		diag->p_cur_slot = hpts->p_cur_slot;
		diag->slot_req = slot;
	}
	if ((inp->inp_in_hpts == 0) || noref) {
		inp->inp_hpts_request = slot;
		if (slot == 0) {
			/* Immediate */
			tcp_queue_to_hpts_immediate_locked(inp, hpts, line, noref);
			return (need_new_to);
		}
		if (hpts->p_hpts_active) {
			/*
			 * It's slot - 1 since nxt_slot is the next tick that
			 * will go off since the hpts is awake.
			 */
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_NORMAL, slot, 0);
			}
			/*
			 * We want to make sure that we don't place an inp in
			 * the range of p_cur_slot <-> p_nxt_slot. If we
			 * take from p_nxt_slot to the end, plus p_cur_slot
			 * and then take away 2, we will know how many is
			 * the max slots we can use.
			 */
			if (hpts->p_nxt_slot > hpts->p_cur_slot) {
				/*
				 * Non-wrap case: nxt_slot <-> cur_slot we
				 * don't want to land in. So the diff gives
				 * us what is taken away from the number of
				 * slots.
				 */
				largest_slot = NUM_OF_HPTSI_SLOTS - (hpts->p_nxt_slot - hpts->p_cur_slot);
			} else if (hpts->p_nxt_slot == hpts->p_cur_slot) {
				largest_slot = NUM_OF_HPTSI_SLOTS - 2;
			} else {
				/*
				 * Wrap case, so the diff gives us the number
				 * of slots that we can land in.
				 */
				largest_slot = hpts->p_cur_slot - hpts->p_nxt_slot;
			}
			/*
			 * We take away two so we never have a problem (20
			 * usec's) out of 1024000 usecs.
			 */
			largest_slot -= 2;
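			/*
			 * Worked example (illustrative numbers, using the
			 * 10-usec slots implied above): on a 102400-slot
			 * wheel with p_cur_slot = 10 and p_nxt_slot = 50,
			 * the 40 in-between slots are off limits, so
			 * largest_slot = 102400 - 40 - 2 = 102358.
			 */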
			if (inp->inp_hpts_request > largest_slot) {
				/*
				 * Restrict max jump of slots and remember
				 * the leftover.
				 */
				slot = largest_slot;
				inp->inp_hpts_request -= largest_slot;
			} else {
				/* This one will run when we hit it */
				inp->inp_hpts_request = 0;
			}
			if (hpts->p_nxt_slot == hpts->p_cur_slot)
				slot_calc = (hpts->p_nxt_slot + slot) % NUM_OF_HPTSI_SLOTS;
			else
				slot_calc = (hpts->p_nxt_slot + slot - 1) % NUM_OF_HPTSI_SLOTS;
			if (slot_calc == hpts->p_cur_slot) {
#ifdef INVARIANTS
				panic("Hpts:%p impossible slot calculation slot_calc:%u slot:%u largest:%u\n",
				    hpts, slot_calc, slot, largest_slot);
#endif
				if (slot_calc)
					slot_calc--;
				else
					slot_calc = NUM_OF_HPTSI_SLOTS - 1;
			}
			inp->inp_hptsslot = slot_calc;
			if (diag) {
				diag->inp_hptsslot = inp->inp_hptsslot;
			}
		} else {
			/*
			 * The hpts is sleeping, we need to figure out where
			 * it will wake up at and if we need to reschedule
			 * its time-out.
			 */
			uint32_t have_slept, yet_to_sleep;
			uint32_t slot_now;
			struct timeval tv;

			ticknow = tcp_gethptstick(&tv);
			slot_now = ticknow % NUM_OF_HPTSI_SLOTS;
			/*
			 * The user wants to be inserted at (slot_now +
			 * slot) % NUM_OF_HPTSI_SLOTS, so lets set that up.
			 */
			largest_slot = NUM_OF_HPTSI_SLOTS - 2;
			if (inp->inp_hpts_request > largest_slot) {
				/* Adjust the residual in inp_hpts_request */
				slot = largest_slot;
				inp->inp_hpts_request -= largest_slot;
			} else {
				/* No residual, it all fits */
				inp->inp_hpts_request = 0;
			}
			inp->inp_hptsslot = (slot_now + slot) % NUM_OF_HPTSI_SLOTS;
			if (diag) {
				diag->slot_now = slot_now;
				diag->inp_hptsslot = inp->inp_hptsslot;
				diag->p_on_min_sleep = hpts->p_on_min_sleep;
			}
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, slot, ticknow);
			}
			/* Now do we need to restart the hpts's timer? */
			if (TSTMP_GT(ticknow, hpts->p_curtick))
				have_slept = ticknow - hpts->p_curtick;
			else
				have_slept = 0;
			if (have_slept < hpts->p_hpts_sleep_time) {
				/* This should be what happens */
				yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
			} else {
				/* We are over-due */
				yet_to_sleep = 0;
				need_wakeup = 1;
			}
			if (diag) {
				diag->have_slept = have_slept;
				diag->yet_to_sleep = yet_to_sleep;
				diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
			}
			if ((hpts->p_on_min_sleep == 0) && (yet_to_sleep > slot)) {
				/*
				 * We need to reschedule the hpts's time-out.
				 */
				hpts->p_hpts_sleep_time = slot;
				need_new_to = slot * HPTS_TICKS_PER_USEC;
			}
		}
		hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
		if (logging_on) {
			tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERTED, slot, ticknow);
		}
		/*
		 * Now how far is the hpts sleeping to? If active is 1, it's
		 * up and ticking and we do nothing; otherwise we may need to
		 * reschedule its callout if need_new_to is set from above.
		 */
		if (need_wakeup) {
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_RESCHEDULE, 1, 0);
			}
			hpts->p_direct_wake = 1;
			tcp_wakehpts(hpts);
			if (diag) {
				diag->need_new_to = 0;
				diag->co_ret = 0xffff0000;
			}
		} else if (need_new_to) {
			int32_t co_ret;
			struct timeval tv;
			sbintime_t sb;

			tv.tv_sec = 0;
			tv.tv_usec = 0;
			while (need_new_to > HPTS_USEC_IN_SEC) {
				tv.tv_sec++;
				need_new_to -= HPTS_USEC_IN_SEC;
			}
			tv.tv_usec = need_new_to;
			sb = tvtosbt(tv);
			if (tcp_hpts_callout_skip_swi == 0) {
				co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
				    hpts_timeout_swi, hpts, hpts->p_cpu,
				    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
			} else {
				co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
				    hpts_timeout_dir, hpts,
				    hpts->p_cpu,
				    C_PREL(tcp_hpts_precision));
			}
			if (diag) {
				diag->need_new_to = need_new_to;
				diag->co_ret = co_ret;
			}
		}
	} else {
#ifdef INVARIANTS
		panic("Hpts:%p tp:%p already on hpts and add?", hpts, inp);
#endif
	}
	return (need_new_to);
}
uint32_t
tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag)
{
	struct tcp_hpts_entry *hpts;
	uint32_t slot_on, cts;
	struct timeval tv;

	/*
	 * We now return the next-slot the hpts will be on, beyond its
	 * current run (if up) or where it was when it stopped if it is
	 * sleeping.
	 */
	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	if (out_ts_percision)
		microuptime(&tv);
	else
		getmicrouptime(&tv);
	cts = tcp_tv_to_usectick(&tv);
	tcp_hpts_insert_locked(hpts, inp, slot, cts, line, diag, 0);
	slot_on = hpts->p_nxt_slot;
	mtx_unlock(&hpts->p_mtx);
	return (slot_on);
}
uint32_t
__tcp_hpts_insert(struct inpcb *inp, uint32_t slot, int32_t line)
{
	return (tcp_hpts_insert_diag(inp, slot, line, NULL));
}
int32_t
__tcp_queue_to_input_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line)
{
	int32_t retval = 0;

	HPTS_MTX_ASSERT(hpts);
	if (inp->inp_in_input == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		hpts_sane_input_insert(hpts, inp, line);
		retval = 1;
		if (hpts->p_hpts_active == 0) {
			/*
			 * Activate the hpts if it is sleeping.
			 */
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_INPUT, 0, 0);
			}
			retval = 2;
			hpts->p_direct_wake = 1;
			tcp_wakeinput(hpts);
		}
	} else if (hpts->p_hpts_active == 0) {
		retval = 3;
		hpts->p_direct_wake = 1;
		tcp_wakeinput(hpts);
	}
	return (retval);
}
void
tcp_queue_pkt_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int32_t tlen, int32_t drop_hdrlen, uint8_t iptos)
{
	/* Setup packet for input first */
	INP_WLOCK_ASSERT(tp->t_inpcb);
	m->m_pkthdr.pace_thoff = (uint16_t) ((caddr_t)th - mtod(m, caddr_t));
	m->m_pkthdr.pace_tlen = (uint16_t) tlen;
	m->m_pkthdr.pace_drphdrlen = drop_hdrlen;
	m->m_pkthdr.pace_tos = iptos;
	m->m_pkthdr.pace_lock = (curthread->td_epochnest != 0);
	if (tp->t_in_pkt == NULL) {
		tp->t_in_pkt = m;
		tp->t_tail_pkt = m;
	} else {
		tp->t_tail_pkt->m_nextpkt = m;
		tp->t_tail_pkt = m;
	}
}
int32_t
__tcp_queue_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int32_t tlen, int32_t drop_hdrlen, uint8_t iptos, int32_t line)
{
	struct tcp_hpts_entry *hpts;
	int32_t ret;

	tcp_queue_pkt_to_input(tp, m, th, tlen, drop_hdrlen, iptos);
	hpts = tcp_input_lock(tp->t_inpcb);
	ret = __tcp_queue_to_input_locked(tp->t_inpcb, hpts, line);
	mtx_unlock(&hpts->p_mtx);
	return (ret);
}
void
__tcp_set_inp_to_drop(struct inpcb *inp, uint16_t reason, int32_t line)
{
	struct tcp_hpts_entry *hpts;
	struct tcpcb *tp;

	tp = intotcpcb(inp);
	hpts = tcp_input_lock(tp->t_inpcb);
	if (inp->inp_in_input == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		hpts_sane_input_insert(hpts, inp, line);
		if (hpts->p_hpts_active == 0) {
			/*
			 * Activate the hpts if it is sleeping.
			 */
			hpts->p_direct_wake = 1;
			tcp_wakeinput(hpts);
		}
	} else if (hpts->p_hpts_active == 0) {
		hpts->p_direct_wake = 1;
		tcp_wakeinput(hpts);
	}
	inp->inp_hpts_drop_reas = reason;
	mtx_unlock(&hpts->p_mtx);
}
static uint16_t
hpts_random_cpu(struct inpcb *inp)
{
	/*
	 * No flow type set, distribute the load randomly.
	 */
	uint16_t cpuid;
	uint32_t ran;

	/*
	 * If one has been set use it i.e. we want both in and out on the
	 * same CPU.
	 */
	if (inp->inp_input_cpu_set) {
		return (inp->inp_input_cpu);
	} else if (inp->inp_hpts_cpu_set) {
		return (inp->inp_hpts_cpu);
	}
	/* Nothing set use a random number */
	ran = arc4random();
	cpuid = (ran & 0xffff) % mp_ncpus;
	return (cpuid);
}
static uint16_t
hpts_cpuid(struct inpcb *inp)
{
	uint16_t cpuid;
	struct hpts_domain_info *di;

	/*
	 * If one has been set use it i.e. we want both in and out on the
	 * same CPU.
	 */
	if (inp->inp_input_cpu_set) {
		return (inp->inp_input_cpu);
	} else if (inp->inp_hpts_cpu_set) {
		return (inp->inp_hpts_cpu);
	}
	/* If one is set the other must be the same */
#ifdef RSS
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid == NETISR_CPUID_NONE)
		return (hpts_random_cpu(inp));
	else
		return (cpuid);
#else
	/*
	 * We don't have a flowid -> cpuid mapping, so cheat and just map
	 * unknown cpuids to curcpu. Not the best, but apparently better
	 * than defaulting to swi 0.
	 */
	if (inp->inp_flowtype == M_HASHTYPE_NONE)
		return (hpts_random_cpu(inp));
	/*
	 * Hash to a thread based on the flowid. If we are using numa,
	 * then restrict the hash to the numa domain where the inp lives.
	 */
	if (tcp_bind_threads == 2 && inp->inp_numa_domain != M_NODOM) {
		di = &hpts_domains[inp->inp_numa_domain];
		cpuid = di->cpu[inp->inp_flowid % di->count];
	} else
		cpuid = inp->inp_flowid % mp_ncpus;

	return (cpuid);
#endif
}
/*
 * Do NOT try to optimize the processing of inp's
 * by first pulling off all the inp's into a temporary
 * list (e.g. TAILQ_CONCAT). If you do that the subtle
 * interactions of switching CPU's will kill you because of
 * problems in the linked list manipulation. Basically
 * you would switch cpu's with the hpts mutex locked
 * but then while you were processing one of the inp's
 * some other one that you switched will get a new
 * packet on the different CPU. It will insert it
 * on the new hpts's input list. Creating a temporary
 * link in the inp will not fix it either, since
 * the other hpts will be doing the same thing and
 * you will both end up using the temporary link.
 *
 * You will die in an ASSERT for tailq corruption if you
 * run INVARIANTS or you will die horribly without
 * INVARIANTS in some unknown way with a corrupt linked
 * list.
 */
static void
tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv)
{
	struct tcpcb *tp;
	struct inpcb *inp;
	struct mbuf *m, *n;
	uint16_t drop_reason;
	int16_t set_cpu;
	uint32_t did_prefetch = 0;
	int32_t ti_locked = TI_UNLOCKED;
	struct epoch_tracker et;

	HPTS_MTX_ASSERT(hpts);
	while ((inp = TAILQ_FIRST(&hpts->p_input)) != NULL) {
		HPTS_MTX_ASSERT(hpts);
		hpts_sane_input_remove(hpts, inp, 0);
		if (inp->inp_input_cpu_set == 0) {
			set_cpu = 1;
		} else {
			set_cpu = 0;
		}
		drop_reason = inp->inp_hpts_drop_reas;
		inp->inp_in_input = 0;
		mtx_unlock(&hpts->p_mtx);
		CURVNET_SET(inp->inp_vnet);
		if (drop_reason) {
			INP_INFO_RLOCK_ET(&V_tcbinfo, et);
			ti_locked = TI_RLOCKED;
		} else {
			ti_locked = TI_UNLOCKED;
		}
		INP_WLOCK(inp);
		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
		    (inp->inp_flags2 & INP_FREED)) {
out:
			if (ti_locked == TI_RLOCKED) {
				INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
			}
			if (in_pcbrele_wlocked(inp) == 0) {
				INP_WUNLOCK(inp);
			}
			ti_locked = TI_UNLOCKED;
			CURVNET_RESTORE();
			mtx_lock(&hpts->p_mtx);
			continue;
		}
		tp = intotcpcb(inp);
		if ((tp == NULL) || (tp->t_inpcb == NULL)) {
			goto out;
		}
		if (drop_reason) {
			/* This tcb is being destroyed for drop_reason */
			m = tp->t_in_pkt;
			if (m)
				n = m->m_nextpkt;
			else
				n = NULL;
			tp->t_in_pkt = NULL;
			while (m) {
				m_freem(m);
				m = n;
				if (m)
					n = m->m_nextpkt;
			}
			tp = tcp_drop(tp, drop_reason);
			INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
			if (tp == NULL) {
				INP_WLOCK(inp);
			}
			if (in_pcbrele_wlocked(inp) == 0)
				INP_WUNLOCK(inp);
			CURVNET_RESTORE();
			mtx_lock(&hpts->p_mtx);
			continue;
		}
		if (set_cpu) {
			/*
			 * Setup so the next time we will move to the right
			 * CPU. This should be a rare event. It will
			 * sometimes happen when we are the client side
			 * (usually not the server). Somehow tcp_output()
			 * gets called before the tcp_do_segment() sets the
			 * initial state. This means the r_cpu and r_hpts_cpu
			 * is 0. We get on the hpts, and then tcp_input()
			 * gets called setting up the r_cpu to the correct
			 * value. The hpts goes off and sees the mis-match.
			 * We simply correct it here and the CPU will switch
			 * to the new hpts next time the tcb gets added to
			 * the hpts (not this time) :-)
			 */
			tcp_set_hpts(inp);
		}
		m = tp->t_in_pkt;
		if ((m != NULL) &&
		    (m->m_pkthdr.pace_lock == TI_RLOCKED ||
		    tp->t_state != TCPS_ESTABLISHED)) {
			ti_locked = TI_RLOCKED;
			INP_INFO_RLOCK_ET(&V_tcbinfo, et);
			m = tp->t_in_pkt;
		}
		if (in_newts_every_tcb) {
			if (in_ts_percision)
				microuptime(tv);
			else
				getmicrouptime(tv);
		}
		if (tp->t_fb_ptr != NULL) {
			kern_prefetch(tp->t_fb_ptr, &did_prefetch);
			did_prefetch = 1;
		}
		/* Any input work to do, if so do it first */
		if ((m != NULL) && (m == tp->t_in_pkt)) {
			struct tcphdr *th;
			int32_t tlen, drop_hdrlen, nxt_pkt;
			uint8_t iptos;

			n = m->m_nextpkt;
			tp->t_in_pkt = tp->t_tail_pkt = NULL;
			while (m) {
				th = (struct tcphdr *)(mtod(m, caddr_t)+m->m_pkthdr.pace_thoff);
				tlen = m->m_pkthdr.pace_tlen;
				drop_hdrlen = m->m_pkthdr.pace_drphdrlen;
				iptos = m->m_pkthdr.pace_tos;
				m->m_nextpkt = NULL;
				if (n)
					nxt_pkt = 1;
				else
					nxt_pkt = 0;
				inp->inp_input_calls = 1;
				if (tp->t_fb->tfb_tcp_hpts_do_segment) {
					/* Use the hpts specific do_segment */
					(*tp->t_fb->tfb_tcp_hpts_do_segment) (m, th, inp->inp_socket,
					    tp, drop_hdrlen,
					    tlen, iptos, nxt_pkt, tv);
				} else {
					/* Use the default do_segment */
					(*tp->t_fb->tfb_tcp_do_segment) (m, th, inp->inp_socket,
					    tp, drop_hdrlen,
					    tlen, iptos);
				}
				if (ti_locked == TI_RLOCKED)
					INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
				/*
				 * Do segment returns unlocked; we need the
				 * lock again but we also need some kasserts
				 * here.
				 */
				INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
				INP_UNLOCK_ASSERT(inp);
				m = n;
				if (m)
					n = m->m_nextpkt;
				if (m != NULL &&
				    m->m_pkthdr.pace_lock == TI_RLOCKED) {
					INP_INFO_RLOCK_ET(&V_tcbinfo, et);
					ti_locked = TI_RLOCKED;
				} else
					ti_locked = TI_UNLOCKED;
				INP_WLOCK(inp);
				/*
				 * Since we have an opening here we must
				 * re-check if the tcb went away while we
				 * were getting the lock(s).
				 */
				if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
				    (inp->inp_flags2 & INP_FREED)) {
					while (m) {
						m_freem(m);
						m = n;
						if (m)
							n = m->m_nextpkt;
					}
					goto out;
				}
				/*
				 * Now that we hold the INP lock, check if
				 * we need to upgrade our lock.
				 */
				if (ti_locked == TI_UNLOCKED &&
				    (tp->t_state != TCPS_ESTABLISHED)) {
					ti_locked = TI_RLOCKED;
					INP_INFO_RLOCK_ET(&V_tcbinfo, et);
				}
			}	/** end while(m) */
		}	/** end if ((m != NULL) && (m == tp->t_in_pkt)) */
		if (in_pcbrele_wlocked(inp) == 0)
			INP_WUNLOCK(inp);
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
		INP_INFO_WUNLOCK_ASSERT(&V_tcbinfo);
		INP_UNLOCK_ASSERT(inp);
		ti_locked = TI_UNLOCKED;
		mtx_lock(&hpts->p_mtx);
		CURVNET_RESTORE();
	}
}
static int32_t
tcp_hpts_est_run(struct tcp_hpts_entry *hpts)
{
	int32_t ticks_to_run;

	if (hpts->p_prevtick && (SEQ_GT(hpts->p_curtick, hpts->p_prevtick))) {
		ticks_to_run = hpts->p_curtick - hpts->p_prevtick;
		if (ticks_to_run >= (NUM_OF_HPTSI_SLOTS - 1)) {
			ticks_to_run = NUM_OF_HPTSI_SLOTS - 2;
		}
	} else {
		if (hpts->p_prevtick == hpts->p_curtick) {
			/* This happens when we get woken up right away */
			return (-1);
		}
		ticks_to_run = 1;
	}
	/* Set in where we will be when we catch up */
	hpts->p_nxt_slot = (hpts->p_cur_slot + ticks_to_run) % NUM_OF_HPTSI_SLOTS;
	if (hpts->p_nxt_slot == hpts->p_cur_slot) {
		panic("Impossible math -- hpts:%p p_nxt_slot:%d p_cur_slot:%d ticks_to_run:%d",
		    hpts, hpts->p_nxt_slot, hpts->p_cur_slot, ticks_to_run);
	}
	return (ticks_to_run);
}
static void
tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick)
{
	struct tcpcb *tp;
	struct inpcb *inp = NULL, *ninp;
	struct timeval tv;
	int32_t ticks_to_run, i, error, tick_now, interum_tick;
	int32_t paced_cnt = 0;
	int32_t did_prefetch = 0;
	int32_t prefetch_ninp = 0;
	int32_t prefetch_tp = 0;
	uint32_t cts;
	int16_t set_cpu;

	HPTS_MTX_ASSERT(hpts);
	hpts->p_curtick = tcp_tv_to_hptstick(ctick);
	cts = tcp_tv_to_usectick(ctick);
	memcpy(&tv, ctick, sizeof(struct timeval));
	hpts->p_cur_slot = hpts_tick(hpts, 1);

	/* Figure out if we had missed ticks */
again:
	HPTS_MTX_ASSERT(hpts);
	ticks_to_run = tcp_hpts_est_run(hpts);
	if (!TAILQ_EMPTY(&hpts->p_input)) {
		tcp_input_data(hpts, &tv);
	}
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		panic("tp:%p in_hpts input empty but cnt:%d",
		    hpts, hpts->p_on_inqueue_cnt);
	}
#endif
	HPTS_MTX_ASSERT(hpts);
	/* Reset the ticks to run and time if we need to */
	interum_tick = tcp_gethptstick(&tv);
	if (interum_tick != hpts->p_curtick) {
		/* Save off the new time we execute to */
		*ctick = tv;
		hpts->p_curtick = interum_tick;
		cts = tcp_tv_to_usectick(&tv);
		hpts->p_cur_slot = hpts_tick(hpts, 1);
		ticks_to_run = tcp_hpts_est_run(hpts);
	}
	if (ticks_to_run == -1) {
		goto no_run;
	}
	if (logging_on) {
		tcp_hpts_log_it(hpts, inp, HPTSLOG_SETTORUN, ticks_to_run, 0);
	}
	if (hpts->p_on_queue_cnt == 0) {
		goto no_one;
	}
	HPTS_MTX_ASSERT(hpts);
	for (i = 0; i < ticks_to_run; i++) {
		/*
		 * Calculate our delay; if there are no extra ticks there
		 * was little or no delay.
		 */
		hpts->p_delayed_by = (ticks_to_run - (i + 1)) * HPTS_TICKS_PER_USEC;
		HPTS_MTX_ASSERT(hpts);
		while ((inp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_HPTSI, ticks_to_run, i);
			}
			hpts->p_inp = inp;
			paced_cnt++;
#ifdef INVARIANTS
			if (hpts->p_cur_slot != inp->inp_hptsslot) {
				panic("Hpts:%p inp:%p slot mis-aligned %u vs %u",
				    hpts, inp, hpts->p_cur_slot, inp->inp_hptsslot);
			}
#endif
			/* Now pull it */
			if (inp->inp_hpts_cpu_set == 0) {
				set_cpu = 1;
			} else {
				set_cpu = 0;
			}
			hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[hpts->p_cur_slot], 0);
			if ((ninp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
				/* We prefetch the next inp if possible */
				kern_prefetch(ninp, &prefetch_ninp);
				prefetch_ninp = 1;
			}
			if (inp->inp_hpts_request) {
				/*
				 * This guy is deferred out further in time
				 * than our wheel has on it. Push him back
				 * on the wheel.
				 */
				int32_t remaining_slots;

				remaining_slots = ticks_to_run - (i + 1);
				if (inp->inp_hpts_request > remaining_slots) {
					/*
					 * Keep INVARIANTS happy by clearing
					 * the flag.
					 */
					tcp_hpts_insert_locked(hpts, inp, inp->inp_hpts_request, cts, __LINE__, NULL, 1);
					hpts->p_inp = NULL;
					continue;
				}
				inp->inp_hpts_request = 0;
			}
			/*
			 * We clear the hpts flag here after dealing with
			 * remaining slots. This way anyone looking with the
			 * TCB lock will see it's on the hpts until just
			 * before we unlock.
			 */
			inp->inp_in_hpts = 0;
			mtx_unlock(&hpts->p_mtx);
			INP_WLOCK(inp);
			if (in_pcbrele_wlocked(inp)) {
				mtx_lock(&hpts->p_mtx);
				if (logging_on)
					tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 1);
				hpts->p_inp = NULL;
				continue;
			}
			if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
out_now:
#ifdef INVARIANTS
				if (mtx_owned(&hpts->p_mtx)) {
					panic("Hpts:%p owns mtx prior-to lock line:%d",
					    hpts, __LINE__);
				}
#endif
				INP_WUNLOCK(inp);
				mtx_lock(&hpts->p_mtx);
				if (logging_on)
					tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 3);
				hpts->p_inp = NULL;
				continue;
			}
			tp = intotcpcb(inp);
			if ((tp == NULL) || (tp->t_inpcb == NULL)) {
				goto out_now;
			}
			if (set_cpu) {
				/*
				 * Setup so the next time we will move to
				 * the right CPU. This should be a rare
				 * event. It will sometimes happen when we
				 * are the client side (usually not the
				 * server). Somehow tcp_output() gets called
				 * before the tcp_do_segment() sets the
				 * initial state. This means the r_cpu and
				 * r_hpts_cpu is 0. We get on the hpts, and
				 * then tcp_input() gets called setting up
				 * the r_cpu to the correct value. The hpts
				 * goes off and sees the mis-match. We
				 * simply correct it here and the CPU will
				 * switch to the new hpts next time the tcb
				 * gets added to the hpts (not this one)
				 * :-)
				 */
				tcp_set_hpts(inp);
			}
			if (out_newts_every_tcb) {
				struct timeval sv;

				if (out_ts_percision)
					microuptime(&sv);
				else
					getmicrouptime(&sv);
				cts = tcp_tv_to_usectick(&sv);
			}
			CURVNET_SET(inp->inp_vnet);
			/*
			 * There is a hole here: we get the refcnt on the
			 * inp so it will still be preserved but to make
			 * sure we can get the INP we need to hold the p_mtx
			 * above while we pull out the tp/inp, as long as
			 * fini gets the lock first we are assured of having
			 * a sane INP we can lock and test.
			 */
#ifdef INVARIANTS
			if (mtx_owned(&hpts->p_mtx)) {
				panic("Hpts:%p owns mtx before tcp-output:%d",
				    hpts, __LINE__);
			}
#endif
			if (tp->t_fb_ptr != NULL) {
				kern_prefetch(tp->t_fb_ptr, &did_prefetch);
				did_prefetch = 1;
			}
			inp->inp_hpts_calls = 1;
			if (tp->t_fb->tfb_tcp_output_wtime != NULL) {
				error = (*tp->t_fb->tfb_tcp_output_wtime) (tp, &tv);
			} else {
				error = tp->t_fb->tfb_tcp_output(tp);
			}
			if (ninp && ninp->inp_ppcb) {
				/*
				 * If we have a nxt inp, see if we can
				 * prefetch its ppcb. Note this may seem
				 * "risky" since we have no locks (other
				 * than the previous inp) and there is no
				 * assurance that ninp was not pulled while
				 * we were processing inp and freed. If this
				 * occurred it could mean that either:
				 *
				 * a) It's NULL (which is fine we won't go
				 * here) <or> b) It's valid (which is cool we
				 * will prefetch it) <or> c) The inp got
				 * freed back to the slab which was
				 * reallocated. Then the piece of memory was
				 * re-used and something else (not an
				 * address) is in inp_ppcb. If that occurs
				 * we don't crash, but take a TLB shootdown
				 * performance hit (same as if it was NULL
				 * and we tried to pre-fetch it).
				 *
				 * Considering that the likelihood of <c> is
				 * quite rare we will take a risk on doing
				 * this. If performance drops after testing
				 * we can always take this out. NB: the
				 * kern_prefetch on amd64 actually has
				 * protection against a bad address now via
				 * the DMAP_() tests. This will prevent the
				 * TLB hit, and instead if <c> occurs just
				 * cause us to load cache with a useless
				 * address.
				 */
				kern_prefetch(ninp->inp_ppcb, &prefetch_tp);
				prefetch_tp = 1;
			}
			INP_WUNLOCK(inp);
			INP_UNLOCK_ASSERT(inp);
			CURVNET_RESTORE();
#ifdef INVARIANTS
			if (mtx_owned(&hpts->p_mtx)) {
				panic("Hpts:%p owns mtx prior-to lock line:%d",
				    hpts, __LINE__);
			}
#endif
			mtx_lock(&hpts->p_mtx);
			if (logging_on)
				tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 4);
			hpts->p_inp = NULL;
		}
		HPTS_MTX_ASSERT(hpts);
		hpts->p_cur_slot++;
		if (hpts->p_cur_slot >= NUM_OF_HPTSI_SLOTS) {
			hpts->p_cur_slot = 0;
		}
	}
no_one:
	HPTS_MTX_ASSERT(hpts);
	hpts->p_prevtick = hpts->p_curtick;
	hpts->p_delayed_by = 0;
	/*
	 * Check to see if we took an excess amount of time and need to run
	 * more ticks (if we did not hit eno-bufs).
	 */
	/* Re-run any input that may be there */
	(void)tcp_gethptstick(&tv);
	if (!TAILQ_EMPTY(&hpts->p_input)) {
		tcp_input_data(hpts, &tv);
	}
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		panic("tp:%p in_hpts input empty but cnt:%d",
		    hpts, hpts->p_on_inqueue_cnt);
	}
#endif
	tick_now = tcp_gethptstick(&tv);
	if (SEQ_GT(tick_now, hpts->p_prevtick)) {
		struct timeval res;

		/* Did we really spend a full tick or more in here? */
		timersub(&tv, ctick, &res);
		if (res.tv_sec || (res.tv_usec >= HPTS_TICKS_PER_USEC)) {
			counter_u64_add(hpts_loops, 1);
			if (logging_on) {
				tcp_hpts_log_it(hpts, inp, HPTSLOG_TOLONG, (uint32_t) res.tv_usec, tick_now);
			}
			*ctick = tv;
			hpts->p_curtick = tick_now;
			goto again;
		}
	}
no_run:
	{
		uint32_t t = 0, i, fnd = 0;

		if (hpts->p_on_queue_cnt) {
			/*
			 * Find next slot that is occupied and use that to
			 * be the sleep time.
			 */
			for (i = 1, t = hpts->p_nxt_slot; i < NUM_OF_HPTSI_SLOTS; i++) {
				if (TAILQ_EMPTY(&hpts->p_hptss[t]) == 0) {
					fnd = 1;
					break;
				}
				t = (t + 1) % NUM_OF_HPTSI_SLOTS;
			}
			if (fnd) {
				hpts->p_hpts_sleep_time = i;
			} else {
				counter_u64_add(back_tosleep, 1);
#ifdef INVARIANTS
				panic("Hpts:%p cnt:%d but non found", hpts, hpts->p_on_queue_cnt);
#endif
				hpts->p_on_queue_cnt = 0;
				goto no_found;
			}
		} else {
			/* No one on the wheel, sleep for all but 2 slots */
no_found:
			if (hpts_sleep_max == 0)
				hpts_sleep_max = 1;
			hpts->p_hpts_sleep_time = min((NUM_OF_HPTSI_SLOTS - 2), hpts_sleep_max);
		}
		if (logging_on) {
			tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEPSET, t, (hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC));
		}
	}
}
void
__tcp_set_hpts(struct inpcb *inp, int32_t line)
{
	struct tcp_hpts_entry *hpts;

	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	if ((inp->inp_in_hpts == 0) &&
	    (inp->inp_hpts_cpu_set == 0)) {
		inp->inp_hpts_cpu = hpts_cpuid(inp);
		inp->inp_hpts_cpu_set = 1;
	}
	mtx_unlock(&hpts->p_mtx);
	hpts = tcp_input_lock(inp);
	if ((inp->inp_input_cpu_set == 0) &&
	    (inp->inp_in_input == 0)) {
		inp->inp_input_cpu = hpts_cpuid(inp);
		inp->inp_input_cpu_set = 1;
	}
	mtx_unlock(&hpts->p_mtx);
}
uint32_t
tcp_hpts_delayedby(struct inpcb *inp)
{
	return (tcp_pace.rp_ent[inp->inp_hpts_cpu]->p_delayed_by);
}
static void
tcp_hpts_thread(void *ctx)
{
	struct tcp_hpts_entry *hpts;
	struct timeval tv;
	sbintime_t sb;

	hpts = (struct tcp_hpts_entry *)ctx;
	mtx_lock(&hpts->p_mtx);
	if (hpts->p_direct_wake) {
		/* Signaled by input */
		if (logging_on)
			tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 1, 1);
		callout_stop(&hpts->co);
	} else {
		/* Timed out */
		if (callout_pending(&hpts->co) ||
		    !callout_active(&hpts->co)) {
			if (logging_on)
				tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 2, 2);
			mtx_unlock(&hpts->p_mtx);
			return;
		}
		callout_deactivate(&hpts->co);
		if (logging_on)
			tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 3, 3);
	}
	hpts->p_hpts_active = 1;
	(void)tcp_gethptstick(&tv);
	tcp_hptsi(hpts, &tv);
	HPTS_MTX_ASSERT(hpts);
	tv.tv_sec = 0;
	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
	if (tcp_min_hptsi_time && (tv.tv_usec < tcp_min_hptsi_time)) {
		tv.tv_usec = tcp_min_hptsi_time;
		hpts->p_on_min_sleep = 1;
	} else {
		/* Clear the min sleep flag */
		hpts->p_on_min_sleep = 0;
	}
	hpts->p_hpts_active = 0;
	sb = tvtosbt(tv);
	if (tcp_hpts_callout_skip_swi == 0) {
		callout_reset_sbt_on(&hpts->co, sb, 0,
		    hpts_timeout_swi, hpts, hpts->p_cpu,
		    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	} else {
		callout_reset_sbt_on(&hpts->co, sb, 0,
		    hpts_timeout_dir, hpts,
		    hpts->p_cpu,
		    C_PREL(tcp_hpts_precision));
	}
	hpts->p_direct_wake = 0;
	mtx_unlock(&hpts->p_mtx);
}
static void
tcp_init_hptsi(void *st)
{
	int32_t i, j, error, bound = 0, created = 0;
	size_t sz, asz;
	struct timeval tv;
	sbintime_t sb;
	struct tcp_hpts_entry *hpts;
	struct pcpu *pc;
	cpuset_t cs;
	char unit[16];
	uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
	int count, domain;

	tcp_pace.rp_proc = NULL;
	tcp_pace.rp_num_hptss = ncpus;
	hpts_loops = counter_u64_alloc(M_WAITOK);
	back_tosleep = counter_u64_alloc(M_WAITOK);

	sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
	tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
	asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
		    M_TCPHPTS, M_WAITOK | M_ZERO);
		tcp_pace.rp_ent[i]->p_hptss = malloc(asz,
		    M_TCPHPTS, M_WAITOK);
		hpts = tcp_pace.rp_ent[i];
		/*
		 * Init all the hpts structures that are not specifically
		 * zero'd by the allocations. Also attach them to the
		 * appropriate sysctl block.
		 */
		mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
		    "hpts", MTX_DEF | MTX_DUPOK);
		TAILQ_INIT(&hpts->p_input);
		for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
			TAILQ_INIT(&hpts->p_hptss[j]);
		}
		sysctl_ctx_init(&hpts->hpts_ctx);
		sprintf(unit, "%d", i);
		hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
		    OID_AUTO,
		    unit,
		    CTLFLAG_RW, 0,
		    "");
		SYSCTL_ADD_INT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "in_qcnt", CTLFLAG_RD,
		    &hpts->p_on_inqueue_cnt, 0,
		    "Count TCB's awaiting input processing");
		SYSCTL_ADD_INT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "out_qcnt", CTLFLAG_RD,
		    &hpts->p_on_queue_cnt, 0,
		    "Count TCB's awaiting output processing");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "active", CTLFLAG_RD,
		    &hpts->p_hpts_active, 0,
		    "Is the hpts active");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "curslot", CTLFLAG_RD,
		    &hpts->p_cur_slot, 0,
		    "What the current slot is if active");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "curtick", CTLFLAG_RD,
		    &hpts->p_curtick, 0,
		    "What the current tick is if active");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "logsize", CTLFLAG_RD,
		    &hpts->p_logsize, 0,
		    "Hpts logging buffer size");
		hpts->p_hpts_sleep_time = NUM_OF_HPTSI_SLOTS - 2;
		hpts->p_num = i;
		hpts->p_prevtick = hpts->p_curtick = tcp_gethptstick(&tv);
		hpts->p_prevtick -= 1;
		hpts->p_prevtick %= NUM_OF_HPTSI_SLOTS;
		hpts->p_cpu = 0xffff;
		hpts->p_nxt_slot = 1;
		hpts->p_logsize = tcp_hpts_logging_size;
		if (hpts->p_logsize) {
			sz = (sizeof(struct hpts_log) * hpts->p_logsize);
			hpts->p_log = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
		}
		callout_init(&hpts->co, 1);
	}
	/* Don't try to bind to NUMA domains if we don't have any */
	if (vm_ndomains == 1 && tcp_bind_threads == 2)
		tcp_bind_threads = 0;

	/*
	 * Now let's start ithreads to handle the hptss.
	 */
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		hpts = tcp_pace.rp_ent[i];
		hpts->p_cpu = i;
		error = swi_add(&hpts->ie, "hpts",
		    tcp_hpts_thread, (void *)hpts,
		    SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
		if (error) {
			panic("Can't add hpts:%p i:%d err:%d",
			    hpts, i, error);
		}
		created++;
		if (tcp_bind_threads == 1) {
			if (intr_event_bind(hpts->ie, i) == 0)
				bound++;
		} else if (tcp_bind_threads == 2) {
			pc = pcpu_find(i);
			domain = pc->pc_domain;
			CPU_COPY(&cpuset_domain[domain], &cs);
			if (intr_event_bind_ithread_cpuset(hpts->ie, &cs)
			    == 0) {
				bound++;
				count = hpts_domains[domain].count;
				hpts_domains[domain].cpu[count] = i;
				hpts_domains[domain].count++;
			}
		}
		tv.tv_sec = 0;
		tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
		sb = tvtosbt(tv);
		if (tcp_hpts_callout_skip_swi == 0) {
			callout_reset_sbt_on(&hpts->co, sb, 0,
			    hpts_timeout_swi, hpts, hpts->p_cpu,
			    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
		} else {
			callout_reset_sbt_on(&hpts->co, sb, 0,
			    hpts_timeout_dir, hpts,
			    hpts->p_cpu,
			    C_PREL(tcp_hpts_precision));
		}
	}
	/*
	 * If we somehow have an empty domain, fall back to choosing
	 * among all hpts threads.
	 */
	for (i = 0; i < vm_ndomains; i++) {
		if (hpts_domains[i].count == 0) {
			tcp_bind_threads = 0;
			break;
		}
	}
	printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
	    created, bound,
	    tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
}
SYSINIT(tcphptsi, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, tcp_init_hptsi, NULL);
MODULE_VERSION(tcphpts, 1);