/*-
 * Copyright (c) 2016-2018 Netflix Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"
/*
 * Some notes about usage.
 *
 * The tcp_hpts system is designed to provide a high precision timer
 * system for tcp. Its main purpose is to provide a mechanism for
 * pacing packets out onto the wire. It can be used in two ways
 * by a given TCP stack (and those two methods can be used simultaneously).
 *
 * First, and probably the main thing Rack and BBR use it for, it can
 * be used to call tcp_output() of a transport stack at some time in the future.
 * The normal way this is done is that tcp_output() of the stack schedules
 * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
 * slot is the time from now that the stack wants to be called, but it
 * must be converted to tcp_hpts's notion of a slot. This is done with
 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
 * call from the tcp_output() routine might look like:
 *
 * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
 *
 * The above would schedule tcp_output() to be called in 550 microseconds.
 * Note that if using this mechanism the stack will want to add, near
 * its top, a check to prevent unwanted calls (from user land or the
 * arrival of incoming ACKs). So it would add something like:
 *
 * if (inp->inp_in_hpts)
 *	return;
 *
 * to prevent output processing until the time allotted has gone by.
 * Of course this is a bare-bones example and the stack will probably
 * have more considerations than just the above.
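 *
 * A minimal combined sketch (purely illustrative; foo_tcp_output() is a
 * hypothetical stack routine, not something defined in this file) of the
 * guard plus the re-scheduling might look like:
 *
 *	int
 *	foo_tcp_output(struct tcpcb *tp)
 *	{
 *		if (tp->t_inpcb->inp_in_hpts)
 *			return (0);	<- the pacer already owns the next send
 *		... transmit whatever is currently permitted ...
 *		tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
 *		return (0);
 *	}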
 * Now the tcp_hpts system will call tcp_output in one of two forms.
 * It will first check to see if the stack has defined a
 * tfb_tcp_output_wtime() function; if so, that is the routine it
 * will call. If that function is not defined, then it will call the
 * tfb_tcp_output() function. The only difference between these
 * two calls is that the former passes the time in to the function,
 * so the function does not have to access the time (which tcp_hpts
 * already has). What these functions do is of course totally up
 * to the individual tcp stack.
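 *
 * In other words the dispatch, which mirrors what tcp_hptsi() does further
 * down in this file, amounts to:
 *
 *	if (tp->t_fb->tfb_tcp_output_wtime != NULL)
 *		error = (*tp->t_fb->tfb_tcp_output_wtime)(tp, &tv);
 *	else
 *		error = tp->t_fb->tfb_tcp_output(tp);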
 * Now the second function (actually two functions I guess :D)
 * the tcp_hpts system provides is the ability to either abort
 * a connection (later) or process input on a connection.
 * Why would you want to do this? To keep processor locality.
 *
 * So in order to use the input redirection function the
 * stack changes its tcp_do_segment() routine to, instead
 * of processing the data, call the function:
 *
 * tcp_queue_pkt_to_input()
 *
 * You will note that the arguments to this function look
 * a lot like tcp_do_segment()'s arguments. This function
 * ensures that the tcp_hpts system will
 * call the function tfb_tcp_hpts_do_segment() from the
 * correct CPU. Note that multiple calls can get pushed
 * into the tcp_hpts system; this will be indicated by
 * the next-to-last argument to tfb_tcp_hpts_do_segment()
 * (nxt_pkt). If nxt_pkt is a 1 then another packet is
 * coming. If nxt_pkt is a 0 then this is the last call
 * that the tcp_hpts system has available for the tcp stack.
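 *
 * A hypothetical handler (foo_hpts_do_segment() and its exact argument
 * list are illustrative only, not part of this file) might use nxt_pkt
 * like this:
 *
 *	void
 *	foo_hpts_do_segment(struct mbuf *m, struct tcphdr *th,
 *	    struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen,
 *	    int32_t tlen, uint8_t iptos, int32_t ti_locked,
 *	    int32_t nxt_pkt, struct timeval *tv)
 *	{
 *		... absorb this segment into the connection state ...
 *		if (nxt_pkt == 0) {
 *			... no more queued segments; now emit any ACK or
 *			    output that the whole burst of input calls for ...
 *		}
 *	}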
 * The other point of the input system is to be able to safely
 * drop a tcp connection without worrying about the recursive
 * locking that may be occurring on the INP_WLOCK. So if
 * a stack wants to drop a connection it calls:
 *
 * tcp_set_inp_to_drop(tp, ETIMEDOUT)
 *
 * to schedule the tcp_hpts system to call
 *
 * tcp_drop(tp, drop_reason)
 *
 * at a future point. This is quite handy to prevent locking
 * issues when dropping connections.
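 *
 * For example (illustrative only), a path that detects a fatal condition
 * while it already holds the INP_WLOCK can defer the teardown instead of
 * calling tcp_drop() directly:
 *
 *	if (fatal_error) {
 *		tcp_set_inp_to_drop(inp, ETIMEDOUT);
 *		return;
 *	}
 */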
#include <sys/param.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/queue.h>
#include <sys/counter.h>
#include <sys/time.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* tcpdebug */
#include <netinet/tcp_offload.h>

#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");

static int tcp_bind_threads = 1;
static int tcp_bind_threads = 0;

TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);

static uint32_t tcp_hpts_logging_size = DEFAULT_HPTS_LOG;

TUNABLE_INT("net.inet.tcp.hpts_logging_sz", &tcp_hpts_logging_size);

static struct tcp_hptsi tcp_pace;
static int32_t tcp_hptsi_lock_inpinfo(struct inpcb *inp,
    struct tcpcb **tp);
static void tcp_wakehpts(struct tcp_hpts_entry *p);
static void tcp_wakeinput(struct tcp_hpts_entry *p);
static void tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv);
static void tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick);
static void tcp_hpts_thread(void *ctx);
static void tcp_init_hptsi(void *st);

int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int32_t tcp_hpts_callout_skip_swi = 0;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW, 0, "TCP Hpts controls");
#define timersub(tvp, uvp, vvp)					\
	do {							\
		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;	\
		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
		if ((vvp)->tv_usec < 0) {			\
			(vvp)->tv_sec--;			\
			(vvp)->tv_usec += 1000000;		\
		}						\
	} while (0)
static int32_t logging_on = 0;
static int32_t hpts_sleep_max = (NUM_OF_HPTSI_SLOTS - 2);
static int32_t tcp_hpts_precision = 120;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
    &tcp_hpts_precision, 120,
    "Value for PRE() precision of callout");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
    "Turn on logging if compiled in");

counter_u64_t hpts_loops;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, loops, CTLFLAG_RD,
    &hpts_loops, "Number of times hpts had to loop to catch up");

counter_u64_t back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
    &back_tosleep, "Number of times hpts found no tcbs");

static int32_t in_newts_every_tcb = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tsperpcb, CTLFLAG_RW,
    &in_newts_every_tcb, 0,
    "Do we have a new cts every tcb we process for input");
static int32_t in_ts_percision = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tspercision, CTLFLAG_RW,
    "Do we use a precise timestamp for clients on input");
static int32_t out_newts_every_tcb = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tsperpcb, CTLFLAG_RW,
    &out_newts_every_tcb, 0,
    "Do we have a new cts every tcb we process for output");
static int32_t out_ts_percision = 0;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tspercision, CTLFLAG_RW,
    &out_ts_percision, 0,
    "Do we use a precise timestamp for every output cts");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, maxsleep, CTLFLAG_RW,
    "The maximum time the hpts will sleep <1 - 254>");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, minsleep, CTLFLAG_RW,
    &tcp_min_hptsi_time, 0,
    "The minimum time the hpts must sleep before processing more slots");

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, skip_swi, CTLFLAG_RW,
    &tcp_hpts_callout_skip_swi, 0,
    "Do we have the callout call directly to the hpts?");
__tcp_hpts_log_it(struct tcp_hpts_entry *hpts, struct inpcb *inp, int event, uint32_t slot,
    uint32_t ticknow, int32_t line)
{
	HPTS_MTX_ASSERT(hpts);
	if (hpts->p_log == NULL)
		return;
	pl = &hpts->p_log[hpts->p_log_at];
	if (hpts->p_log_at >= hpts->p_logsize) {
		hpts->p_log_wrapped = 1;
	}
	pl->t_paceslot = inp->inp_hptsslot;
	pl->t_hptsreq = inp->inp_hpts_request;
	pl->p_onhpts = inp->inp_in_hpts;
	pl->p_oninput = inp->inp_in_input;
	pl->cts = tcp_get_usecs(NULL);
	pl->p_curtick = hpts->p_curtick;
	pl->p_prevtick = hpts->p_prevtick;
	pl->p_on_queue_cnt = hpts->p_on_queue_cnt;
	pl->ticknow = ticknow;
	pl->p_nxt_slot = hpts->p_nxt_slot;
	pl->p_cur_slot = hpts->p_cur_slot;
	pl->p_hpts_sleep_time = hpts->p_hpts_sleep_time;
	pl->p_flags = (hpts->p_cpu & 0x7f);
	pl->p_flags |= (hpts->p_num & 0x7f);
	if (hpts->p_hpts_active) {
		pl->p_flags |= HPTS_HPTS_ACTIVE;
	}
}

#define tcp_hpts_log_it(a, b, c, d, e) __tcp_hpts_log_it(a, b, c, d, e, __LINE__)

hpts_timeout_swi(void *arg)
{
	struct tcp_hpts_entry *hpts;

	hpts = (struct tcp_hpts_entry *)arg;
	swi_sched(hpts->ie_cookie, 0);
}

hpts_timeout_dir(void *arg)
{
	tcp_hpts_thread(arg);
}
hpts_sane_pace_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int clear)
{
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_hpts_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_hpts == 0) {
		/* We are not on the hpts? */
		panic("%s: hpts:%p inp:%p not on the hpts?", __FUNCTION__, hpts, inp);
	}
	if (TAILQ_EMPTY(head) &&
	    (hpts->p_on_queue_cnt != 0)) {
		/* We should not be empty with a queue count */
		panic("%s hpts:%p hpts bucket empty but cnt:%d",
		    __FUNCTION__, hpts, hpts->p_on_queue_cnt);
	}
	TAILQ_REMOVE(head, inp, inp_hpts);
	hpts->p_on_queue_cnt--;
	if (hpts->p_on_queue_cnt < 0) {
		/* Count should not go negative .. */
		panic("Hpts goes negative inp:%p hpts:%p",
		hpts->p_on_queue_cnt = 0;
	}
	inp->inp_hpts_request = 0;
	inp->inp_in_hpts = 0;
}

hpts_sane_pace_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int line, int noref)
{
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_hpts_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if ((noref == 0) && (inp->inp_in_hpts == 1)) {
		/* We are already on the hpts? */
		panic("%s: hpts:%p inp:%p already on the hpts?", __FUNCTION__, hpts, inp);
	}
	TAILQ_INSERT_TAIL(head, inp, inp_hpts);
	inp->inp_in_hpts = 1;
	hpts->p_on_queue_cnt++;
}

hpts_sane_input_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, int clear)
{
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_input_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_input == 0) {
		/* We are not on the input hpts? */
		panic("%s: hpts:%p inp:%p not on the input hpts?", __FUNCTION__, hpts, inp);
	}
	TAILQ_REMOVE(&hpts->p_input, inp, inp_input);
	hpts->p_on_inqueue_cnt--;
	if (hpts->p_on_inqueue_cnt < 0) {
		panic("Hpts in goes negative inp:%p hpts:%p",
		hpts->p_on_inqueue_cnt = 0;
	}
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		/* We should not be empty with a queue count */
		panic("%s hpts:%p in_hpts input empty but cnt:%d",
		    __FUNCTION__, hpts, hpts->p_on_inqueue_cnt);
	}
	inp->inp_in_input = 0;
}

hpts_sane_input_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, int line)
{
	if (mtx_owned(&hpts->p_mtx) == 0) {
		/* We don't own the mutex? */
		panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
	}
	if (hpts->p_cpu != inp->inp_input_cpu) {
		/* It is not the right cpu/mutex? */
		panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
	}
	if (inp->inp_in_input == 1) {
		/* We are already on the input hpts? */
		panic("%s: hpts:%p inp:%p already on the input hpts?", __FUNCTION__, hpts, inp);
	}
	TAILQ_INSERT_TAIL(&hpts->p_input, inp, inp_input);
	inp->inp_in_input = 1;
	hpts->p_on_inqueue_cnt++;
}
sysctl_tcp_hpts_log(SYSCTL_HANDLER_ARGS)
{
	struct tcp_hpts_entry *hpts;
	int32_t logging_was, i;

	/*
	 * HACK: Turn off logging so no locks are required; this really
	 * needs a memory barrier :)
	 */
	logging_was = logging_on;
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		hpts = tcp_pace.rp_ent[i];
		if (hpts->p_log == NULL)
		sz += (sizeof(struct hpts_log) * hpts->p_logsize);
	}
	error = SYSCTL_OUT(req, 0, sz);
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		hpts = tcp_pace.rp_ent[i];
		if (hpts->p_log == NULL)
		if (hpts->p_log_wrapped)
			sz = (sizeof(struct hpts_log) * hpts->p_logsize);
			sz = (sizeof(struct hpts_log) * hpts->p_log_at);
		error = SYSCTL_OUT(req, hpts->p_log, sz);
	}
	logging_on = logging_was;
}

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, log, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hpts_log, "A", "tcp hptsi log");
/*
 * Try to get the INP_INFO lock.
 *
 * This function always succeeds in getting the lock. It will clear
 * *tpp and return (1) if something critical changed while the inpcb
 * was unlocked. Otherwise, it will leave *tpp unchanged and return (0).
 *
 * This function relies on the fact that the hpts always holds a
 * reference on the inpcb while the segment is on the hptsi wheel and
 * in the input queue.
 */
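/*
 * A typical caller pattern (illustrative only) looks like:
 *
 *	if (tcp_hptsi_lock_inpinfo(inp, &tp)) {
 *		... the connection changed underneath us and *tp was
 *		    cleared, so abandon the work for this inpcb ...
 *	}
 */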
tcp_hptsi_lock_inpinfo(struct inpcb *inp, struct tcpcb **tpp)
{
	struct tcp_function_block *tfb;

	/* Try the easy way. */
	if (INP_INFO_TRY_RLOCK(&V_tcbinfo))
	/*
	 * OK, let's try the hard way. We'll save the function pointer block
	 * to make sure that doesn't change while we aren't holding the
	 */
	INP_INFO_RLOCK(&V_tcbinfo);

	/* If the session went away, return an error. */
	if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
	    (inp->inp_flags2 & INP_FREED)) {
	/*
	 * If the function block or stack-specific data block changed,
	 */
	if ((tp->t_fb != tfb) && (tp->t_fb_ptr != ptr)) {
tcp_wakehpts(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);
	swi_sched(hpts->ie_cookie, 0);
	if (hpts->p_hpts_active == 2) {
		/* Rare sleeping on a ENOBUF */

tcp_wakeinput(struct tcp_hpts_entry *hpts)
{
	HPTS_MTX_ASSERT(hpts);
	swi_sched(hpts->ie_cookie, 0);
	if (hpts->p_hpts_active == 2) {
		/* Rare sleeping on a ENOBUF */

struct tcp_hpts_entry *
tcp_cur_hpts(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;

	hpts_num = inp->inp_hpts_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];

struct tcp_hpts_entry *
tcp_hpts_lock(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;

	hpts_num = inp->inp_hpts_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
	if (mtx_owned(&hpts->p_mtx)) {
		panic("Hpts:%p owns mtx prior-to lock line:%d",
	mtx_lock(&hpts->p_mtx);
	if (hpts_num != inp->inp_hpts_cpu) {
		mtx_unlock(&hpts->p_mtx);

struct tcp_hpts_entry *
tcp_input_lock(struct inpcb *inp)
{
	struct tcp_hpts_entry *hpts;

	hpts_num = inp->inp_input_cpu;
	hpts = tcp_pace.rp_ent[hpts_num];
	if (mtx_owned(&hpts->p_mtx)) {
		panic("Hpts:%p owns mtx prior-to lock line:%d",
	mtx_lock(&hpts->p_mtx);
	if (hpts_num != inp->inp_input_cpu) {
		mtx_unlock(&hpts->p_mtx);
tcp_remove_hpts_ref(struct inpcb *inp, struct tcp_hpts_entry *hpts, int line)
{
	if (inp->inp_flags2 & INP_FREED) {
		/*
		 * Need to play a special trick so that in_pcbrele_wlocked
		 * does not return 1 when it really should have returned 0.
		 */
		inp->inp_flags2 &= ~INP_FREED;
#ifndef INP_REF_DEBUG
	if (in_pcbrele_wlocked(inp)) {
		/*
		 * This should not happen. We have the inpcb referred to by
		 * the main socket (why we are called) and the hpts. It
		 * should always return 0.
		 */
		panic("inpcb:%p release ret 1",
	if (__in_pcbrele_wlocked(inp, line)) {
		/*
		 * This should not happen. We have the inpcb referred to by
		 * the main socket (why we are called) and the hpts. It
		 * should always return 0.
		 */
		panic("inpcb:%p release ret 1",
	inp->inp_flags2 |= INP_FREED;
}

tcp_hpts_remove_locked_output(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
{
	if (inp->inp_in_hpts) {
		hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], 1);
		tcp_remove_hpts_ref(inp, hpts, line);
	}
}

tcp_hpts_remove_locked_input(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
{
	HPTS_MTX_ASSERT(hpts);
	if (inp->inp_in_input) {
		hpts_sane_input_remove(hpts, inp, 1);
		tcp_remove_hpts_ref(inp, hpts, line);
	}
}
/*
 * Called normally with the INP_LOCKED but it
 * does not matter, the hpts lock is the key
 * but the lock order allows us to hold the
 * INP lock and then get the hpts lock.
 *
 * Valid values in the flags are
 * HPTS_REMOVE_OUTPUT - remove from the output of the hpts.
 * HPTS_REMOVE_INPUT - remove from the input of the hpts.
 * Note that you can OR both values together and get two
 * actions.
 */
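/*
 * For instance (illustrative only; this assumes the usual wrapper macro
 * in tcp_hpts.h that supplies __LINE__ to __tcp_hpts_remove()):
 *
 *	tcp_hpts_remove(inp, HPTS_REMOVE_OUTPUT | HPTS_REMOVE_INPUT);
 */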
__tcp_hpts_remove(struct inpcb *inp, int32_t flags, int32_t line)
{
	struct tcp_hpts_entry *hpts;

	INP_WLOCK_ASSERT(inp);
	if (flags & HPTS_REMOVE_OUTPUT) {
		hpts = tcp_hpts_lock(inp);
		tcp_hpts_remove_locked_output(hpts, inp, flags, line);
		mtx_unlock(&hpts->p_mtx);
	}
	if (flags & HPTS_REMOVE_INPUT) {
		hpts = tcp_input_lock(inp);
		tcp_hpts_remove_locked_input(hpts, inp, flags, line);
		mtx_unlock(&hpts->p_mtx);
	}
}

hpts_tick(struct tcp_hpts_entry *hpts, int32_t plus)
{
	return ((hpts->p_prevtick + plus) % NUM_OF_HPTSI_SLOTS);
}

tcp_queue_to_hpts_immediate_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line, int32_t noref)
{
	int32_t need_wake = 0;
	uint32_t ticknow = 0;

	HPTS_MTX_ASSERT(hpts);
	if (inp->inp_in_hpts == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		if (hpts->p_hpts_active == 0) {
			/* A sleeping hpts we want in next slot to run */
			tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, 0,
			inp->inp_hptsslot = hpts_tick(hpts, 1);
			inp->inp_hpts_request = 0;
			tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEP_BEFORE, 1, ticknow);
		} else if ((void *)inp == hpts->p_inp) {
			/*
			 * We can't allow you to go into the same slot we
			 * are in. We must put you out.
			 */
			inp->inp_hptsslot = hpts->p_nxt_slot;
			inp->inp_hptsslot = hpts->p_cur_slot;
		hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
		inp->inp_hpts_request = 0;
		tcp_hpts_log_it(hpts, inp, HPTSLOG_IMMEDIATE, 0, 0);
		/*
		 * Activate the hpts if it is sleeping and its
		 */
		tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_HPTS, 0, ticknow);
		hpts->p_direct_wake = 1;

__tcp_queue_to_hpts_immediate(struct inpcb *inp, int32_t line)
{
	struct tcp_hpts_entry *hpts;

	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	ret = tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
	mtx_unlock(&hpts->p_mtx);
tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t slot, uint32_t cts, int32_t line,
    struct hpts_diag *diag, int32_t noref)
{
	int32_t need_new_to = 0;
	int32_t need_wakeup = 0;
	uint32_t largest_slot;
	uint32_t ticknow = 0;

	HPTS_MTX_ASSERT(hpts);
	memset(diag, 0, sizeof(struct hpts_diag));
	diag->p_hpts_active = hpts->p_hpts_active;
	diag->p_nxt_slot = hpts->p_nxt_slot;
	diag->p_cur_slot = hpts->p_cur_slot;
	diag->slot_req = slot;
	if ((inp->inp_in_hpts == 0) || noref) {
		inp->inp_hpts_request = slot;
		tcp_queue_to_hpts_immediate_locked(inp, hpts, line, noref);
		if (hpts->p_hpts_active) {
			/*
			 * It's slot - 1 since nxt_slot is the next tick that
			 * will go off since the hpts is awake.
			 */
			tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_NORMAL, slot, 0);
			/*
			 * We want to make sure that we don't place an inp in
			 * the range of p_cur_slot <-> p_nxt_slot. If we
			 * take from p_nxt_slot to the end, plus p_cur_slot
			 * and then take away 2, we will know the max number
			 * of slots we can use.
			 */
			if (hpts->p_nxt_slot > hpts->p_cur_slot) {
				/*
				 * Non-wrap case nxt_slot <-> cur_slot we
				 * don't want to land in. So the diff gives
				 * us what is taken away from the number of
				 * slots.
				 */
				largest_slot = NUM_OF_HPTSI_SLOTS - (hpts->p_nxt_slot - hpts->p_cur_slot);
			} else if (hpts->p_nxt_slot == hpts->p_cur_slot) {
				largest_slot = NUM_OF_HPTSI_SLOTS - 2;
				/*
				 * Wrap case so the diff gives us the number
				 * of slots that we can land in.
				 */
				largest_slot = hpts->p_cur_slot - hpts->p_nxt_slot;
			/*
			 * We take away two so we never have a problem (20
			 * usecs) out of 1024000 usecs.
			 */
			if (inp->inp_hpts_request > largest_slot) {
				/*
				 * Restrict max jump of slots and remember
				 */
				inp->inp_hpts_request -= largest_slot;
				/* This one will run when we hit it */
				inp->inp_hpts_request = 0;
			if (hpts->p_nxt_slot == hpts->p_cur_slot)
				slot_calc = (hpts->p_nxt_slot + slot) % NUM_OF_HPTSI_SLOTS;
				slot_calc = (hpts->p_nxt_slot + slot - 1) % NUM_OF_HPTSI_SLOTS;
			if (slot_calc == hpts->p_cur_slot) {
				panic("Hpts:%p impossible slot calculation slot_calc:%u slot:%u largest:%u\n",
				    hpts, slot_calc, slot, largest_slot);
				slot_calc = NUM_OF_HPTSI_SLOTS - 1;
			inp->inp_hptsslot = slot_calc;
			diag->inp_hptsslot = inp->inp_hptsslot;
			/*
			 * The hpts is sleeping, so we need to figure out where
			 * it will wake up and whether we need to reschedule it.
			 */
			uint32_t have_slept, yet_to_sleep;

			ticknow = tcp_gethptstick(&tv);
			slot_now = ticknow % NUM_OF_HPTSI_SLOTS;
			/*
			 * The user wants to be inserted at (slot_now +
			 * slot) % NUM_OF_HPTSI_SLOTS, so let's set that up.
			 */
			largest_slot = NUM_OF_HPTSI_SLOTS - 2;
			if (inp->inp_hpts_request > largest_slot) {
				/* Adjust the residual in inp_hpts_request */
				inp->inp_hpts_request -= largest_slot;
				/* No residual, it all fits */
				inp->inp_hpts_request = 0;
			inp->inp_hptsslot = (slot_now + slot) % NUM_OF_HPTSI_SLOTS;
			diag->slot_now = slot_now;
			diag->inp_hptsslot = inp->inp_hptsslot;
			diag->p_on_min_sleep = hpts->p_on_min_sleep;
			tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, slot, ticknow);
			/* Now do we need to restart the hpts's timer? */
			if (TSTMP_GT(ticknow, hpts->p_curtick))
				have_slept = ticknow - hpts->p_curtick;
			if (have_slept < hpts->p_hpts_sleep_time) {
				/* This should be what happens */
				yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
				/* We are over-due */
			diag->have_slept = have_slept;
			diag->yet_to_sleep = yet_to_sleep;
			diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
			if ((hpts->p_on_min_sleep == 0) && (yet_to_sleep > slot)) {
				/*
				 * We need to reschedule the hpts's timeout.
				 */
				hpts->p_hpts_sleep_time = slot;
				need_new_to = slot * HPTS_TICKS_PER_USEC;
		hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
		tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERTED, slot, ticknow);
		/*
		 * Now how far is the hpts sleeping to? If active is 1, it's
		 * up and ticking and we do nothing; otherwise we may need to
		 * reschedule its callout if need_new_to is set from above.
		 */
		tcp_hpts_log_it(hpts, inp, HPTSLOG_RESCHEDULE, 1, 0);
		hpts->p_direct_wake = 1;
		diag->need_new_to = 0;
		diag->co_ret = 0xffff0000;
		} else if (need_new_to) {
			while (need_new_to > HPTS_USEC_IN_SEC) {
				need_new_to -= HPTS_USEC_IN_SEC;
			tv.tv_usec = need_new_to;
			if (tcp_hpts_callout_skip_swi == 0) {
				co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
				    hpts_timeout_swi, hpts, hpts->p_cpu,
				    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
				co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
				    hpts_timeout_dir, hpts,
				    C_PREL(tcp_hpts_precision));
			diag->need_new_to = need_new_to;
			diag->co_ret = co_ret;
		panic("Hpts:%p tp:%p already on hpts and add?", hpts, inp);
tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag){
	struct tcp_hpts_entry *hpts;
	uint32_t slot_on, cts;

	/*
	 * We now return the next-slot the hpts will be on, beyond its
	 * current run (if up) or where it was when it stopped if it is
	 * sleeping.
	 */
	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	if (in_ts_percision)
	getmicrouptime(&tv);
	cts = tcp_tv_to_usectick(&tv);
	tcp_hpts_insert_locked(hpts, inp, slot, cts, line, diag, 0);
	slot_on = hpts->p_nxt_slot;
	mtx_unlock(&hpts->p_mtx);

__tcp_hpts_insert(struct inpcb *inp, uint32_t slot, int32_t line){
	return (tcp_hpts_insert_diag(inp, slot, line, NULL));
}

__tcp_queue_to_input_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line)
{
	HPTS_MTX_ASSERT(hpts);
	if (inp->inp_in_input == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		hpts_sane_input_insert(hpts, inp, line);
		if (hpts->p_hpts_active == 0) {
			/*
			 * Activate the hpts if it is sleeping.
			 */
			tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_INPUT, 0, 0);
			hpts->p_direct_wake = 1;
			tcp_wakeinput(hpts);
	} else if (hpts->p_hpts_active == 0) {
		hpts->p_direct_wake = 1;
		tcp_wakeinput(hpts);

tcp_queue_pkt_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int32_t tlen, int32_t drop_hdrlen, uint8_t iptos, uint8_t ti_locked)
{
	/* Setup packet for input first */
	INP_WLOCK_ASSERT(tp->t_inpcb);
	m->m_pkthdr.pace_thoff = (uint16_t) ((caddr_t)th - mtod(m, caddr_t));
	m->m_pkthdr.pace_tlen = (uint16_t) tlen;
	m->m_pkthdr.pace_drphdrlen = drop_hdrlen;
	m->m_pkthdr.pace_tos = iptos;
	m->m_pkthdr.pace_lock = (uint8_t) ti_locked;
	if (tp->t_in_pkt == NULL) {
	tp->t_tail_pkt->m_nextpkt = m;

__tcp_queue_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
    int32_t tlen, int32_t drop_hdrlen, uint8_t iptos, uint8_t ti_locked, int32_t line){
	struct tcp_hpts_entry *hpts;

	tcp_queue_pkt_to_input(tp, m, th, tlen, drop_hdrlen, iptos, ti_locked);
	hpts = tcp_input_lock(tp->t_inpcb);
	ret = __tcp_queue_to_input_locked(tp->t_inpcb, hpts, line);
	mtx_unlock(&hpts->p_mtx);
__tcp_set_inp_to_drop(struct inpcb *inp, uint16_t reason, int32_t line)
{
	struct tcp_hpts_entry *hpts;

	tp = intotcpcb(inp);
	hpts = tcp_input_lock(tp->t_inpcb);
	if (inp->inp_in_input == 0) {
		/* Ok we need to set it on the hpts in the current slot */
		hpts_sane_input_insert(hpts, inp, line);
		if (hpts->p_hpts_active == 0) {
			/*
			 * Activate the hpts if it is sleeping.
			 */
			hpts->p_direct_wake = 1;
			tcp_wakeinput(hpts);
	} else if (hpts->p_hpts_active == 0) {
		hpts->p_direct_wake = 1;
		tcp_wakeinput(hpts);
	inp->inp_hpts_drop_reas = reason;
	mtx_unlock(&hpts->p_mtx);

hpts_random_cpu(struct inpcb *inp){
	/*
	 * No flow type set; distribute the load randomly.
	 */

	/*
	 * If one has been set, use it, i.e. we want both in and out on the
	 * same CPU.
	 */
	if (inp->inp_input_cpu_set) {
		return (inp->inp_input_cpu);
	} else if (inp->inp_hpts_cpu_set) {
		return (inp->inp_hpts_cpu);
	/* Nothing set; use a random number */
	cpuid = (ran & 0xffff) % mp_ncpus;

hpts_cpuid(struct inpcb *inp){
	/*
	 * If one has been set, use it, i.e. we want both in and out on the
	 * same CPU.
	 */
	if (inp->inp_input_cpu_set) {
		return (inp->inp_input_cpu);
	} else if (inp->inp_hpts_cpu_set) {
		return (inp->inp_hpts_cpu);
	/* If one is set the other must be the same */
	cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
	if (cpuid == NETISR_CPUID_NONE)
		return (hpts_random_cpu(inp));
	/*
	 * We don't have a flowid -> cpuid mapping, so cheat and just map
	 * unknown cpuids to curcpu. Not the best, but apparently better
	 * than defaulting to swi 0.
	 */
	if (inp->inp_flowtype != M_HASHTYPE_NONE) {
		cpuid = inp->inp_flowid % mp_ncpus;
	cpuid = hpts_random_cpu(inp);
/*
 * Do NOT try to optimize the processing of inp's
 * by first pulling off all the inp's into a temporary
 * list (e.g. TAILQ_CONCAT). If you do that, the subtle
 * interactions of switching CPU's will kill you because of
 * problems in the linked list manipulation. Basically
 * you would switch cpu's with the hpts mutex locked,
 * but then while you were processing one of the inp's
 * some other one that you switched will get a new
 * packet on the different CPU. It will insert it
 * on the new hpts's input list. Creating a temporary
 * link in the inp will not fix it either, since
 * the other hpts will be doing the same thing and
 * you will both end up using the temporary link.
 *
 * You will die in an ASSERT for tailq corruption if you
 * run INVARIANTS, or you will die horribly without
 * INVARIANTS in some unknown way with a corrupt linked
 * list.
 */
tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv)
{
	uint16_t drop_reason;
	uint32_t did_prefetch = 0;
	int32_t ti_locked = TI_UNLOCKED;

	HPTS_MTX_ASSERT(hpts);
	while ((inp = TAILQ_FIRST(&hpts->p_input)) != NULL) {
		HPTS_MTX_ASSERT(hpts);
		hpts_sane_input_remove(hpts, inp, 0);
		if (inp->inp_input_cpu_set == 0) {
		drop_reason = inp->inp_hpts_drop_reas;
		inp->inp_in_input = 0;
		tp = intotcpcb(inp);
		mtx_unlock(&hpts->p_mtx);
		CURVNET_SET(tp->t_vnet);
		INP_INFO_RLOCK(&V_tcbinfo);
		ti_locked = TI_RLOCKED;
		ti_locked = TI_UNLOCKED;
		if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
		    (inp->inp_flags2 & INP_FREED)) {
			if (ti_locked == TI_RLOCKED) {
				INP_INFO_RUNLOCK(&V_tcbinfo);
			if (in_pcbrele_wlocked(inp) == 0) {
			ti_locked = TI_UNLOCKED;
			mtx_lock(&hpts->p_mtx);
		if ((tp == NULL) || (tp->t_inpcb == NULL)) {
		/* This tcb is being destroyed for drop_reason */
		tp->t_in_pkt = NULL;
		tp = tcp_drop(tp, drop_reason);
		INP_INFO_RUNLOCK(&V_tcbinfo);
		if (in_pcbrele_wlocked(inp) == 0)
		mtx_lock(&hpts->p_mtx);
		/*
		 * Setup so the next time we will move to the right
		 * CPU. This should be a rare event. It will
		 * sometimes happen when we are the client side
		 * (usually not the server). Somehow tcp_output()
		 * gets called before the tcp_do_segment() sets the
		 * initial state. This means the r_cpu and r_hpts_cpu
		 * is 0. We get on the hpts, and then tcp_input()
		 * gets called setting up the r_cpu to the correct
		 * value. The hpts goes off and sees the mis-match.
		 * We simply correct it here and the CPU will switch
		 * to the new hpts next time the tcb gets added to
		 * the hpts (not this time) :-)
		 */
		    (m->m_pkthdr.pace_lock == TI_RLOCKED ||
		    tp->t_state != TCPS_ESTABLISHED)) {
			ti_locked = TI_RLOCKED;
			if (tcp_hptsi_lock_inpinfo(inp, &tp)) {
		if (in_newts_every_tcb) {
			if (in_ts_percision)
		if (tp->t_fb_ptr != NULL) {
			kern_prefetch(tp->t_fb_ptr, &did_prefetch);
		/* Any input work to do, if so do it first */
		if ((m != NULL) && (m == tp->t_in_pkt)) {
			int32_t tlen, drop_hdrlen, nxt_pkt;

			tp->t_in_pkt = tp->t_tail_pkt = NULL;
			th = (struct tcphdr *)(mtod(m, caddr_t)+m->m_pkthdr.pace_thoff);
			tlen = m->m_pkthdr.pace_tlen;
			drop_hdrlen = m->m_pkthdr.pace_drphdrlen;
			iptos = m->m_pkthdr.pace_tos;
			m->m_nextpkt = NULL;
			inp->inp_input_calls = 1;
			if (tp->t_fb->tfb_tcp_hpts_do_segment) {
				/* Use the hpts specific do_segment */
				(*tp->t_fb->tfb_tcp_hpts_do_segment) (m, th, inp->inp_socket,
				    tlen, iptos, ti_locked, nxt_pkt, tv);
				/* Use the default do_segment */
				(*tp->t_fb->tfb_tcp_do_segment) (m, th, inp->inp_socket,
				    tlen, iptos, ti_locked);
			/*
			 * Do segment returns unlocked; we need the
			 * lock again, but we also need some kasserts.
			 */
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			INP_UNLOCK_ASSERT(inp);
			    m->m_pkthdr.pace_lock == TI_RLOCKED) {
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				ti_locked = TI_UNLOCKED;
			/*
			 * Since we have an opening here we must
			 * re-check if the tcb went away while we
			 * were getting the lock(s).
			 */
			if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
			    (inp->inp_flags2 & INP_FREED)) {
			/*
			 * Now that we hold the INP lock, check if
			 * we need to upgrade our lock.
			 */
			if (ti_locked == TI_UNLOCKED &&
			    (tp->t_state != TCPS_ESTABLISHED)) {
				ti_locked = TI_RLOCKED;
				if (tcp_hptsi_lock_inpinfo(inp, &tp))
		} /** end while(m) */
		} /** end if ((m != NULL) && (m == tp->t_in_pkt)) */
		if (in_pcbrele_wlocked(inp) == 0)
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RUNLOCK(&V_tcbinfo);
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		INP_UNLOCK_ASSERT(inp);
		ti_locked = TI_UNLOCKED;
		mtx_lock(&hpts->p_mtx);
tcp_hpts_est_run(struct tcp_hpts_entry *hpts)
{
	int32_t ticks_to_run;

	if (hpts->p_prevtick && (SEQ_GT(hpts->p_curtick, hpts->p_prevtick))) {
		ticks_to_run = hpts->p_curtick - hpts->p_prevtick;
		if (ticks_to_run >= (NUM_OF_HPTSI_SLOTS - 1)) {
			ticks_to_run = NUM_OF_HPTSI_SLOTS - 2;
		}
	if (hpts->p_prevtick == hpts->p_curtick) {
		/* This happens when we get woken up right away */
	/* Set in where we will be when we catch up */
	hpts->p_nxt_slot = (hpts->p_cur_slot + ticks_to_run) % NUM_OF_HPTSI_SLOTS;
	if (hpts->p_nxt_slot == hpts->p_cur_slot) {
		panic("Impossible math -- hpts:%p p_nxt_slot:%d p_cur_slot:%d ticks_to_run:%d",
		    hpts, hpts->p_nxt_slot, hpts->p_cur_slot, ticks_to_run);
	}
	return (ticks_to_run);
tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick)
{
	struct inpcb *inp = NULL, *ninp;
	int32_t ticks_to_run, i, error, tick_now, interum_tick;
	int32_t paced_cnt = 0;
	int32_t did_prefetch = 0;
	int32_t prefetch_ninp = 0;
	int32_t prefetch_tp = 0;

	HPTS_MTX_ASSERT(hpts);
	hpts->p_curtick = tcp_tv_to_hptstick(ctick);
	cts = tcp_tv_to_usectick(ctick);
	memcpy(&tv, ctick, sizeof(struct timeval));
	hpts->p_cur_slot = hpts_tick(hpts, 1);

	/* Figure out if we had missed ticks */
	HPTS_MTX_ASSERT(hpts);
	ticks_to_run = tcp_hpts_est_run(hpts);
	if (!TAILQ_EMPTY(&hpts->p_input)) {
		tcp_input_data(hpts, &tv);
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		panic("tp:%p in_hpts input empty but cnt:%d",
		    hpts, hpts->p_on_inqueue_cnt);
	HPTS_MTX_ASSERT(hpts);
	/* Reset the ticks to run and time if we need to */
	interum_tick = tcp_gethptstick(&tv);
	if (interum_tick != hpts->p_curtick) {
		/* Save off the new time we execute to */
		hpts->p_curtick = interum_tick;
		cts = tcp_tv_to_usectick(&tv);
		hpts->p_cur_slot = hpts_tick(hpts, 1);
		ticks_to_run = tcp_hpts_est_run(hpts);
	if (ticks_to_run == -1) {
	tcp_hpts_log_it(hpts, inp, HPTSLOG_SETTORUN, ticks_to_run, 0);
	if (hpts->p_on_queue_cnt == 0) {
	HPTS_MTX_ASSERT(hpts);
	for (i = 0; i < ticks_to_run; i++) {
		/*
		 * Calculate our delay, if there are no extra ticks there
		 */
		hpts->p_delayed_by = (ticks_to_run - (i + 1)) * HPTS_TICKS_PER_USEC;
		HPTS_MTX_ASSERT(hpts);
		while ((inp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
			tcp_hpts_log_it(hpts, inp, HPTSLOG_HPTSI, ticks_to_run, i);
			if (hpts->p_cur_slot != inp->inp_hptsslot) {
				panic("Hpts:%p inp:%p slot mis-aligned %u vs %u",
				    hpts, inp, hpts->p_cur_slot, inp->inp_hptsslot);
			if (inp->inp_hpts_cpu_set == 0) {
			hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[hpts->p_cur_slot], 0);
			if ((ninp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
				/* We prefetch the next inp if possible */
				kern_prefetch(ninp, &prefetch_ninp);
			if (inp->inp_hpts_request) {
				/*
				 * This guy is deferred out further in time
				 * than our wheel had on it. Push him back
				 */
				int32_t remaining_slots;

				remaining_slots = ticks_to_run - (i + 1);
				if (inp->inp_hpts_request > remaining_slots) {
					/*
					 * Keep INVARIANTS happy by clearing
					 */
					tcp_hpts_insert_locked(hpts, inp, inp->inp_hpts_request, cts, __LINE__, NULL, 1);
				inp->inp_hpts_request = 0;
			/*
			 * We clear the hpts flag here after dealing with
			 * remaining slots. This way anyone looking with the
			 * TCB lock will see it's on the hpts until just
			 */
			inp->inp_in_hpts = 0;
			mtx_unlock(&hpts->p_mtx);
			if (in_pcbrele_wlocked(inp)) {
				mtx_lock(&hpts->p_mtx);
				tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 1);
			if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
			if (mtx_owned(&hpts->p_mtx)) {
				panic("Hpts:%p owns mtx prior-to lock line:%d",
			mtx_lock(&hpts->p_mtx);
			tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 3);
			tp = intotcpcb(inp);
			if ((tp == NULL) || (tp->t_inpcb == NULL)) {
			/*
			 * Setup so the next time we will move to
			 * the right CPU. This should be a rare
			 * event. It will sometimes happen when we
			 * are the client side (usually not the
			 * server). Somehow tcp_output() gets called
			 * before the tcp_do_segment() sets the
			 * initial state. This means the r_cpu and
			 * r_hpts_cpu is 0. We get on the hpts, and
			 * then tcp_input() gets called setting up
			 * the r_cpu to the correct value. The hpts
			 * goes off and sees the mis-match. We
			 * simply correct it here and the CPU will
			 * switch to the new hpts next time the tcb
			 * gets added to the hpts (not this one)
			 */
			if (out_newts_every_tcb) {
				if (out_ts_percision)
				getmicrouptime(&sv);
				cts = tcp_tv_to_usectick(&sv);
			CURVNET_SET(tp->t_vnet);
			/*
			 * There is a hole here; we get the refcnt on the
			 * inp so it will still be preserved, but to make
			 * sure we can get the INP we need to hold the p_mtx
			 * above while we pull out the tp/inp. As long as
			 * fini gets the lock first, we are assured of having
			 * a sane INP we can lock and test.
			 */
			if (mtx_owned(&hpts->p_mtx)) {
				panic("Hpts:%p owns mtx before tcp-output:%d",
			if (tp->t_fb_ptr != NULL) {
				kern_prefetch(tp->t_fb_ptr, &did_prefetch);
			inp->inp_hpts_calls = 1;
			if (tp->t_fb->tfb_tcp_output_wtime != NULL) {
				error = (*tp->t_fb->tfb_tcp_output_wtime) (tp, &tv);
				error = tp->t_fb->tfb_tcp_output(tp);
			if (ninp && ninp->inp_ppcb) {
				/*
				 * If we have a nxt inp, see if we can
				 * prefetch its ppcb. Note this may seem
				 * "risky" since we have no locks (other
				 * than the previous inp) and there is no
				 * assurance that ninp was not pulled while
				 * we were processing inp and freed. If this
				 * occurred it could mean that either:
				 *
				 * a) It's NULL (which is fine, we won't go
				 * here) <or> b) It's valid (which is cool, we
				 * will prefetch it) <or> c) The inp got
				 * freed back to the slab which was
				 * reallocated. Then the piece of memory was
				 * re-used and something else (not an
				 * address) is in inp_ppcb. If that occurs
				 * we don't crash, but take a TLB shootdown
				 * performance hit (same as if it was NULL
				 * and we tried to pre-fetch it).
				 *
				 * Considering that the likelihood of <c> is
				 * quite rare we will take a risk on doing
				 * this. If performance drops after testing
				 * we can always take this out. NB: the
				 * kern_prefetch on amd64 actually has
				 * protection against a bad address now via
				 * the DMAP_() tests. This will prevent the
				 * TLB hit, and instead if <c> occurs just
				 * cause us to load cache with a useless
				 * address.
				 */
				kern_prefetch(ninp->inp_ppcb, &prefetch_tp);
			INP_UNLOCK_ASSERT(inp);
			if (mtx_owned(&hpts->p_mtx)) {
				panic("Hpts:%p owns mtx prior-to lock line:%d",
			mtx_lock(&hpts->p_mtx);
			tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 4);
		HPTS_MTX_ASSERT(hpts);
		if (hpts->p_cur_slot >= NUM_OF_HPTSI_SLOTS) {
			hpts->p_cur_slot = 0;
	HPTS_MTX_ASSERT(hpts);
	hpts->p_prevtick = hpts->p_curtick;
	hpts->p_delayed_by = 0;
	/*
	 * Check to see if we took an excess amount of time and need to run
	 * more ticks (if we did not hit ENOBUFS).
	 */
	/* Re-run any input that may be there */
	(void)tcp_gethptstick(&tv);
	if (!TAILQ_EMPTY(&hpts->p_input)) {
		tcp_input_data(hpts, &tv);
	if (TAILQ_EMPTY(&hpts->p_input) &&
	    (hpts->p_on_inqueue_cnt != 0)) {
		panic("tp:%p in_hpts input empty but cnt:%d",
		    hpts, hpts->p_on_inqueue_cnt);
	tick_now = tcp_gethptstick(&tv);
	if (SEQ_GT(tick_now, hpts->p_prevtick)) {
	/* Did we really spend a full tick or more in here? */
	timersub(&tv, ctick, &res);
	if (res.tv_sec || (res.tv_usec >= HPTS_TICKS_PER_USEC)) {
		counter_u64_add(hpts_loops, 1);
		tcp_hpts_log_it(hpts, inp, HPTSLOG_TOLONG, (uint32_t) res.tv_usec, tick_now);
		hpts->p_curtick = tick_now;

	uint32_t t = 0, i, fnd = 0;

	if (hpts->p_on_queue_cnt) {
		/*
		 * Find next slot that is occupied and use that to
		 * be the sleep time.
		 */
		for (i = 1, t = hpts->p_nxt_slot; i < NUM_OF_HPTSI_SLOTS; i++) {
			if (TAILQ_EMPTY(&hpts->p_hptss[t]) == 0) {
			t = (t + 1) % NUM_OF_HPTSI_SLOTS;
		hpts->p_hpts_sleep_time = i;
		counter_u64_add(back_tosleep, 1);
		panic("Hpts:%p cnt:%d but non found", hpts, hpts->p_on_queue_cnt);
		hpts->p_on_queue_cnt = 0;
	/* No one on the wheel; sleep for all but 2 slots */
	if (hpts_sleep_max == 0)
	hpts->p_hpts_sleep_time = min((NUM_OF_HPTSI_SLOTS - 2), hpts_sleep_max);
	tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEPSET, t, (hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC));
__tcp_set_hpts(struct inpcb *inp, int32_t line)
{
	struct tcp_hpts_entry *hpts;

	INP_WLOCK_ASSERT(inp);
	hpts = tcp_hpts_lock(inp);
	if ((inp->inp_in_hpts == 0) &&
	    (inp->inp_hpts_cpu_set == 0)) {
		inp->inp_hpts_cpu = hpts_cpuid(inp);
		inp->inp_hpts_cpu_set = 1;
	}
	mtx_unlock(&hpts->p_mtx);
	hpts = tcp_input_lock(inp);
	if ((inp->inp_input_cpu_set == 0) &&
	    (inp->inp_in_input == 0)) {
		inp->inp_input_cpu = hpts_cpuid(inp);
		inp->inp_input_cpu_set = 1;
	}
	mtx_unlock(&hpts->p_mtx);
}

tcp_hpts_delayedby(struct inpcb *inp){
	return (tcp_pace.rp_ent[inp->inp_hpts_cpu]->p_delayed_by);
}
tcp_hpts_thread(void *ctx)
{
	struct tcp_hpts_entry *hpts;

	hpts = (struct tcp_hpts_entry *)ctx;
	mtx_lock(&hpts->p_mtx);
	if (hpts->p_direct_wake) {
		/* Signaled by input */
		tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 1, 1);
		callout_stop(&hpts->co);
	if (callout_pending(&hpts->co) ||
	    !callout_active(&hpts->co)) {
		tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 2, 2);
		mtx_unlock(&hpts->p_mtx);
	callout_deactivate(&hpts->co);
	tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 3, 3);
	hpts->p_hpts_active = 1;
	(void)tcp_gethptstick(&tv);
	tcp_hptsi(hpts, &tv);
	HPTS_MTX_ASSERT(hpts);
	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
	if (tcp_min_hptsi_time && (tv.tv_usec < tcp_min_hptsi_time)) {
		tv.tv_usec = tcp_min_hptsi_time;
		hpts->p_on_min_sleep = 1;
	/* Clear the min sleep flag */
	hpts->p_on_min_sleep = 0;
	hpts->p_hpts_active = 0;
	if (tcp_hpts_callout_skip_swi == 0) {
		callout_reset_sbt_on(&hpts->co, sb, 0,
		    hpts_timeout_swi, hpts, hpts->p_cpu,
		    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
	callout_reset_sbt_on(&hpts->co, sb, 0,
	    hpts_timeout_dir, hpts,
	    C_PREL(tcp_hpts_precision));
	hpts->p_direct_wake = 0;
	mtx_unlock(&hpts->p_mtx);
tcp_init_hptsi(void *st)
{
	int32_t i, j, error, bound = 0, created = 0;
	struct tcp_hpts_entry *hpts;
	uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;

	tcp_pace.rp_proc = NULL;
	tcp_pace.rp_num_hptss = ncpus;
	hpts_loops = counter_u64_alloc(M_WAITOK);
	back_tosleep = counter_u64_alloc(M_WAITOK);

	sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
	tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
	asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
	for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
		tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
		    M_TCPHPTS, M_WAITOK | M_ZERO);
		tcp_pace.rp_ent[i]->p_hptss = malloc(asz,
		    M_TCPHPTS, M_WAITOK);
		hpts = tcp_pace.rp_ent[i];
		/*
		 * Init all the hpts structures that are not specifically
		 * zero'd by the allocations. Also let's attach them to the
		 * appropriate sysctl block as well.
		 */
		mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
		    "hpts", MTX_DEF | MTX_DUPOK);
		TAILQ_INIT(&hpts->p_input);
		for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
			TAILQ_INIT(&hpts->p_hptss[j]);
		}
		sysctl_ctx_init(&hpts->hpts_ctx);
		sprintf(unit, "%d", i);
		hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
		    SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
		SYSCTL_ADD_INT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "in_qcnt", CTLFLAG_RD,
		    &hpts->p_on_inqueue_cnt, 0,
		    "Count TCB's awaiting input processing");
		SYSCTL_ADD_INT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "out_qcnt", CTLFLAG_RD,
		    &hpts->p_on_queue_cnt, 0,
		    "Count TCB's awaiting output processing");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "active", CTLFLAG_RD,
		    &hpts->p_hpts_active, 0,
		    "Is the hpts active");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "curslot", CTLFLAG_RD,
		    &hpts->p_cur_slot, 0,
		    "What the current slot is if active");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "curtick", CTLFLAG_RD,
		    &hpts->p_curtick, 0,
		    "What the current tick on if active");
		SYSCTL_ADD_UINT(&hpts->hpts_ctx,
		    SYSCTL_CHILDREN(hpts->hpts_root),
		    OID_AUTO, "logsize", CTLFLAG_RD,
		    &hpts->p_logsize, 0,
		    "Hpts logging buffer size");
		hpts->p_hpts_sleep_time = NUM_OF_HPTSI_SLOTS - 2;
		hpts->p_prevtick = hpts->p_curtick = tcp_gethptstick(&tv);
		hpts->p_prevtick -= 1;
		hpts->p_prevtick %= NUM_OF_HPTSI_SLOTS;
		hpts->p_cpu = 0xffff;
		hpts->p_nxt_slot = 1;
		hpts->p_logsize = tcp_hpts_logging_size;
		if (hpts->p_logsize) {
			sz = (sizeof(struct hpts_log) * hpts->p_logsize);
			hpts->p_log = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
		}
		callout_init(&hpts->co, 1);
	}
	/*
	 * Now let's start ithreads to handle the hptss.
	 */
		hpts = tcp_pace.rp_ent[i];
		error = swi_add(&hpts->ie, "hpts",
		    tcp_hpts_thread, (void *)hpts,
		    SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
			panic("Can't add hpts:%p i:%d err:%d",
		if (tcp_bind_threads) {
			if (intr_event_bind(hpts->ie, i) == 0)
		tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
		if (tcp_hpts_callout_skip_swi == 0) {
			callout_reset_sbt_on(&hpts->co, sb, 0,
			    hpts_timeout_swi, hpts, hpts->p_cpu,
			    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
			callout_reset_sbt_on(&hpts->co, sb, 0,
			    hpts_timeout_dir, hpts,
			    C_PREL(tcp_hpts_precision));
	printf("TCP Hpts created %d swi interrupt thread and bound %d\n",
}

SYSINIT(tcphptsi, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, tcp_init_hptsi, NULL);
MODULE_VERSION(tcphpts, 1);