]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/netinet/tcp_hpts.c
sctp: ensure that ASCONF chunks are not too large
[FreeBSD/FreeBSD.git] / sys / netinet / tcp_hpts.c
1 /*-
2  * Copyright (c) 2016-2018 Netflix, Inc.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  */
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28
29 #include "opt_inet.h"
30 #include "opt_inet6.h"
31 #include "opt_rss.h"
32 #include "opt_tcpdebug.h"
33
34 /**
35  * Some notes about usage.
36  *
37  * The tcp_hpts system is designed to provide a high precision timer
38  * system for tcp. Its main purpose is to provide a mechanism for
39  * pacing packets out onto the wire. It can be used in two ways
40  * by a given TCP stack (and those two methods can be used simultaneously).
41  *
42  * First, and probably the main thing its used by Rack and BBR, it can
43  * be used to call tcp_output() of a transport stack at some time in the future.
44  * The normal way this is done is that tcp_output() of the stack schedules
45  * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
46  * slot is the time from now that the stack wants to be called but it
47  * must be converted to tcp_hpts's notion of slot. This is done with
48  * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
49  * call from the tcp_output() routine might look like:
50  *
51  * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
52  *
53  * The above would schedule tcp_ouput() to be called in 550 useconds.
54  * Note that if using this mechanism the stack will want to add near
55  * its top a check to prevent unwanted calls (from user land or the
56  * arrival of incoming ack's). So it would add something like:
57  *
58  * if (inp->inp_in_hpts)
59  *    return;
60  *
61  * to prevent output processing until the time alotted has gone by.
62  * Of course this is a bare bones example and the stack will probably
63  * have more consideration then just the above.
64  *
65  * Now the second function (actually two functions I guess :D)
66  * the tcp_hpts system provides is the  ability to either abort
67  * a connection (later) or process input on a connection.
68  * Why would you want to do this? To keep processor locality
69  * and or not have to worry about untangling any recursive
70  * locks. The input function now is hooked to the new LRO
71  * system as well.
72  *
73  * In order to use the input redirection function the
74  * tcp stack must define an input function for
75  * tfb_do_queued_segments(). This function understands
76  * how to dequeue a array of packets that were input and
77  * knows how to call the correct processing routine.
78  *
79  * Locking in this is important as well so most likely the
80  * stack will need to define the tfb_do_segment_nounlock()
81  * splitting tfb_do_segment() into two parts. The main processing
82  * part that does not unlock the INP and returns a value of 1 or 0.
83  * It returns 0 if all is well and the lock was not released. It
84  * returns 1 if we had to destroy the TCB (a reset received etc).
85  * The remains of tfb_do_segment() then become just a simple call
86  * to the tfb_do_segment_nounlock() function and check the return
87  * code and possibly unlock.
88  *
89  * The stack must also set the flag on the INP that it supports this
90  * feature i.e. INP_SUPPORTS_MBUFQ. The LRO code recoginizes
91  * this flag as well and will queue packets when it is set.
92  * There are other flags as well INP_MBUF_QUEUE_READY and
93  * INP_DONT_SACK_QUEUE. The first flag tells the LRO code
94  * that we are in the pacer for output so there is no
95  * need to wake up the hpts system to get immediate
96  * input. The second tells the LRO code that its okay
97  * if a SACK arrives you can still defer input and let
98  * the current hpts timer run (this is usually set when
99  * a rack timer is up so we know SACK's are happening
100  * on the connection already and don't want to wakeup yet).
101  *
102  * There is a common functions within the rack_bbr_common code
103  * version i.e. ctf_do_queued_segments(). This function
104  * knows how to take the input queue of packets from
105  * tp->t_in_pkts and process them digging out
106  * all the arguments, calling any bpf tap and
107  * calling into tfb_do_segment_nounlock(). The common
108  * function (ctf_do_queued_segments())  requires that
109  * you have defined the tfb_do_segment_nounlock() as
110  * described above.
111  *
112  * The second feature of the input side of hpts is the
113  * dropping of a connection. This is due to the way that
114  * locking may have occured on the INP_WLOCK. So if
115  * a stack wants to drop a connection it calls:
116  *
117  *     tcp_set_inp_to_drop(tp, ETIMEDOUT)
118  *
119  * To schedule the tcp_hpts system to call
120  *
121  *    tcp_drop(tp, drop_reason)
122  *
123  * at a future point. This is quite handy to prevent locking
124  * issues when dropping connections.
125  *
126  */
127
128 #include <sys/param.h>
129 #include <sys/bus.h>
130 #include <sys/interrupt.h>
131 #include <sys/module.h>
132 #include <sys/kernel.h>
133 #include <sys/hhook.h>
134 #include <sys/malloc.h>
135 #include <sys/mbuf.h>
136 #include <sys/proc.h>           /* for proc0 declaration */
137 #include <sys/socket.h>
138 #include <sys/socketvar.h>
139 #include <sys/sysctl.h>
140 #include <sys/systm.h>
141 #include <sys/refcount.h>
142 #include <sys/sched.h>
143 #include <sys/queue.h>
144 #include <sys/smp.h>
145 #include <sys/counter.h>
146 #include <sys/time.h>
147 #include <sys/kthread.h>
148 #include <sys/kern_prefetch.h>
149
150 #include <vm/uma.h>
151 #include <vm/vm.h>
152
153 #include <net/route.h>
154 #include <net/vnet.h>
155
156 #ifdef RSS
157 #include <net/netisr.h>
158 #include <net/rss_config.h>
159 #endif
160
161 #define TCPSTATES               /* for logging */
162
163 #include <netinet/in.h>
164 #include <netinet/in_kdtrace.h>
165 #include <netinet/in_pcb.h>
166 #include <netinet/ip.h>
167 #include <netinet/ip_icmp.h>    /* required for icmp_var.h */
168 #include <netinet/icmp_var.h>   /* for ICMP_BANDLIM */
169 #include <netinet/ip_var.h>
170 #include <netinet/ip6.h>
171 #include <netinet6/in6_pcb.h>
172 #include <netinet6/ip6_var.h>
173 #include <netinet/tcp.h>
174 #include <netinet/tcp_fsm.h>
175 #include <netinet/tcp_seq.h>
176 #include <netinet/tcp_timer.h>
177 #include <netinet/tcp_var.h>
178 #include <netinet/tcpip.h>
179 #include <netinet/cc/cc.h>
180 #include <netinet/tcp_hpts.h>
181 #include <netinet/tcp_log_buf.h>
182
183 #ifdef tcpdebug
184 #include <netinet/tcp_debug.h>
185 #endif                          /* tcpdebug */
186 #ifdef tcp_offload
187 #include <netinet/tcp_offload.h>
188 #endif
189
190 MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
191 #ifdef RSS
192 static int tcp_bind_threads = 1;
193 #else
194 static int tcp_bind_threads = 2;
195 #endif
196 static int tcp_use_irq_cpu = 0;
197 static struct tcp_hptsi tcp_pace;
198 static uint32_t *cts_last_ran;
199 static int hpts_does_tp_logging = 0;
200 static int hpts_use_assigned_cpu = 1;
201 static int32_t hpts_uses_oldest = OLDEST_THRESHOLD;
202
203 static void tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv);
204 static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout);
205 static void tcp_hpts_thread(void *ctx);
206 static void tcp_init_hptsi(void *st);
207
208 int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
209 static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
210 static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
211 static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;
212
213
214
215 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
216     "TCP Hpts controls");
217 SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
218     "TCP Hpts statistics");
219
220 #define timersub(tvp, uvp, vvp)                                         \
221         do {                                                            \
222                 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;          \
223                 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;       \
224                 if ((vvp)->tv_usec < 0) {                               \
225                         (vvp)->tv_sec--;                                \
226                         (vvp)->tv_usec += 1000000;                      \
227                 }                                                       \
228         } while (0)
229
230 static int32_t tcp_hpts_precision = 120;
231
232 struct hpts_domain_info {
233         int count;
234         int cpu[MAXCPU];
235 };
236
237 struct hpts_domain_info hpts_domains[MAXMEMDOM];
238
239 counter_u64_t hpts_hopelessly_behind;
240
241 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
242     &hpts_hopelessly_behind,
243     "Number of times hpts could not catch up and was behind hopelessly");
244
245 counter_u64_t hpts_loops;
246
247 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
248     &hpts_loops, "Number of times hpts had to loop to catch up");
249
250 counter_u64_t back_tosleep;
251
252 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
253     &back_tosleep, "Number of times hpts found no tcbs");
254
255 counter_u64_t combined_wheel_wrap;
256
257 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
258     &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
259
260 counter_u64_t wheel_wrap;
261
262 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
263     &wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
264
265 counter_u64_t hpts_direct_call;
266 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
267     &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");
268
269 counter_u64_t hpts_wake_timeout;
270
271 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
272     &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");
273
274 counter_u64_t hpts_direct_awakening;
275
276 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
277     &hpts_direct_awakening, "Number of times hpts threads woke up via the callout expiring");
278
279 counter_u64_t hpts_back_tosleep;
280
281 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
282     &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep no work");
283
284 counter_u64_t cpu_uses_flowid;
285 counter_u64_t cpu_uses_random;
286
287 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
288     &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
289 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
290     &cpu_uses_random, "Number of times when setting cpuid we used the a random value");
291
292 TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
293 TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
294 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
295     &tcp_bind_threads, 2,
296     "Thread Binding tunable");
297 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
298     &tcp_use_irq_cpu, 0,
299     "Use of irq CPU  tunable");
300 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
301     &tcp_hpts_precision, 120,
302     "Value for PRE() precision of callout");
303 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
304     &conn_cnt_thresh, 0,
305     "How many connections (below) make us use the callout based mechanism");
306 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
307     &hpts_does_tp_logging, 0,
308     "Do we add to any tp that has logging on pacer logs");
309 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_assigned_cpu, CTLFLAG_RW,
310     &hpts_use_assigned_cpu, 0,
311     "Do we start any hpts timer on the assigned cpu?");
312 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_oldest, CTLFLAG_RW,
313     &hpts_uses_oldest, OLDEST_THRESHOLD,
314     "Do syscalls look for the hpts that has been the longest since running (or just use cpu no if 0)?");
315 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
316     &dynamic_min_sleep, 250,
317     "What is the dynamic minsleep value?");
318 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
319     &dynamic_max_sleep, 5000,
320     "What is the dynamic maxsleep value?");
321
322
323
324
325
326 static int32_t max_pacer_loops = 10;
327 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
328     &max_pacer_loops, 10,
329     "What is the maximum number of times the pacer will loop trying to catch up");
330
331 #define HPTS_MAX_SLEEP_ALLOWED (NUM_OF_HPTSI_SLOTS/2)
332
333 static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;
334
335 static int
336 sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
337 {
338         int error;
339         uint32_t new;
340
341         new = hpts_sleep_max;
342         error = sysctl_handle_int(oidp, &new, 0, req);
343         if (error == 0 && req->newptr) {
344                 if ((new < dynamic_min_sleep) ||
345                     (new > HPTS_MAX_SLEEP_ALLOWED))
346                         error = EINVAL;
347                 else
348                         hpts_sleep_max = new;
349         }
350         return (error);
351 }
352
353 static int
354 sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
355 {
356         int error;
357         uint32_t new;
358
359         new = tcp_min_hptsi_time;
360         error = sysctl_handle_int(oidp, &new, 0, req);
361         if (error == 0 && req->newptr) {
362                 if (new < LOWEST_SLEEP_ALLOWED)
363                         error = EINVAL;
364                 else
365                         tcp_min_hptsi_time = new;
366         }
367         return (error);
368 }
369
370 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
371     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
372     &hpts_sleep_max, 0,
373     &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
374     "Maximum time hpts will sleep");
375
376 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
377     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
378     &tcp_min_hptsi_time, 0,
379     &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
380     "The minimum time the hpts must sleep before processing more slots");
381
382 static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
383 static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
384 static int tcp_hpts_no_wake_over_thresh = 1;
385
386 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
387     &ticks_indicate_more_sleep, 0,
388     "If we only process this many or less on a timeout, we need longer sleep on the next callout");
389 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
390     &ticks_indicate_less_sleep, 0,
391     "If we process this many or more on a timeout, we need less sleep on the next callout");
392 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
393     &tcp_hpts_no_wake_over_thresh, 0,
394     "When we are over the threshold on the pacer do we prohibit wakeups?");
395
396 static void
397 tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
398              int slots_to_run, int idx, int from_callout)
399 {
400         union tcp_log_stackspecific log;
401         /*
402          * Unused logs are
403          * 64 bit - delRate, rttProp, bw_inuse
404          * 16 bit - cwnd_gain
405          *  8 bit - bbr_state, bbr_substate, inhpts, ininput;
406          */
407         memset(&log.u_bbr, 0, sizeof(log.u_bbr));
408         log.u_bbr.flex1 = hpts->p_nxt_slot;
409         log.u_bbr.flex2 = hpts->p_cur_slot;
410         log.u_bbr.flex3 = hpts->p_prev_slot;
411         log.u_bbr.flex4 = idx;
412         log.u_bbr.flex5 = hpts->p_curtick;
413         log.u_bbr.flex6 = hpts->p_on_queue_cnt;
414         log.u_bbr.flex7 = hpts->p_cpu;
415         log.u_bbr.flex8 = (uint8_t)from_callout;
416         log.u_bbr.inflight = slots_to_run;
417         log.u_bbr.applimited = hpts->overidden_sleep;
418         log.u_bbr.delivered = hpts->saved_curtick;
419         log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
420         log.u_bbr.epoch = hpts->saved_curslot;
421         log.u_bbr.lt_epoch = hpts->saved_prev_slot;
422         log.u_bbr.pkts_out = hpts->p_delayed_by;
423         log.u_bbr.lost = hpts->p_hpts_sleep_time;
424         log.u_bbr.pacing_gain = hpts->p_cpu;
425         log.u_bbr.pkt_epoch = hpts->p_runningslot;
426         log.u_bbr.use_lt_bw = 1;
427         TCP_LOG_EVENTP(tp, NULL,
428                        &tp->t_inpcb->inp_socket->so_rcv,
429                        &tp->t_inpcb->inp_socket->so_snd,
430                        BBR_LOG_HPTSDIAG, 0,
431                        0, &log, false, tv);
432 }
433
434 static void
435 tcp_wakehpts(struct tcp_hpts_entry *hpts)
436 {
437         HPTS_MTX_ASSERT(hpts);
438
439         if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
440                 hpts->p_direct_wake = 0;
441                 return;
442         }
443         if (hpts->p_hpts_wake_scheduled == 0) {
444                 hpts->p_hpts_wake_scheduled = 1;
445                 swi_sched(hpts->ie_cookie, 0);
446         }
447 }
448
449 static void
450 hpts_timeout_swi(void *arg)
451 {
452         struct tcp_hpts_entry *hpts;
453
454         hpts = (struct tcp_hpts_entry *)arg;
455         swi_sched(hpts->ie_cookie, 0);
456 }
457
458 static inline void
459 hpts_sane_pace_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int clear)
460 {
461         HPTS_MTX_ASSERT(hpts);
462         KASSERT(hpts->p_cpu == inp->inp_hpts_cpu, ("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
463         KASSERT(inp->inp_in_hpts != 0, ("%s: hpts:%p inp:%p not on the hpts?", __FUNCTION__, hpts, inp));
464         TAILQ_REMOVE(head, inp, inp_hpts);
465         hpts->p_on_queue_cnt--;
466         KASSERT(hpts->p_on_queue_cnt >= 0,
467                 ("Hpts goes negative inp:%p hpts:%p",
468                  inp, hpts));
469         if (clear) {
470                 inp->inp_hpts_request = 0;
471                 inp->inp_in_hpts = 0;
472         }
473 }
474
475 static inline void
476 hpts_sane_pace_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int line, int noref)
477 {
478         HPTS_MTX_ASSERT(hpts);
479         KASSERT(hpts->p_cpu == inp->inp_hpts_cpu,
480                 ("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
481         KASSERT(((noref == 1) && (inp->inp_in_hpts == 1)) ||
482                 ((noref == 0) && (inp->inp_in_hpts == 0)),
483                 ("%s: hpts:%p inp:%p already on the hpts?",
484                  __FUNCTION__, hpts, inp));
485         TAILQ_INSERT_TAIL(head, inp, inp_hpts);
486         inp->inp_in_hpts = 1;
487         hpts->p_on_queue_cnt++;
488         if (noref == 0) {
489                 in_pcbref(inp);
490         }
491 }
492
493 static inline void
494 hpts_sane_input_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, int clear)
495 {
496         HPTS_MTX_ASSERT(hpts);
497         KASSERT(hpts->p_cpu == inp->inp_hpts_cpu,
498                 ("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
499         KASSERT(inp->inp_in_input != 0,
500                 ("%s: hpts:%p inp:%p not on the input hpts?", __FUNCTION__, hpts, inp));
501         TAILQ_REMOVE(&hpts->p_input, inp, inp_input);
502         hpts->p_on_inqueue_cnt--;
503         KASSERT(hpts->p_on_inqueue_cnt >= 0,
504                 ("Hpts in goes negative inp:%p hpts:%p",
505                  inp, hpts));
506         KASSERT((((TAILQ_EMPTY(&hpts->p_input) != 0) && (hpts->p_on_inqueue_cnt == 0)) ||
507                  ((TAILQ_EMPTY(&hpts->p_input) == 0) && (hpts->p_on_inqueue_cnt > 0))),
508                 ("%s hpts:%p input cnt (p_on_inqueue):%d and queue state mismatch",
509                  __FUNCTION__, hpts, hpts->p_on_inqueue_cnt));
510         if (clear)
511                 inp->inp_in_input = 0;
512 }
513
514 static inline void
515 hpts_sane_input_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, int line)
516 {
517         HPTS_MTX_ASSERT(hpts);
518         KASSERT(hpts->p_cpu == inp->inp_hpts_cpu,
519                 ("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
520         KASSERT(inp->inp_in_input == 0,
521                 ("%s: hpts:%p inp:%p already on the input hpts?", __FUNCTION__, hpts, inp));
522         TAILQ_INSERT_TAIL(&hpts->p_input, inp, inp_input);
523         inp->inp_in_input = 1;
524         hpts->p_on_inqueue_cnt++;
525         in_pcbref(inp);
526 }
527
528 struct tcp_hpts_entry *
529 tcp_cur_hpts(struct inpcb *inp)
530 {
531         int32_t hpts_num;
532         struct tcp_hpts_entry *hpts;
533
534         hpts_num = inp->inp_hpts_cpu;
535         hpts = tcp_pace.rp_ent[hpts_num];
536         return (hpts);
537 }
538
539 struct tcp_hpts_entry *
540 tcp_hpts_lock(struct inpcb *inp)
541 {
542         struct tcp_hpts_entry *hpts;
543         int32_t hpts_num;
544
545 again:
546         hpts_num = inp->inp_hpts_cpu;
547         hpts = tcp_pace.rp_ent[hpts_num];
548         KASSERT(mtx_owned(&hpts->p_mtx) == 0,
549                 ("Hpts:%p owns mtx prior-to lock line:%d",
550                  hpts, __LINE__));
551         mtx_lock(&hpts->p_mtx);
552         if (hpts_num != inp->inp_hpts_cpu) {
553                 mtx_unlock(&hpts->p_mtx);
554                 goto again;
555         }
556         return (hpts);
557 }
558
559 struct tcp_hpts_entry *
560 tcp_input_lock(struct inpcb *inp)
561 {
562         struct tcp_hpts_entry *hpts;
563         int32_t hpts_num;
564
565 again:
566         hpts_num = inp->inp_input_cpu;
567         hpts = tcp_pace.rp_ent[hpts_num];
568         KASSERT(mtx_owned(&hpts->p_mtx) == 0,
569                 ("Hpts:%p owns mtx prior-to lock line:%d",
570                 hpts, __LINE__));
571         mtx_lock(&hpts->p_mtx);
572         if (hpts_num != inp->inp_input_cpu) {
573                 mtx_unlock(&hpts->p_mtx);
574                 goto again;
575         }
576         return (hpts);
577 }
578
579 static void
580 tcp_remove_hpts_ref(struct inpcb *inp, struct tcp_hpts_entry *hpts, int line)
581 {
582         int32_t add_freed;
583         int32_t ret;
584
585         if (inp->inp_flags2 & INP_FREED) {
586                 /*
587                  * Need to play a special trick so that in_pcbrele_wlocked
588                  * does not return 1 when it really should have returned 0.
589                  */
590                 add_freed = 1;
591                 inp->inp_flags2 &= ~INP_FREED;
592         } else {
593                 add_freed = 0;
594         }
595 #ifndef INP_REF_DEBUG
596         ret = in_pcbrele_wlocked(inp);
597 #else
598         ret = __in_pcbrele_wlocked(inp, line);
599 #endif
600         KASSERT(ret != 1, ("inpcb:%p release ret 1", inp));
601         if (add_freed) {
602                 inp->inp_flags2 |= INP_FREED;
603         }
604 }
605
606 static void
607 tcp_hpts_remove_locked_output(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
608 {
609         if (inp->inp_in_hpts) {
610                 hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], 1);
611                 tcp_remove_hpts_ref(inp, hpts, line);
612         }
613 }
614
615 static void
616 tcp_hpts_remove_locked_input(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
617 {
618         HPTS_MTX_ASSERT(hpts);
619         if (inp->inp_in_input) {
620                 hpts_sane_input_remove(hpts, inp, 1);
621                 tcp_remove_hpts_ref(inp, hpts, line);
622         }
623 }
624
625 /*
626  * Called normally with the INP_LOCKED but it
627  * does not matter, the hpts lock is the key
628  * but the lock order allows us to hold the
629  * INP lock and then get the hpts lock.
630  *
631  * Valid values in the flags are
632  * HPTS_REMOVE_OUTPUT - remove from the output of the hpts.
633  * HPTS_REMOVE_INPUT - remove from the input of the hpts.
634  * Note that you can use one or both values together
635  * and get two actions.
636  */
637 void
638 __tcp_hpts_remove(struct inpcb *inp, int32_t flags, int32_t line)
639 {
640         struct tcp_hpts_entry *hpts;
641
642         INP_WLOCK_ASSERT(inp);
643         if (flags & HPTS_REMOVE_OUTPUT) {
644                 hpts = tcp_hpts_lock(inp);
645                 tcp_hpts_remove_locked_output(hpts, inp, flags, line);
646                 mtx_unlock(&hpts->p_mtx);
647         }
648         if (flags & HPTS_REMOVE_INPUT) {
649                 hpts = tcp_input_lock(inp);
650                 tcp_hpts_remove_locked_input(hpts, inp, flags, line);
651                 mtx_unlock(&hpts->p_mtx);
652         }
653 }
654
655 static inline int
656 hpts_slot(uint32_t wheel_slot, uint32_t plus)
657 {
658         /*
659          * Given a slot on the wheel, what slot
660          * is that plus ticks out?
661          */
662         KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
663         return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
664 }
665
666 static inline int
667 tick_to_wheel(uint32_t cts_in_wticks)
668 {
669         /*
670          * Given a timestamp in ticks (so by
671          * default to get it to a real time one
672          * would multiply by 10.. i.e the number
673          * of ticks in a slot) map it to our limited
674          * space wheel.
675          */
676         return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
677 }
678
679 static inline int
680 hpts_slots_diff(int prev_slot, int slot_now)
681 {
682         /*
683          * Given two slots that are someplace
684          * on our wheel. How far are they apart?
685          */
686         if (slot_now > prev_slot)
687                 return (slot_now - prev_slot);
688         else if (slot_now == prev_slot)
689                 /*
690                  * Special case, same means we can go all of our
691                  * wheel less one slot.
692                  */
693                 return (NUM_OF_HPTSI_SLOTS - 1);
694         else
695                 return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
696 }
697
698 /*
699  * Given a slot on the wheel that is the current time
700  * mapped to the wheel (wheel_slot), what is the maximum
701  * distance forward that can be obtained without
702  * wrapping past either prev_slot or running_slot
703  * depending on the htps state? Also if passed
704  * a uint32_t *, fill it with the slot location.
705  *
706  * Note if you do not give this function the current
707  * time (that you think it is) mapped to the wheel slot
708  * then the results will not be what you expect and
709  * could lead to invalid inserts.
710  */
711 static inline int32_t
712 max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
713 {
714         uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;
715
716         if ((hpts->p_hpts_active == 1) &&
717             (hpts->p_wheel_complete == 0)) {
718                 end_slot = hpts->p_runningslot;
719                 /* Back up one tick */
720                 if (end_slot == 0)
721                         end_slot = NUM_OF_HPTSI_SLOTS - 1;
722                 else
723                         end_slot--;
724                 if (target_slot)
725                         *target_slot = end_slot;
726         } else {
727                 /*
728                  * For the case where we are
729                  * not active, or we have
730                  * completed the pass over
731                  * the wheel, we can use the
732                  * prev tick and subtract one from it. This puts us
733                  * as far out as possible on the wheel.
734                  */
735                 end_slot = hpts->p_prev_slot;
736                 if (end_slot == 0)
737                         end_slot = NUM_OF_HPTSI_SLOTS - 1;
738                 else
739                         end_slot--;
740                 if (target_slot)
741                         *target_slot = end_slot;
742                 /*
743                  * Now we have close to the full wheel left minus the
744                  * time it has been since the pacer went to sleep. Note
745                  * that wheel_tick, passed in, should be the current time
746                  * from the perspective of the caller, mapped to the wheel.
747                  */
748                 if (hpts->p_prev_slot != wheel_slot)
749                         dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
750                 else
751                         dis_to_travel = 1;
752                 /*
753                  * dis_to_travel in this case is the space from when the
754                  * pacer stopped (p_prev_slot) and where our wheel_slot
755                  * is now. To know how many slots we can put it in we
756                  * subtract from the wheel size. We would not want
757                  * to place something after p_prev_slot or it will
758                  * get ran too soon.
759                  */
760                 return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
761         }
762         /*
763          * So how many slots are open between p_runningslot -> p_cur_slot
764          * that is what is currently un-available for insertion. Special
765          * case when we are at the last slot, this gets 1, so that
766          * the answer to how many slots are available is all but 1.
767          */
768         if (hpts->p_runningslot == hpts->p_cur_slot)
769                 dis_to_travel = 1;
770         else
771                 dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
772         /*
773          * How long has the pacer been running?
774          */
775         if (hpts->p_cur_slot != wheel_slot) {
776                 /* The pacer is a bit late */
777                 pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
778         } else {
779                 /* The pacer is right on time, now == pacers start time */
780                 pacer_to_now = 0;
781         }
782         /*
783          * To get the number left we can insert into we simply
784          * subtract the distance the pacer has to run from how
785          * many slots there are.
786          */
787         avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
788         /*
789          * Now how many of those we will eat due to the pacer's
790          * time (p_cur_slot) of start being behind the
791          * real time (wheel_slot)?
792          */
793         if (avail_on_wheel <= pacer_to_now) {
794                 /*
795                  * Wheel wrap, we can't fit on the wheel, that
796                  * is unusual the system must be way overloaded!
797                  * Insert into the assured slot, and return special
798                  * "0".
799                  */
800                 counter_u64_add(combined_wheel_wrap, 1);
801                 *target_slot = hpts->p_nxt_slot;
802                 return (0);
803         } else {
804                 /*
805                  * We know how many slots are open
806                  * on the wheel (the reverse of what
807                  * is left to run. Take away the time
808                  * the pacer started to now (wheel_slot)
809                  * and that tells you how many slots are
810                  * open that can be inserted into that won't
811                  * be touched by the pacer until later.
812                  */
813                 return (avail_on_wheel - pacer_to_now);
814         }
815 }
816
817 static int
818 tcp_queue_to_hpts_immediate_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line, int32_t noref)
819 {
820         uint32_t need_wake = 0;
821
822         HPTS_MTX_ASSERT(hpts);
823         if (inp->inp_in_hpts == 0) {
824                 /* Ok we need to set it on the hpts in the current slot */
825                 inp->inp_hpts_request = 0;
826                 if ((hpts->p_hpts_active == 0) ||
827                     (hpts->p_wheel_complete)) {
828                         /*
829                          * A sleeping hpts we want in next slot to run
830                          * note that in this state p_prev_slot == p_cur_slot
831                          */
832                         inp->inp_hptsslot = hpts_slot(hpts->p_prev_slot, 1);
833                         if ((hpts->p_on_min_sleep == 0) && (hpts->p_hpts_active == 0))
834                                 need_wake = 1;
835                 } else if ((void *)inp == hpts->p_inp) {
836                         /*
837                          * The hpts system is running and the caller
838                          * was awoken by the hpts system.
839                          * We can't allow you to go into the same slot we
840                          * are in (we don't want a loop :-D).
841                          */
842                         inp->inp_hptsslot = hpts->p_nxt_slot;
843                 } else
844                         inp->inp_hptsslot = hpts->p_runningslot;
845                 hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
846                 if (need_wake) {
847                         /*
848                          * Activate the hpts if it is sleeping and its
849                          * timeout is not 1.
850                          */
851                         hpts->p_direct_wake = 1;
852                         tcp_wakehpts(hpts);
853                 }
854         }
855         return (need_wake);
856 }
857
858 int
859 __tcp_queue_to_hpts_immediate(struct inpcb *inp, int32_t line)
860 {
861         int32_t ret;
862         struct tcp_hpts_entry *hpts;
863
864         INP_WLOCK_ASSERT(inp);
865         hpts = tcp_hpts_lock(inp);
866         ret = tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
867         mtx_unlock(&hpts->p_mtx);
868         return (ret);
869 }
870
871 #ifdef INVARIANTS
872 static void
873 check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t inp_hptsslot, int line)
874 {
875         /*
876          * Sanity checks for the pacer with invariants
877          * on insert.
878          */
879         KASSERT(inp_hptsslot < NUM_OF_HPTSI_SLOTS,
880                 ("hpts:%p inp:%p slot:%d > max",
881                  hpts, inp, inp_hptsslot));
882         if ((hpts->p_hpts_active) &&
883             (hpts->p_wheel_complete == 0)) {
884                 /*
885                  * If the pacer is processing a arc
886                  * of the wheel, we need to make
887                  * sure we are not inserting within
888                  * that arc.
889                  */
890                 int distance, yet_to_run;
891
892                 distance = hpts_slots_diff(hpts->p_runningslot, inp_hptsslot);
893                 if (hpts->p_runningslot != hpts->p_cur_slot)
894                         yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
895                 else
896                         yet_to_run = 0; /* processing last slot */
897                 KASSERT(yet_to_run <= distance,
898                         ("hpts:%p inp:%p slot:%d distance:%d yet_to_run:%d rs:%d cs:%d",
899                          hpts, inp, inp_hptsslot,
900                          distance, yet_to_run,
901                          hpts->p_runningslot, hpts->p_cur_slot));
902         }
903 }
904 #endif
905
906 static void
907 tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t slot, int32_t line,
908                        struct hpts_diag *diag, struct timeval *tv)
909 {
910         uint32_t need_new_to = 0;
911         uint32_t wheel_cts; 
912         int32_t wheel_slot, maxslots, last_slot;
913         int cpu;
914         int8_t need_wakeup = 0;
915
916         HPTS_MTX_ASSERT(hpts);
917         if (diag) {
918                 memset(diag, 0, sizeof(struct hpts_diag));
919                 diag->p_hpts_active = hpts->p_hpts_active;
920                 diag->p_prev_slot = hpts->p_prev_slot;
921                 diag->p_runningslot = hpts->p_runningslot;
922                 diag->p_nxt_slot = hpts->p_nxt_slot;
923                 diag->p_cur_slot = hpts->p_cur_slot;
924                 diag->p_curtick = hpts->p_curtick;
925                 diag->p_lasttick = hpts->p_lasttick;
926                 diag->slot_req = slot;
927                 diag->p_on_min_sleep = hpts->p_on_min_sleep;
928                 diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
929         }
930         KASSERT(inp->inp_in_hpts == 0, ("Hpts:%p tp:%p already on hpts and add?", hpts, inp));
931         if (slot == 0) {
932                 /* Immediate */
933                 tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
934                 return;
935         }
936         /* Get the current time relative to the wheel */
937         wheel_cts = tcp_tv_to_hptstick(tv);
938         /* Map it onto the wheel */
939         wheel_slot = tick_to_wheel(wheel_cts);
940         /* Now what's the max we can place it at? */
941         maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
942         if (diag) {
943                 diag->wheel_slot = wheel_slot;
944                 diag->maxslots = maxslots;
945                 diag->wheel_cts = wheel_cts;
946         }
947         if (maxslots == 0) {
948                 /* The pacer is in a wheel wrap behind, yikes! */
949                 if (slot > 1) {
950                         /*
951                          * Reduce by 1 to prevent a forever loop in
952                          * case something else is wrong. Note this
953                          * probably does not hurt because the pacer
954                          * if its true is so far behind we will be
955                          * > 1second late calling anyway.
956                          */
957                         slot--;
958                 }
959                 inp->inp_hptsslot = last_slot;
960                 inp->inp_hpts_request = slot;
961         } else  if (maxslots >= slot) {
962                 /* It all fits on the wheel */
963                 inp->inp_hpts_request = 0;
964                 inp->inp_hptsslot = hpts_slot(wheel_slot, slot);
965         } else {
966                 /* It does not fit */
967                 inp->inp_hpts_request = slot - maxslots;
968                 inp->inp_hptsslot = last_slot;
969         }
970         if (diag) {
971                 diag->slot_remaining = inp->inp_hpts_request;
972                 diag->inp_hptsslot = inp->inp_hptsslot;
973         }
974 #ifdef INVARIANTS
975         check_if_slot_would_be_wrong(hpts, inp, inp->inp_hptsslot, line);
976 #endif
977         hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, 0);
978         if ((hpts->p_hpts_active == 0) &&
979             (inp->inp_hpts_request == 0) &&
980             (hpts->p_on_min_sleep == 0)) {
981                 /*
982                  * The hpts is sleeping and NOT on a minimum
983                  * sleep time, we need to figure out where
984                  * it will wake up at and if we need to reschedule
985                  * its time-out.
986                  */
987                 uint32_t have_slept, yet_to_sleep;
988
989                 /* Now do we need to restart the hpts's timer? */
990                 have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
991                 if (have_slept < hpts->p_hpts_sleep_time)
992                         yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
993                 else {
994                         /* We are over-due */
995                         yet_to_sleep = 0;
996                         need_wakeup = 1;
997                 }
998                 if (diag) {
999                         diag->have_slept = have_slept;
1000                         diag->yet_to_sleep = yet_to_sleep;
1001                 }
1002                 if (yet_to_sleep &&
1003                     (yet_to_sleep > slot)) {
1004                         /*
1005                          * We need to reschedule the hpts's time-out.
1006                          */
1007                         hpts->p_hpts_sleep_time = slot;
1008                         need_new_to = slot * HPTS_TICKS_PER_SLOT;
1009                 }
1010         }
1011         /*
1012          * Now how far is the hpts sleeping to? if active is 1, its
1013          * up and ticking we do nothing, otherwise we may need to
1014          * reschedule its callout if need_new_to is set from above.
1015          */
1016         if (need_wakeup) {
1017                 hpts->p_direct_wake = 1;
1018                 tcp_wakehpts(hpts);
1019                 if (diag) {
1020                         diag->need_new_to = 0;
1021                         diag->co_ret = 0xffff0000;
1022                 }
1023         } else if (need_new_to) {
1024                 int32_t co_ret;
1025                 struct timeval tv;
1026                 sbintime_t sb;
1027
1028                 tv.tv_sec = 0;
1029                 tv.tv_usec = 0;
1030                 while (need_new_to > HPTS_USEC_IN_SEC) {
1031                         tv.tv_sec++;
1032                         need_new_to -= HPTS_USEC_IN_SEC;
1033                 }
1034                 tv.tv_usec = need_new_to;
1035                 sb = tvtosbt(tv);
1036                 cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ?  hpts->p_cpu : curcpu;
1037                 co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
1038                                               hpts_timeout_swi, hpts, cpu,
1039                                               (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1040                 if (diag) {
1041                         diag->need_new_to = need_new_to;
1042                         diag->co_ret = co_ret;
1043                 }
1044         }
1045 }
1046
1047 uint32_t
1048 tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag)
1049 {
1050         struct tcp_hpts_entry *hpts;
1051         uint32_t slot_on;
1052         struct timeval tv;
1053
1054         /*
1055          * We now return the next-slot the hpts will be on, beyond its
1056          * current run (if up) or where it was when it stopped if it is
1057          * sleeping.
1058          */
1059         INP_WLOCK_ASSERT(inp);
1060         hpts = tcp_hpts_lock(inp);
1061         microuptime(&tv);
1062         tcp_hpts_insert_locked(hpts, inp, slot, line, diag, &tv);
1063         slot_on = hpts->p_nxt_slot;
1064         mtx_unlock(&hpts->p_mtx);
1065         return (slot_on);
1066 }
1067
1068 uint32_t
1069 __tcp_hpts_insert(struct inpcb *inp, uint32_t slot, int32_t line){
1070         return (tcp_hpts_insert_diag(inp, slot, line, NULL));
1071 }
1072
1073 int
1074 __tcp_queue_to_input_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line)
1075 {
1076         int32_t retval = 0;
1077
1078         HPTS_MTX_ASSERT(hpts);
1079         if (inp->inp_in_input == 0) {
1080                 /* Ok we need to set it on the hpts in the current slot */
1081                 hpts_sane_input_insert(hpts, inp, line);
1082                 retval = 1;
1083                 if ((hpts->p_hpts_active == 0) &&
1084                     (hpts->p_on_min_sleep == 0)){
1085                         /*
1086                          * Activate the hpts if it is sleeping.
1087                          */
1088                         retval = 2;
1089                         hpts->p_direct_wake = 1;
1090                         tcp_wakehpts(hpts);
1091                 }
1092         } else if ((hpts->p_hpts_active == 0) &&
1093                    (hpts->p_on_min_sleep == 0)){
1094                 retval = 4;
1095                 hpts->p_direct_wake = 1;
1096                 tcp_wakehpts(hpts);
1097         }
1098         return (retval);
1099 }
1100
1101 int32_t
1102 __tcp_queue_to_input(struct inpcb *inp, int line)
1103 {
1104         struct tcp_hpts_entry *hpts;
1105         int32_t ret;
1106
1107         hpts = tcp_input_lock(inp);
1108         ret = __tcp_queue_to_input_locked(inp, hpts, line);
1109         mtx_unlock(&hpts->p_mtx);
1110         return (ret);
1111 }
1112
1113 void
1114 __tcp_set_inp_to_drop(struct inpcb *inp, uint16_t reason, int32_t line)
1115 {
1116         struct tcp_hpts_entry *hpts;
1117         struct tcpcb *tp;
1118
1119         tp = intotcpcb(inp);
1120         hpts = tcp_input_lock(tp->t_inpcb);
1121         if (inp->inp_in_input == 0) {
1122                 /* Ok we need to set it on the hpts in the current slot */
1123                 hpts_sane_input_insert(hpts, inp, line);
1124                 if ((hpts->p_hpts_active == 0) &&
1125                     (hpts->p_on_min_sleep == 0)){
1126                         /*
1127                          * Activate the hpts if it is sleeping.
1128                          */
1129                         hpts->p_direct_wake = 1;
1130                         tcp_wakehpts(hpts);
1131                 }
1132         } else if ((hpts->p_hpts_active == 0) &&
1133                    (hpts->p_on_min_sleep == 0)){
1134                 hpts->p_direct_wake = 1;
1135                 tcp_wakehpts(hpts);
1136         }
1137         inp->inp_hpts_drop_reas = reason;
1138         mtx_unlock(&hpts->p_mtx);
1139 }
1140
1141 uint16_t
1142 hpts_random_cpu(struct inpcb *inp){
1143         /*
1144          * No flow type set distribute the load randomly.
1145          */
1146         uint16_t cpuid;
1147         uint32_t ran;
1148
1149         /*
1150          * If one has been set use it i.e. we want both in and out on the
1151          * same hpts.
1152          */
1153         if (inp->inp_input_cpu_set) {
1154                 return (inp->inp_input_cpu);
1155         } else if (inp->inp_hpts_cpu_set) {
1156                 return (inp->inp_hpts_cpu);
1157         }
1158         /* Nothing set use a random number */
1159         ran = arc4random();
1160         cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
1161         return (cpuid);
1162 }
1163
1164 static uint16_t
1165 hpts_cpuid(struct inpcb *inp, int *failed)
1166 {
1167         u_int cpuid;
1168 #ifdef NUMA
1169         struct hpts_domain_info *di;
1170 #endif
1171
1172         *failed = 0;
1173         /*
1174          * If one has been set use it i.e. we want both in and out on the
1175          * same hpts.
1176          */
1177         if (inp->inp_input_cpu_set) {
1178                 return (inp->inp_input_cpu);
1179         } else if (inp->inp_hpts_cpu_set) {
1180                 return (inp->inp_hpts_cpu);
1181         }
1182         /*
1183          * If we are using the irq cpu set by LRO or
1184          * the driver then it overrides all other domains.
1185          */
1186         if (tcp_use_irq_cpu) {
1187                 if (inp->inp_irq_cpu_set == 0) {
1188                         *failed = 1;
1189                         return(0);
1190                 }
1191                 return(inp->inp_irq_cpu);
1192         }
1193         /* If one is set the other must be the same */
1194 #ifdef RSS
1195         cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
1196         if (cpuid == NETISR_CPUID_NONE)
1197                 return (hpts_random_cpu(inp));
1198         else
1199                 return (cpuid);
1200 #endif
1201         /*
1202          * We don't have a flowid -> cpuid mapping, so cheat and just map
1203          * unknown cpuids to curcpu.  Not the best, but apparently better
1204          * than defaulting to swi 0.
1205          */
1206         if (inp->inp_flowtype == M_HASHTYPE_NONE) {
1207                 counter_u64_add(cpu_uses_random, 1);
1208                 return (hpts_random_cpu(inp));
1209         }
1210         /*
1211          * Hash to a thread based on the flowid.  If we are using numa,
1212          * then restrict the hash to the numa domain where the inp lives.
1213          */
1214 #ifdef NUMA
1215         if (tcp_bind_threads == 2 && inp->inp_numa_domain != M_NODOM) {
1216                 di = &hpts_domains[inp->inp_numa_domain];
1217                 cpuid = di->cpu[inp->inp_flowid % di->count];
1218         } else
1219 #endif
1220                 cpuid = inp->inp_flowid % mp_ncpus;
1221         counter_u64_add(cpu_uses_flowid, 1);
1222         return (cpuid);
1223 }
1224
1225 static void
1226 tcp_drop_in_pkts(struct tcpcb *tp)
1227 {
1228         struct mbuf *m, *n;
1229
1230         m = tp->t_in_pkt;
1231         if (m)
1232                 n = m->m_nextpkt;
1233         else
1234                 n = NULL;
1235         tp->t_in_pkt = NULL;
1236         while (m) {
1237                 m_freem(m);
1238                 m = n;
1239                 if (m)
1240                         n = m->m_nextpkt;
1241         }
1242 }
1243
1244 /*
1245  * Do NOT try to optimize the processing of inp's
1246  * by first pulling off all the inp's into a temporary
1247  * list (e.g. TAILQ_CONCAT). If you do that the subtle
1248  * interactions of switching CPU's will kill because of
1249  * problems in the linked list manipulation. Basically
1250  * you would switch cpu's with the hpts mutex locked
1251  * but then while you were processing one of the inp's
1252  * some other one that you switch will get a new
1253  * packet on the different CPU. It will insert it
1254  * on the new hpts's input list. Creating a temporary
1255  * link in the inp will not fix it either, since
1256  * the other hpts will be doing the same thing and
1257  * you will both end up using the temporary link.
1258  *
1259  * You will die in an ASSERT for tailq corruption if you
1260  * run INVARIANTS or you will die horribly without
1261  * INVARIANTS in some unknown way with a corrupt linked
1262  * list.
1263  */
1264 static void
1265 tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv)
1266 {
1267         struct tcpcb *tp;
1268         struct inpcb *inp;
1269         uint16_t drop_reason;
1270         int16_t set_cpu;
1271         uint32_t did_prefetch = 0;
1272         int dropped;
1273
1274         HPTS_MTX_ASSERT(hpts);
1275         NET_EPOCH_ASSERT();
1276
1277         while ((inp = TAILQ_FIRST(&hpts->p_input)) != NULL) {
1278                 HPTS_MTX_ASSERT(hpts);
1279                 hpts_sane_input_remove(hpts, inp, 0);
1280                 if (inp->inp_input_cpu_set == 0) {
1281                         set_cpu = 1;
1282                 } else {
1283                         set_cpu = 0;
1284                 }
1285                 hpts->p_inp = inp;
1286                 drop_reason = inp->inp_hpts_drop_reas;
1287                 inp->inp_in_input = 0;
1288                 mtx_unlock(&hpts->p_mtx);
1289                 INP_WLOCK(inp);
1290 #ifdef VIMAGE
1291                 CURVNET_SET(inp->inp_vnet);
1292 #endif
1293                 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
1294                     (inp->inp_flags2 & INP_FREED)) {
1295 out:
1296                         hpts->p_inp = NULL;
1297                         if (in_pcbrele_wlocked(inp) == 0) {
1298                                 INP_WUNLOCK(inp);
1299                         }
1300 #ifdef VIMAGE
1301                         CURVNET_RESTORE();
1302 #endif
1303                         mtx_lock(&hpts->p_mtx);
1304                         continue;
1305                 }
1306                 tp = intotcpcb(inp);
1307                 if ((tp == NULL) || (tp->t_inpcb == NULL)) {
1308                         goto out;
1309                 }
1310                 if (drop_reason) {
1311                         /* This tcb is being destroyed for drop_reason */
1312                         tcp_drop_in_pkts(tp);
1313                         tp = tcp_drop(tp, drop_reason);
1314                         if (tp == NULL) {
1315                                 INP_WLOCK(inp);
1316                         }
1317                         if (in_pcbrele_wlocked(inp) == 0)
1318                                 INP_WUNLOCK(inp);
1319 #ifdef VIMAGE
1320                         CURVNET_RESTORE();
1321 #endif
1322                         mtx_lock(&hpts->p_mtx);
1323                         continue;
1324                 }
1325                 if (set_cpu) {
1326                         /*
1327                          * Setup so the next time we will move to the right
1328                          * CPU. This should be a rare event. It will
1329                          * sometimes happens when we are the client side
1330                          * (usually not the server). Somehow tcp_output()
1331                          * gets called before the tcp_do_segment() sets the
1332                          * intial state. This means the r_cpu and r_hpts_cpu
1333                          * is 0. We get on the hpts, and then tcp_input()
1334                          * gets called setting up the r_cpu to the correct
1335                          * value. The hpts goes off and sees the mis-match.
1336                          * We simply correct it here and the CPU will switch
1337                          * to the new hpts nextime the tcb gets added to the
1338                          * the hpts (not this time) :-)
1339                          */
1340                         tcp_set_hpts(inp);
1341                 }
1342                 if (tp->t_fb_ptr != NULL) {
1343                         kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1344                         did_prefetch = 1;
1345                 }
1346                 if ((tp->t_fb->tfb_do_queued_segments != NULL) && tp->t_in_pkt) {
1347                         if (inp->inp_in_input)
1348                                 tcp_hpts_remove(inp, HPTS_REMOVE_INPUT);
1349                         dropped = (*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0);
1350                         if (dropped) {
1351                                 /* Re-acquire the wlock so we can release the reference */
1352                                 INP_WLOCK(inp);
1353                         }
1354                 } else if (tp->t_in_pkt) {
1355                         /*
1356                          * We reach here only if we had a
1357                          * stack that supported INP_SUPPORTS_MBUFQ
1358                          * and then somehow switched to a stack that
1359                          * does not. The packets are basically stranded
1360                          * and would hang with the connection until
1361                          * cleanup without this code. Its not the
1362                          * best way but I know of no other way to
1363                          * handle it since the stack needs functions
1364                          * it does not have to handle queued packets.
1365                          */
1366                         tcp_drop_in_pkts(tp);
1367                 }
1368                 if (in_pcbrele_wlocked(inp) == 0)
1369                         INP_WUNLOCK(inp);
1370                 INP_UNLOCK_ASSERT(inp);
1371 #ifdef VIMAGE
1372                 CURVNET_RESTORE();
1373 #endif
1374                 mtx_lock(&hpts->p_mtx);
1375                 hpts->p_inp = NULL;
1376         }
1377 }
1378
1379 static void
1380 tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
1381 {
1382         uint32_t t = 0, i, fnd = 0;
1383
1384         if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
1385                 /*
1386                  * Find next slot that is occupied and use that to
1387                  * be the sleep time.
1388                  */
1389                 for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
1390                         if (TAILQ_EMPTY(&hpts->p_hptss[t]) == 0) {
1391                                 fnd = 1;
1392                                 break;
1393                         }
1394                         t = (t + 1) % NUM_OF_HPTSI_SLOTS;
1395                 }
1396                 KASSERT(fnd != 0, ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
1397                 hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
1398         } else {
1399                 /* No one on the wheel sleep for all but 400 slots or sleep max  */
1400                 hpts->p_hpts_sleep_time = hpts_sleep_max;
1401         }
1402 }
1403
1404 static int32_t
1405 tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout)
1406 {
1407         struct tcpcb *tp;
1408         struct inpcb *inp = NULL, *ninp;
1409         struct timeval tv;
1410         int32_t slots_to_run, i, error;
1411         int32_t loop_cnt = 0;
1412         int32_t did_prefetch = 0;
1413         int32_t prefetch_ninp = 0;
1414         int32_t prefetch_tp = 0;
1415         int32_t wrap_loop_cnt = 0;
1416         int32_t slot_pos_of_endpoint = 0;
1417         int32_t orig_exit_slot;
1418         int16_t set_cpu;
1419         int8_t completed_measure = 0, seen_endpoint = 0;
1420
1421         HPTS_MTX_ASSERT(hpts);
1422         NET_EPOCH_ASSERT();
1423         /* record previous info for any logging */
1424         hpts->saved_lasttick = hpts->p_lasttick;
1425         hpts->saved_curtick = hpts->p_curtick;
1426         hpts->saved_curslot = hpts->p_cur_slot;
1427         hpts->saved_prev_slot = hpts->p_prev_slot;
1428
1429         hpts->p_lasttick = hpts->p_curtick;
1430         hpts->p_curtick = tcp_gethptstick(&tv);
1431         cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1432         orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1433         if ((hpts->p_on_queue_cnt == 0) ||
1434             (hpts->p_lasttick == hpts->p_curtick)) {
1435                 /*
1436                  * No time has yet passed,
1437                  * or nothing to do.
1438                  */
1439                 hpts->p_prev_slot = hpts->p_cur_slot;
1440                 hpts->p_lasttick = hpts->p_curtick;
1441                 goto no_run;
1442         }
1443 again:
1444         hpts->p_wheel_complete = 0;
1445         HPTS_MTX_ASSERT(hpts);
1446         slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
1447         if (((hpts->p_curtick - hpts->p_lasttick) >
1448              ((NUM_OF_HPTSI_SLOTS-1) * HPTS_TICKS_PER_SLOT)) &&
1449             (hpts->p_on_queue_cnt != 0)) {
1450                 /*
1451                  * Wheel wrap is occuring, basically we
1452                  * are behind and the distance between
1453                  * run's has spread so much it has exceeded
1454                  * the time on the wheel (1.024 seconds). This
1455                  * is ugly and should NOT be happening. We
1456                  * need to run the entire wheel. We last processed
1457                  * p_prev_slot, so that needs to be the last slot
1458                  * we run. The next slot after that should be our
1459                  * reserved first slot for new, and then starts
1460                  * the running position. Now the problem is the
1461                  * reserved "not to yet" place does not exist
1462                  * and there may be inp's in there that need
1463                  * running. We can merge those into the
1464                  * first slot at the head.
1465                  */
1466                 wrap_loop_cnt++;
1467                 hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
1468                 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
1469                 /*
1470                  * Adjust p_cur_slot to be where we are starting from
1471                  * hopefully we will catch up (fat chance if something
1472                  * is broken this bad :( )
1473                  */
1474                 hpts->p_cur_slot = hpts->p_prev_slot;
1475                 /*
1476                  * The next slot has guys to run too, and that would
1477                  * be where we would normally start, lets move them into
1478                  * the next slot (p_prev_slot + 2) so that we will
1479                  * run them, the extra 10usecs of late (by being
1480                  * put behind) does not really matter in this situation.
1481                  */
1482 #ifdef INVARIANTS
1483                 /*
1484                  * To prevent a panic we need to update the inpslot to the
1485                  * new location. This is safe since it takes both the
1486                  * INP lock and the pacer mutex to change the inp_hptsslot.
1487                  */
1488                 TAILQ_FOREACH(inp, &hpts->p_hptss[hpts->p_nxt_slot], inp_hpts) {
1489                         inp->inp_hptsslot = hpts->p_runningslot;
1490                 }
1491 #endif
1492                 TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot],
1493                              &hpts->p_hptss[hpts->p_nxt_slot], inp_hpts);
1494                 slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
1495                 counter_u64_add(wheel_wrap, 1);
1496         } else {
1497                 /*
1498                  * Nxt slot is always one after p_runningslot though
1499                  * its not used usually unless we are doing wheel wrap.
1500                  */
1501                 hpts->p_nxt_slot = hpts->p_prev_slot;
1502                 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
1503         }
1504         KASSERT((((TAILQ_EMPTY(&hpts->p_input) != 0) && (hpts->p_on_inqueue_cnt == 0)) ||
1505                  ((TAILQ_EMPTY(&hpts->p_input) == 0) && (hpts->p_on_inqueue_cnt > 0))),
1506                 ("%s hpts:%p in_hpts cnt:%d and queue state mismatch",
1507                  __FUNCTION__, hpts, hpts->p_on_inqueue_cnt));
1508         HPTS_MTX_ASSERT(hpts);
1509         if (hpts->p_on_queue_cnt == 0) {
1510                 goto no_one;
1511         }
1512         HPTS_MTX_ASSERT(hpts);
1513         for (i = 0; i < slots_to_run; i++) {
1514                 /*
1515                  * Calculate our delay, if there are no extra ticks there
1516                  * was not any (i.e. if slots_to_run == 1, no delay).
1517                  */
1518                 hpts->p_delayed_by = (slots_to_run - (i + 1)) * HPTS_TICKS_PER_SLOT;
1519                 HPTS_MTX_ASSERT(hpts);
1520                 while ((inp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_runningslot])) != NULL) {
1521                         HPTS_MTX_ASSERT(hpts);
1522                         /* For debugging */
1523                         if (seen_endpoint == 0) {
1524                                 seen_endpoint = 1;
1525                                 orig_exit_slot = slot_pos_of_endpoint = hpts->p_runningslot;
1526                         } else if (completed_measure == 0) {
1527                                 /* Record the new position */
1528                                 orig_exit_slot = hpts->p_runningslot;
1529                         }
1530                         hpts->p_inp = inp;
1531                         KASSERT(hpts->p_runningslot == inp->inp_hptsslot,
1532                                 ("Hpts:%p inp:%p slot mis-aligned %u vs %u",
1533                                  hpts, inp, hpts->p_runningslot, inp->inp_hptsslot));
1534                         /* Now pull it */
1535                         if (inp->inp_hpts_cpu_set == 0) {
1536                                 set_cpu = 1;
1537                         } else {
1538                                 set_cpu = 0;
1539                         }
1540                         hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[hpts->p_runningslot], 0);
1541                         if ((ninp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_runningslot])) != NULL) {
1542                                 /* We prefetch the next inp if possible */
1543                                 kern_prefetch(ninp, &prefetch_ninp);
1544                                 prefetch_ninp = 1;
1545                         }
1546                         if (inp->inp_hpts_request) {
1547                                 /*
1548                                  * This guy is deferred out further in time
1549                                  * then our wheel had available on it.
1550                                  * Push him back on the wheel or run it
1551                                  * depending.
1552                                  */
1553                                 uint32_t maxslots, last_slot, remaining_slots;
1554
1555                                 remaining_slots = slots_to_run - (i + 1);
1556                                 if (inp->inp_hpts_request > remaining_slots) {
1557                                         /*
1558                                          * How far out can we go?
1559                                          */
1560                                         maxslots = max_slots_available(hpts, hpts->p_cur_slot, &last_slot);
1561                                         if (maxslots >= inp->inp_hpts_request) {
1562                                                 /* we can place it finally to be processed  */
1563                                                 inp->inp_hptsslot = hpts_slot(hpts->p_runningslot, inp->inp_hpts_request);
1564                                                 inp->inp_hpts_request = 0;
1565                                         } else {
1566                                                 /* Work off some more time */
1567                                                 inp->inp_hptsslot = last_slot;
1568                                                 inp->inp_hpts_request-= maxslots;
1569                                         }
1570                                         hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], __LINE__, 1);
1571                                         hpts->p_inp = NULL;
1572                                         continue;
1573                                 }
1574                                 inp->inp_hpts_request = 0;
1575                                 /* Fall through we will so do it now */
1576                         }
1577                         /*
1578                          * We clear the hpts flag here after dealing with
1579                          * remaining slots. This way anyone looking with the
1580                          * TCB lock will see its on the hpts until just
1581                          * before we unlock.
1582                          */
1583                         inp->inp_in_hpts = 0;
1584                         mtx_unlock(&hpts->p_mtx);
1585                         INP_WLOCK(inp);
1586                         if (in_pcbrele_wlocked(inp)) {
1587                                 mtx_lock(&hpts->p_mtx);
1588                                 hpts->p_inp = NULL;
1589                                 continue;
1590                         }
1591                         if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
1592                             (inp->inp_flags2 & INP_FREED)) {
1593                         out_now:
1594                                 KASSERT(mtx_owned(&hpts->p_mtx) == 0,
1595                                         ("Hpts:%p owns mtx prior-to lock line:%d",
1596                                          hpts, __LINE__));
1597                                 INP_WUNLOCK(inp);
1598                                 mtx_lock(&hpts->p_mtx);
1599                                 hpts->p_inp = NULL;
1600                                 continue;
1601                         }
1602                         tp = intotcpcb(inp);
1603                         if ((tp == NULL) || (tp->t_inpcb == NULL)) {
1604                                 goto out_now;
1605                         }
1606                         if (set_cpu) {
1607                                 /*
1608                                  * Setup so the next time we will move to
1609                                  * the right CPU. This should be a rare
1610                                  * event. It will sometimes happens when we
1611                                  * are the client side (usually not the
1612                                  * server). Somehow tcp_output() gets called
1613                                  * before the tcp_do_segment() sets the
1614                                  * intial state. This means the r_cpu and
1615                                  * r_hpts_cpu is 0. We get on the hpts, and
1616                                  * then tcp_input() gets called setting up
1617                                  * the r_cpu to the correct value. The hpts
1618                                  * goes off and sees the mis-match. We
1619                                  * simply correct it here and the CPU will
1620                                  * switch to the new hpts nextime the tcb
1621                                  * gets added to the hpts (not this one)
1622                                  * :-)
1623                                  */
1624                                 tcp_set_hpts(inp);
1625                         }
1626 #ifdef VIMAGE
1627                         CURVNET_SET(inp->inp_vnet);
1628 #endif
1629                         /* Lets do any logging that we might want to */
1630                         if (hpts_does_tp_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
1631                                 tcp_hpts_log(hpts, tp, &tv, slots_to_run, i, from_callout);
1632                         }
1633                         /*
1634                          * There is a hole here, we get the refcnt on the
1635                          * inp so it will still be preserved but to make
1636                          * sure we can get the INP we need to hold the p_mtx
1637                          * above while we pull out the tp/inp,  as long as
1638                          * fini gets the lock first we are assured of having
1639                          * a sane INP we can lock and test.
1640                          */
1641                         KASSERT(mtx_owned(&hpts->p_mtx) == 0,
1642                                 ("Hpts:%p owns mtx prior-to tcp_output call line:%d",
1643                                  hpts, __LINE__));
1644
1645                         if (tp->t_fb_ptr != NULL) {
1646                                 kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1647                                 did_prefetch = 1;
1648                         }
1649                         if ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) && tp->t_in_pkt) {
1650                                 error = (*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0);
1651                                 if (error) {
1652                                         /* The input killed the connection */
1653                                         goto skip_pacing;
1654                                 }
1655                         }
1656                         inp->inp_hpts_calls = 1;
1657                         error = tp->t_fb->tfb_tcp_output(tp);
1658                         inp->inp_hpts_calls = 0;
1659                         if (ninp && ninp->inp_ppcb) {
1660                                 /*
1661                                  * If we have a nxt inp, see if we can
1662                                  * prefetch its ppcb. Note this may seem
1663                                  * "risky" since we have no locks (other
1664                                  * than the previous inp) and there no
1665                                  * assurance that ninp was not pulled while
1666                                  * we were processing inp and freed. If this
1667                                  * occurred it could mean that either:
1668                                  *
1669                                  * a) Its NULL (which is fine we won't go
1670                                  * here) <or> b) Its valid (which is cool we
1671                                  * will prefetch it) <or> c) The inp got
1672                                  * freed back to the slab which was
1673                                  * reallocated. Then the piece of memory was
1674                                  * re-used and something else (not an
1675                                  * address) is in inp_ppcb. If that occurs
1676                                  * we don't crash, but take a TLB shootdown
1677                                  * performance hit (same as if it was NULL
1678                                  * and we tried to pre-fetch it).
1679                                  *
1680                                  * Considering that the likelyhood of <c> is
1681                                  * quite rare we will take a risk on doing
1682                                  * this. If performance drops after testing
1683                                  * we can always take this out. NB: the
1684                                  * kern_prefetch on amd64 actually has
1685                                  * protection against a bad address now via
1686                                  * the DMAP_() tests. This will prevent the
1687                                  * TLB hit, and instead if <c> occurs just
1688                                  * cause us to load cache with a useless
1689                                  * address (to us).
1690                                  */
1691                                 kern_prefetch(ninp->inp_ppcb, &prefetch_tp);
1692                                 prefetch_tp = 1;
1693                         }
1694                         INP_WUNLOCK(inp);
1695                 skip_pacing:
1696 #ifdef VIMAGE
1697                         CURVNET_RESTORE();
1698 #endif
1699                         INP_UNLOCK_ASSERT(inp);
1700                         KASSERT(mtx_owned(&hpts->p_mtx) == 0,
1701                                 ("Hpts:%p owns mtx prior-to lock line:%d",
1702                                  hpts, __LINE__));
1703                         mtx_lock(&hpts->p_mtx);
1704                         hpts->p_inp = NULL;
1705                 }
1706                 if (seen_endpoint) {
1707                         /*
1708                          * We now have a accurate distance between
1709                          * slot_pos_of_endpoint <-> orig_exit_slot
1710                          * to tell us how late we were, orig_exit_slot
1711                          * is where we calculated the end of our cycle to
1712                          * be when we first entered.
1713                          */
1714                         completed_measure = 1;
1715                 }
1716                 HPTS_MTX_ASSERT(hpts);
1717                 hpts->p_inp = NULL;
1718                 hpts->p_runningslot++;
1719                 if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
1720                         hpts->p_runningslot = 0;
1721                 }
1722         }
1723 no_one:
1724         HPTS_MTX_ASSERT(hpts);
1725         hpts->p_delayed_by = 0;
1726         /*
1727          * Check to see if we took an excess amount of time and need to run
1728          * more ticks (if we did not hit eno-bufs).
1729          */
1730         KASSERT((((TAILQ_EMPTY(&hpts->p_input) != 0) && (hpts->p_on_inqueue_cnt == 0)) ||
1731                  ((TAILQ_EMPTY(&hpts->p_input) == 0) && (hpts->p_on_inqueue_cnt > 0))),
1732                 ("%s hpts:%p in_hpts cnt:%d queue state mismatch",
1733                  __FUNCTION__, hpts, hpts->p_on_inqueue_cnt));
1734         hpts->p_prev_slot = hpts->p_cur_slot;
1735         hpts->p_lasttick = hpts->p_curtick;
1736         if ((from_callout == 0) || (loop_cnt > max_pacer_loops)) {
1737                 /*
1738                  * Something is serious slow we have
1739                  * looped through processing the wheel
1740                  * and by the time we cleared the
1741                  * needs to run max_pacer_loops time
1742                  * we still needed to run. That means
1743                  * the system is hopelessly behind and
1744                  * can never catch up :(
1745                  *
1746                  * We will just lie to this thread
1747                  * and let it thing p_curtick is
1748                  * correct. When it next awakens
1749                  * it will find itself further behind.
1750                  */
1751                 if (from_callout)
1752                         counter_u64_add(hpts_hopelessly_behind, 1);
1753                 goto no_run;
1754         }
1755         hpts->p_curtick = tcp_gethptstick(&tv);
1756         hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1757         if (seen_endpoint == 0) {
1758                 /* We saw no endpoint but we may be looping */
1759                 orig_exit_slot = hpts->p_cur_slot;
1760         }
1761         if ((wrap_loop_cnt < 2) &&
1762             (hpts->p_lasttick != hpts->p_curtick)) {
1763                 counter_u64_add(hpts_loops, 1);
1764                 loop_cnt++;
1765                 goto again;
1766         }
1767 no_run:
1768         cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1769         /*
1770          * Set flag to tell that we are done for
1771          * any slot input that happens during
1772          * input.
1773          */
1774         hpts->p_wheel_complete = 1;
1775         /*
1776          * Run any input that may be there not covered
1777          * in running data.
1778          */
1779         if (!TAILQ_EMPTY(&hpts->p_input)) {
1780                 tcp_input_data(hpts, &tv);
1781                 /*
1782                  * Now did we spend too long running input and need to run more ticks?
1783                  * Note that if wrap_loop_cnt < 2 then we should have the conditions
1784                  * in the KASSERT's true. But if the wheel is behind i.e. wrap_loop_cnt
1785                  * is greater than 2, then the condtion most likely are *not* true. Also
1786                  * if we are called not from the callout, we don't run the wheel multiple
1787                  * times so the slots may not align either.
1788                  */
1789                 KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
1790                          (wrap_loop_cnt >= 2) || (from_callout == 0)),
1791                         ("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
1792                          hpts->p_prev_slot, hpts->p_cur_slot));
1793                 KASSERT(((hpts->p_lasttick == hpts->p_curtick)
1794                          || (wrap_loop_cnt >= 2) || (from_callout == 0)),
1795                         ("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
1796                          hpts->p_lasttick, hpts->p_curtick));
1797                 if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
1798                         hpts->p_curtick = tcp_gethptstick(&tv);
1799                         counter_u64_add(hpts_loops, 1);
1800                         hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1801                         goto again;
1802                 }
1803         }
1804         if (from_callout){
1805                 tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);
1806         }
1807         if (seen_endpoint)
1808                 return(hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
1809         else
1810                 return (0);
1811 }
1812
1813 void
1814 __tcp_set_hpts(struct inpcb *inp, int32_t line)
1815 {
1816         struct tcp_hpts_entry *hpts;
1817         int failed;
1818
1819         INP_WLOCK_ASSERT(inp);
1820         hpts = tcp_hpts_lock(inp);
1821         if ((inp->inp_in_hpts == 0) &&
1822             (inp->inp_hpts_cpu_set == 0)) {
1823                 inp->inp_hpts_cpu = hpts_cpuid(inp, &failed);
1824                 if (failed == 0)
1825                         inp->inp_hpts_cpu_set = 1;
1826         }
1827         mtx_unlock(&hpts->p_mtx);
1828         hpts = tcp_input_lock(inp);
1829         if ((inp->inp_input_cpu_set == 0) &&
1830             (inp->inp_in_input == 0)) {
1831                 inp->inp_input_cpu = hpts_cpuid(inp, &failed);
1832                 if (failed == 0)
1833                         inp->inp_input_cpu_set = 1;
1834         }
1835         mtx_unlock(&hpts->p_mtx);
1836 }
1837
1838 uint16_t
1839 tcp_hpts_delayedby(struct inpcb *inp){
1840         return (tcp_pace.rp_ent[inp->inp_hpts_cpu]->p_delayed_by);
1841 }
1842
1843 static void
1844 __tcp_run_hpts(struct tcp_hpts_entry *hpts)
1845 {
1846         int ticks_ran;
1847
1848         if (hpts->p_hpts_active) {
1849                 /* Already active */
1850                 return;
1851         }
1852         if (mtx_trylock(&hpts->p_mtx) == 0) {
1853                 /* Someone else got the lock */
1854                 return;
1855         }
1856         if (hpts->p_hpts_active)
1857                 goto out_with_mtx;
1858         hpts->syscall_cnt++;
1859         counter_u64_add(hpts_direct_call, 1);
1860         hpts->p_hpts_active = 1;
1861         ticks_ran = tcp_hptsi(hpts, 0);
1862         /* We may want to adjust the sleep values here */
1863         if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1864                 if (ticks_ran > ticks_indicate_less_sleep) {
1865                         struct timeval tv;
1866                         sbintime_t sb;
1867                         int cpu;
1868
1869                         hpts->p_mysleep.tv_usec /= 2;
1870                         if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
1871                                 hpts->p_mysleep.tv_usec = dynamic_min_sleep;
1872                         /* Reschedule with new to value */
1873                         tcp_hpts_set_max_sleep(hpts, 0);
1874                         tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1875                         /* Validate its in the right ranges */
1876                         if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
1877                                 hpts->overidden_sleep = tv.tv_usec;
1878                                 tv.tv_usec = hpts->p_mysleep.tv_usec;
1879                         } else if (tv.tv_usec > dynamic_max_sleep) {
1880                                 /* Lets not let sleep get above this value */
1881                                 hpts->overidden_sleep = tv.tv_usec;
1882                                 tv.tv_usec = dynamic_max_sleep;
1883                         }
1884                         /*
1885                          * In this mode the timer is a backstop to
1886                          * all the userret/lro_flushes so we use
1887                          * the dynamic value and set the on_min_sleep
1888                          * flag so we will not be awoken.
1889                          */
1890                         sb = tvtosbt(tv);
1891                         cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ?  hpts->p_cpu : curcpu;
1892                         /* Store off to make visible the actual sleep time */
1893                         hpts->sleeping = tv.tv_usec;
1894                         callout_reset_sbt_on(&hpts->co, sb, 0,
1895                                              hpts_timeout_swi, hpts, cpu,
1896                                              (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1897                 } else if (ticks_ran < ticks_indicate_more_sleep) {
1898                         /* For the further sleep, don't reschedule  hpts */
1899                         hpts->p_mysleep.tv_usec *= 2;
1900                         if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1901                                 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1902                 }
1903                 hpts->p_on_min_sleep = 1;
1904         }
1905         hpts->p_hpts_active = 0;
1906 out_with_mtx:
1907         HPTS_MTX_ASSERT(hpts);
1908         mtx_unlock(&hpts->p_mtx);
1909 }
1910
1911 static struct tcp_hpts_entry *
1912 tcp_choose_hpts_to_run(void)
1913 {
1914         int i, oldest_idx;
1915         uint32_t cts, time_since_ran, calc;
1916
1917         if ((hpts_uses_oldest == 0) ||
1918             ((hpts_uses_oldest > 1) &&
1919              (tcp_pace.rp_ent[(tcp_pace.rp_num_hptss-1)]->p_on_queue_cnt >= hpts_uses_oldest))) {
1920                 /*
1921                  * We have either disabled the feature (0), or
1922                  * we have crossed over the oldest threshold on the
1923                  * last hpts. We use the last one for simplification
1924                  * since we don't want to use the first one (it may
1925                  * have starting connections that have not settled
1926                  * on the cpu yet).
1927                  */
1928                 return(tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
1929         }
1930         /* Lets find the oldest hpts to attempt to run */
1931         cts = tcp_get_usecs(NULL);
1932         time_since_ran = 0;
1933         oldest_idx = -1;
1934         for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1935                 if (TSTMP_GT(cts, cts_last_ran[i]))
1936                         calc = cts - cts_last_ran[i];
1937                 else
1938                         calc = 0;
1939                 if (calc > time_since_ran) {
1940                         oldest_idx = i;
1941                         time_since_ran = calc;
1942                 }
1943         }
1944         if (oldest_idx >= 0)
1945                 return(tcp_pace.rp_ent[oldest_idx]);
1946         else
1947                 return(tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
1948 }
1949
1950
1951 void
1952 tcp_run_hpts(void)
1953 {
1954         static struct tcp_hpts_entry *hpts;
1955         struct epoch_tracker et;
1956
1957         NET_EPOCH_ENTER(et);
1958         hpts = tcp_choose_hpts_to_run();
1959         __tcp_run_hpts(hpts);
1960         NET_EPOCH_EXIT(et);
1961 }
1962
1963
1964 static void
1965 tcp_hpts_thread(void *ctx)
1966 {
1967         struct tcp_hpts_entry *hpts;
1968         struct epoch_tracker et;
1969         struct timeval tv;
1970         sbintime_t sb;
1971         int cpu, ticks_ran;
1972
1973         hpts = (struct tcp_hpts_entry *)ctx;
1974         mtx_lock(&hpts->p_mtx);
1975         if (hpts->p_direct_wake) {
1976                 /* Signaled by input or output with low occupancy count. */
1977                 callout_stop(&hpts->co);
1978                 counter_u64_add(hpts_direct_awakening, 1);
1979         } else {
1980                 /* Timed out, the normal case. */
1981                 counter_u64_add(hpts_wake_timeout, 1);
1982                 if (callout_pending(&hpts->co) ||
1983                     !callout_active(&hpts->co)) {
1984                         mtx_unlock(&hpts->p_mtx);
1985                         return;
1986                 }
1987         }
1988         callout_deactivate(&hpts->co);
1989         hpts->p_hpts_wake_scheduled = 0;
1990         NET_EPOCH_ENTER(et);
1991         if (hpts->p_hpts_active) {
1992                 /*
1993                  * We are active already. This means that a syscall
1994                  * trap or LRO is running in behalf of hpts. In that case
1995                  * we need to double our timeout since there seems to be
1996                  * enough activity in the system that we don't need to
1997                  * run as often (if we were not directly woken).
1998                  */
1999                 if (hpts->p_direct_wake == 0) {
2000                         counter_u64_add(hpts_back_tosleep, 1);
2001                         if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
2002                                 hpts->p_mysleep.tv_usec *= 2;
2003                                 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
2004                                         hpts->p_mysleep.tv_usec = dynamic_max_sleep;
2005                                 tv.tv_usec = hpts->p_mysleep.tv_usec;
2006                                 hpts->p_on_min_sleep = 1;
2007                         } else {
2008                                 /*
2009                                  * Here we have low count on the wheel, but
2010                                  * somehow we still collided with one of the
2011                                  * connections. Lets go back to sleep for a
2012                                  * min sleep time, but clear the flag so we
2013                                  * can be awoken by insert.
2014                                  */
2015                                 hpts->p_on_min_sleep = 0;
2016                                 tv.tv_usec = tcp_min_hptsi_time;
2017                         }
2018                 } else {
2019                         /*
2020                          * Directly woken most likely to reset the
2021                          * callout time.
2022                          */
2023                         tv.tv_sec = 0;
2024                         tv.tv_usec = hpts->p_mysleep.tv_usec;
2025                 }
2026                 goto back_to_sleep;
2027         }
2028         hpts->sleeping = 0;
2029         hpts->p_hpts_active = 1;
2030         ticks_ran = tcp_hptsi(hpts, 1);
2031         tv.tv_sec = 0;
2032         tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
2033         if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
2034                 if(hpts->p_direct_wake == 0) {
2035                         /*
2036                          * Only adjust sleep time if we were
2037                          * called from the callout i.e. direct_wake == 0.
2038                          */
2039                         if (ticks_ran < ticks_indicate_more_sleep) {
2040                                 hpts->p_mysleep.tv_usec *= 2;
2041                                 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
2042                                         hpts->p_mysleep.tv_usec = dynamic_max_sleep;
2043                         } else if (ticks_ran > ticks_indicate_less_sleep) {
2044                                 hpts->p_mysleep.tv_usec /= 2;
2045                                 if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
2046                                         hpts->p_mysleep.tv_usec = dynamic_min_sleep;
2047                         }
2048                 }
2049                 if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
2050                         hpts->overidden_sleep = tv.tv_usec;
2051                         tv.tv_usec = hpts->p_mysleep.tv_usec;
2052                 } else if (tv.tv_usec > dynamic_max_sleep) {
2053                         /* Lets not let sleep get above this value */
2054                         hpts->overidden_sleep = tv.tv_usec;
2055                         tv.tv_usec = dynamic_max_sleep;
2056                 }
2057                 /*
2058                  * In this mode the timer is a backstop to
2059                  * all the userret/lro_flushes so we use
2060                  * the dynamic value and set the on_min_sleep
2061                  * flag so we will not be awoken.
2062                  */
2063                 hpts->p_on_min_sleep = 1;
2064         } else if (hpts->p_on_queue_cnt == 0)  {
2065                 /*
2066                  * No one on the wheel, please wake us up
2067                  * if you insert on the wheel.
2068                  */
2069                 hpts->p_on_min_sleep = 0;
2070                 hpts->overidden_sleep = 0;
2071         } else {
2072                 /*
2073                  * We hit here when we have a low number of
2074                  * clients on the wheel (our else clause).
2075                  * We may need to go on min sleep, if we set
2076                  * the flag we will not be awoken if someone
2077                  * is inserted ahead of us. Clearing the flag
2078                  * means we can be awoken. This is "old mode"
2079                  * where the timer is what runs hpts mainly.
2080                  */
2081                 if (tv.tv_usec < tcp_min_hptsi_time) {
2082                         /*
2083                          * Yes on min sleep, which means
2084                          * we cannot be awoken.
2085                          */
2086                         hpts->overidden_sleep = tv.tv_usec;
2087                         tv.tv_usec = tcp_min_hptsi_time;
2088                         hpts->p_on_min_sleep = 1;
2089                 } else {
2090                         /* Clear the min sleep flag */
2091                         hpts->overidden_sleep = 0;
2092                         hpts->p_on_min_sleep = 0;
2093                 }
2094         }
2095         HPTS_MTX_ASSERT(hpts);
2096         hpts->p_hpts_active = 0;
2097 back_to_sleep:
2098         hpts->p_direct_wake = 0;
2099         sb = tvtosbt(tv);
2100         cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ?  hpts->p_cpu : curcpu;
2101         /* Store off to make visible the actual sleep time */
2102         hpts->sleeping = tv.tv_usec;
2103         callout_reset_sbt_on(&hpts->co, sb, 0,
2104                              hpts_timeout_swi, hpts, cpu,
2105                              (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
2106         NET_EPOCH_EXIT(et);
2107         mtx_unlock(&hpts->p_mtx);
2108 }
2109
2110 #undef  timersub
2111
2112 static void
2113 tcp_init_hptsi(void *st)
2114 {
2115         int32_t i, j, error, bound = 0, created = 0;
2116         size_t sz, asz;
2117         struct timeval tv;
2118         sbintime_t sb;
2119         struct tcp_hpts_entry *hpts;
2120         struct pcpu *pc;
2121         cpuset_t cs;
2122         char unit[16];
2123         uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
2124         int count, domain, cpu;
2125
2126         tcp_pace.rp_proc = NULL;
2127         tcp_pace.rp_num_hptss = ncpus;
2128         hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
2129         hpts_loops = counter_u64_alloc(M_WAITOK);
2130         back_tosleep = counter_u64_alloc(M_WAITOK);
2131         combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
2132         wheel_wrap = counter_u64_alloc(M_WAITOK);
2133         hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
2134         hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
2135         hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
2136         hpts_direct_call = counter_u64_alloc(M_WAITOK);
2137         cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
2138         cpu_uses_random = counter_u64_alloc(M_WAITOK);
2139
2140
2141         sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
2142         tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
2143         sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
2144         cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
2145         asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
2146         for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
2147                 tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
2148                     M_TCPHPTS, M_WAITOK | M_ZERO);
2149                 tcp_pace.rp_ent[i]->p_hptss = malloc(asz,
2150                     M_TCPHPTS, M_WAITOK);
2151                 hpts = tcp_pace.rp_ent[i];
2152                 /*
2153                  * Init all the hpts structures that are not specifically
2154                  * zero'd by the allocations. Also lets attach them to the
2155                  * appropriate sysctl block as well.
2156                  */
2157                 mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
2158                     "hpts", MTX_DEF | MTX_DUPOK);
2159                 TAILQ_INIT(&hpts->p_input);
2160                 for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
2161                         TAILQ_INIT(&hpts->p_hptss[j]);
2162                 }
2163                 sysctl_ctx_init(&hpts->hpts_ctx);
2164                 sprintf(unit, "%d", i);
2165                 hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
2166                     SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
2167                     OID_AUTO,
2168                     unit,
2169                     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
2170                     "");
2171                 SYSCTL_ADD_INT(&hpts->hpts_ctx,
2172                     SYSCTL_CHILDREN(hpts->hpts_root),
2173                     OID_AUTO, "in_qcnt", CTLFLAG_RD,
2174                     &hpts->p_on_inqueue_cnt, 0,
2175                     "Count TCB's awaiting input processing");
2176                 SYSCTL_ADD_INT(&hpts->hpts_ctx,
2177                     SYSCTL_CHILDREN(hpts->hpts_root),
2178                     OID_AUTO, "out_qcnt", CTLFLAG_RD,
2179                     &hpts->p_on_queue_cnt, 0,
2180                     "Count TCB's awaiting output processing");
2181                 SYSCTL_ADD_U16(&hpts->hpts_ctx,
2182                     SYSCTL_CHILDREN(hpts->hpts_root),
2183                     OID_AUTO, "active", CTLFLAG_RD,
2184                     &hpts->p_hpts_active, 0,
2185                     "Is the hpts active");
2186                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
2187                     SYSCTL_CHILDREN(hpts->hpts_root),
2188                     OID_AUTO, "curslot", CTLFLAG_RD,
2189                     &hpts->p_cur_slot, 0,
2190                     "What the current running pacers goal");
2191                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
2192                     SYSCTL_CHILDREN(hpts->hpts_root),
2193                     OID_AUTO, "runtick", CTLFLAG_RD,
2194                     &hpts->p_runningslot, 0,
2195                     "What the running pacers current slot is");
2196                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
2197                     SYSCTL_CHILDREN(hpts->hpts_root),
2198                     OID_AUTO, "curtick", CTLFLAG_RD,
2199                     &hpts->p_curtick, 0,
2200                     "What the running pacers last tick mapped to the wheel was");
2201                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
2202                     SYSCTL_CHILDREN(hpts->hpts_root),
2203                     OID_AUTO, "lastran", CTLFLAG_RD,
2204                     &cts_last_ran[i], 0,
2205                     "The last usec tick that this hpts ran");
2206                 SYSCTL_ADD_LONG(&hpts->hpts_ctx,
2207                     SYSCTL_CHILDREN(hpts->hpts_root),
2208                     OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
2209                     &hpts->p_mysleep.tv_usec,
2210                     "What the running pacers is using for p_mysleep.tv_usec");
2211                 SYSCTL_ADD_U64(&hpts->hpts_ctx,
2212                     SYSCTL_CHILDREN(hpts->hpts_root),
2213                     OID_AUTO, "now_sleeping", CTLFLAG_RD,
2214                     &hpts->sleeping, 0,
2215                     "What the running pacers is actually sleeping for");
2216                 SYSCTL_ADD_U64(&hpts->hpts_ctx,
2217                     SYSCTL_CHILDREN(hpts->hpts_root),
2218                     OID_AUTO, "syscall_cnt", CTLFLAG_RD,
2219                     &hpts->syscall_cnt, 0,
2220                     "How many times we had syscalls on this hpts");
2221
2222                 hpts->p_hpts_sleep_time = hpts_sleep_max;
2223                 hpts->p_num = i;
2224                 hpts->p_curtick = tcp_gethptstick(&tv);
2225                 cts_last_ran[i] = tcp_tv_to_usectick(&tv);
2226                 hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
2227                 hpts->p_cpu = 0xffff;
2228                 hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
2229                 callout_init(&hpts->co, 1);
2230         }
2231
2232         /* Don't try to bind to NUMA domains if we don't have any */
2233         if (vm_ndomains == 1 && tcp_bind_threads == 2)
2234                 tcp_bind_threads = 0;
2235
2236         /*
2237          * Now lets start ithreads to handle the hptss.
2238          */
2239         for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
2240                 hpts = tcp_pace.rp_ent[i];
2241                 hpts->p_cpu = i;
2242                 error = swi_add(&hpts->ie, "hpts",
2243                     tcp_hpts_thread, (void *)hpts,
2244                     SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
2245                 KASSERT(error == 0,
2246                         ("Can't add hpts:%p i:%d err:%d",
2247                          hpts, i, error));
2248                 created++;
2249                 hpts->p_mysleep.tv_sec = 0;
2250                 hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
2251                 if (tcp_bind_threads == 1) {
2252                         if (intr_event_bind(hpts->ie, i) == 0)
2253                                 bound++;
2254                 } else if (tcp_bind_threads == 2) {
2255                         pc = pcpu_find(i);
2256                         domain = pc->pc_domain;
2257                         CPU_COPY(&cpuset_domain[domain], &cs);
2258                         if (intr_event_bind_ithread_cpuset(hpts->ie, &cs)
2259                             == 0) {
2260                                 bound++;
2261                                 count = hpts_domains[domain].count;
2262                                 hpts_domains[domain].cpu[count] = i;
2263                                 hpts_domains[domain].count++;
2264                         }
2265                 }
2266                 tv.tv_sec = 0;
2267                 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
2268                 hpts->sleeping = tv.tv_usec;
2269                 sb = tvtosbt(tv);
2270                 cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ?  hpts->p_cpu : curcpu;
2271                 callout_reset_sbt_on(&hpts->co, sb, 0,
2272                                      hpts_timeout_swi, hpts, cpu,
2273                                      (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
2274         }
2275         /*
2276          * If we somehow have an empty domain, fall back to choosing
2277          * among all htps threads.
2278          */
2279         for (i = 0; i < vm_ndomains; i++) {
2280                 if (hpts_domains[i].count == 0) {
2281                         tcp_bind_threads = 0;
2282                         break;
2283                 }
2284         }
2285         printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
2286             created, bound,
2287             tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
2288 #ifdef INVARIANTS
2289         printf("HPTS is in INVARIANT mode!!\n");
2290 #endif
2291 }
2292
2293 SYSINIT(tcphptsi, SI_SUB_SOFTINTR, SI_ORDER_ANY, tcp_init_hptsi, NULL);
2294 MODULE_VERSION(tcphpts, 1);