/*-
 * Copyright (c) 2016-2018 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include "opt_tcpdebug.h"

/**
 * Some notes about usage.
 *
 * The tcp_hpts system is designed to provide a high precision timer
 * system for tcp. Its main purpose is to provide a mechanism for
 * pacing packets out onto the wire. It can be used by a given TCP
 * stack in two ways (and the two methods can be used simultaneously).
 *
 * First, and this is how Rack and BBR mainly use it, it can be used
 * to call tcp_output() of a transport stack at some time in the future.
 * The normal way this is done is that tcp_output() of the stack schedules
 * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
 * slot is the time from now at which the stack wants to be called, and it
 * must be converted to tcp_hpts's notion of a slot. This is done with
 * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
 * call from the tcp_output() routine might look like:
 *
 * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
 *
 * The above would schedule tcp_output() to be called in 550 microseconds.
 * Note that if using this mechanism the stack will want to add, near
 * the top of tcp_output(), a check to prevent unwanted calls (from user
 * land or the arrival of incoming ACKs). So it would add something like:
 *
 * if (inp->inp_in_hpts)
 *    return;
 *
 * to prevent output processing until the time allotted has gone by.
 * Of course this is a bare-bones example and the stack will probably
 * have more considerations than just the above.
 *
 * The second service (actually two related services) the tcp_hpts
 * system provides is the ability to either abort
 * a connection (later) or process input on a connection.
 * Why would you want to do this? To keep processor locality
 * and/or to avoid having to untangle any recursive
 * locks. The input function is now hooked to the new LRO
 * system as well.
 *
 * In order to use the input redirection function the
 * tcp stack must define an input function for
 * tfb_do_queued_segments(). This function understands
 * how to dequeue an array of packets that were input and
 * knows how to call the correct processing routine.
 *
 * Locking in this is important as well, so most likely the
 * stack will need to define tfb_do_segment_nounlock(),
 * splitting tfb_do_segment() into two parts: a main processing
 * part that does not unlock the INP and returns 0 if all is well
 * (the lock was not released) or 1 if the TCB had to be destroyed
 * (a reset received, etc.) and the lock was dropped.
 * The remains of tfb_do_segment() then become a simple call
 * to the tfb_do_segment_nounlock() function that checks the return
 * code and unlocks if needed.
 *
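 * As a rough sketch only (the "foo_" names are hypothetical, not part
 * of this file; the trailing arguments follow the convention used by
 * the rack stack), the remaining wrapper might look like:
 *
 *     static void
 *     foo_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 *         struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos)
 *     {
 *             struct timeval tv;
 *
 *             microuptime(&tv);
 *             if (foo_do_segment_nounlock(m, th, so, tp, drop_hdrlen,
 *                 tlen, iptos, 0, &tv) == 0)
 *                     INP_WUNLOCK(tp->t_inpcb);
 *     }
 *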
 * The stack must also set the flag on the INP that it supports this
 * feature, i.e. INP_SUPPORTS_MBUFQ. The LRO code recognizes
 * this flag as well and will queue packets when it is set.
 * There are two other flags as well, INP_MBUF_QUEUE_READY and
 * INP_DONT_SACK_QUEUE. The first flag tells the LRO code
 * that we are in the pacer for output, so there is no
 * need to wake up the hpts system to get immediate
 * input. The second tells the LRO code that it is okay,
 * even if a SACK arrives, to still defer input and let
 * the current hpts timer run (this is usually set when
 * a rack timer is up, so we know SACKs are already happening
 * on the connection and don't want a wakeup yet).
 *
 * There is a common function within the rack_bbr_common code,
 * ctf_do_queued_segments(). This function
 * knows how to take the input queue of packets from
 * tp->t_in_pkts and process them, digging out
 * all the arguments, calling any bpf tap and
 * calling into tfb_do_segment_nounlock(). The common
 * function (ctf_do_queued_segments()) requires that
 * you have defined tfb_do_segment_nounlock() as
 * described above.
 *
 * The second feature of the input side of hpts is the
 * dropping of a connection. This exists because of the way
 * locking may have occurred on the INP_WLOCK. So if
 * a stack wants to drop a connection it calls:
 *
 *     tcp_set_inp_to_drop(inp, ETIMEDOUT)
 *
 * to schedule the tcp_hpts system to call
 *
 *    tcp_drop(tp, drop_reason)
 *
 * at a future point. This is quite handy for preventing locking
 * issues when dropping connections.
 *
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>           /* for proc0 declaration */
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <sys/time.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>

#include <vm/uma.h>
#include <vm/vm.h>

#include <net/route.h>
#include <net/vnet.h>

#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif

#define TCPSTATES               /* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>    /* required for icmp_var.h */
#include <netinet/icmp_var.h>   /* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>

#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif                          /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
#ifdef RSS
static int tcp_bind_threads = 1;
#else
static int tcp_bind_threads = 2;
#endif
static int tcp_use_irq_cpu = 0;
static struct tcp_hptsi tcp_pace;
static uint32_t *cts_last_ran;
static int hpts_does_tp_logging = 0;
static int hpts_use_assigned_cpu = 1;
static int32_t hpts_uses_oldest = OLDEST_THRESHOLD;

static void tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv);
static int32_t tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout);
static void tcp_hpts_thread(void *ctx);
static void tcp_init_hptsi(void *st);

int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int conn_cnt_thresh = DEFAULT_CONNECTION_THESHOLD;
static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Hpts controls");
SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "TCP Hpts statistics");

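/*
 * Subtract *uvp from *tvp, storing the normalized result (tv_usec in
 * [0, 1000000)) in *vvp.
 */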
#define timersub(tvp, uvp, vvp)                                         \
        do {                                                            \
                (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;          \
                (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;       \
                if ((vvp)->tv_usec < 0) {                               \
                        (vvp)->tv_sec--;                                \
                        (vvp)->tv_usec += 1000000;                      \
                }                                                       \
        } while (0)

static int32_t tcp_hpts_precision = 120;

struct hpts_domain_info {
        int count;
        int cpu[MAXCPU];
};

struct hpts_domain_info hpts_domains[MAXMEMDOM];

counter_u64_t hpts_hopelessly_behind;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
    &hpts_hopelessly_behind,
    "Number of times hpts could not catch up and was behind hopelessly");

counter_u64_t hpts_loops;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
    &hpts_loops, "Number of times hpts had to loop to catch up");

counter_u64_t back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
    &back_tosleep, "Number of times hpts found no tcbs");

counter_u64_t combined_wheel_wrap;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
    &combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");

counter_u64_t wheel_wrap;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
    &wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");

counter_u64_t hpts_direct_call;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
    &hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");

counter_u64_t hpts_wake_timeout;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
    &hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");

counter_u64_t hpts_direct_awakening;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
    &hpts_direct_awakening, "Number of times hpts threads woke up via a direct awakening");

counter_u64_t hpts_back_tosleep;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
    &hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep with no work");

counter_u64_t cpu_uses_flowid;
counter_u64_t cpu_uses_random;

SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
    &cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
    &cpu_uses_random, "Number of times when setting cpuid we used a random value");

TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
    &tcp_bind_threads, 2,
    "Thread Binding tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
    &tcp_use_irq_cpu, 0,
    "Use of irq CPU tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
    &tcp_hpts_precision, 120,
    "Value for PRE() precision of callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
    &conn_cnt_thresh, 0,
    "How many connections (below this number) make us use the callout based mechanism");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
    &hpts_does_tp_logging, 0,
    "Do we add pacer logs to any tp that has logging on");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_assigned_cpu, CTLFLAG_RW,
    &hpts_use_assigned_cpu, 0,
    "Do we start any hpts timer on the assigned cpu?");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_oldest, CTLFLAG_RW,
    &hpts_uses_oldest, OLDEST_THRESHOLD,
    "Do syscalls look for the hpts that has been the longest since running (or just use cpu no if 0)?");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
    &dynamic_min_sleep, 250,
    "What is the dynamic minsleep value?");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
    &dynamic_max_sleep, 5000,
    "What is the dynamic maxsleep value?");

static int32_t max_pacer_loops = 10;
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
    &max_pacer_loops, 10,
    "What is the maximum number of times the pacer will loop trying to catch up");

#define HPTS_MAX_SLEEP_ALLOWED (NUM_OF_HPTSI_SLOTS/2)

static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;

static int
sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
{
        int error;
        uint32_t new;

        new = hpts_sleep_max;
        error = sysctl_handle_int(oidp, &new, 0, req);
        if (error == 0 && req->newptr) {
                if ((new < dynamic_min_sleep) ||
                    (new > HPTS_MAX_SLEEP_ALLOWED))
                        error = EINVAL;
                else
                        hpts_sleep_max = new;
        }
        return (error);
}

static int
sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
{
        int error;
        uint32_t new;

        new = tcp_min_hptsi_time;
        error = sysctl_handle_int(oidp, &new, 0, req);
        if (error == 0 && req->newptr) {
                if (new < LOWEST_SLEEP_ALLOWED)
                        error = EINVAL;
                else
                        tcp_min_hptsi_time = new;
        }
        return (error);
}

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &hpts_sleep_max, 0,
    &sysctl_net_inet_tcp_hpts_max_sleep, "IU",
    "Maximum time hpts will sleep");

SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &tcp_min_hptsi_time, 0,
    &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
    "The minimum time the hpts must sleep before processing more slots");

static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
static int tcp_hpts_no_wake_over_thresh = 1;

SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
    &ticks_indicate_more_sleep, 0,
    "If we only process this many or fewer on a timeout, we need longer sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
    &ticks_indicate_less_sleep, 0,
    "If we process this many or more on a timeout, we need less sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
    &tcp_hpts_no_wake_over_thresh, 0,
    "When we are over the threshold on the pacer do we prohibit wakeups?");

static void
tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
             int slots_to_run, int idx, int from_callout)
{
        union tcp_log_stackspecific log;
        /*
         * Unused logs are
         * 64 bit - delRate, rttProp, bw_inuse
         * 16 bit - cwnd_gain
         *  8 bit - bbr_state, bbr_substate, inhpts, ininput;
         */
        memset(&log.u_bbr, 0, sizeof(log.u_bbr));
        log.u_bbr.flex1 = hpts->p_nxt_slot;
        log.u_bbr.flex2 = hpts->p_cur_slot;
        log.u_bbr.flex3 = hpts->p_prev_slot;
        log.u_bbr.flex4 = idx;
        log.u_bbr.flex5 = hpts->p_curtick;
        log.u_bbr.flex6 = hpts->p_on_queue_cnt;
        log.u_bbr.flex7 = hpts->p_cpu;
        log.u_bbr.flex8 = (uint8_t)from_callout;
        log.u_bbr.inflight = slots_to_run;
        log.u_bbr.applimited = hpts->overidden_sleep;
        log.u_bbr.delivered = hpts->saved_curtick;
        log.u_bbr.timeStamp = tcp_tv_to_usectick(tv);
        log.u_bbr.epoch = hpts->saved_curslot;
        log.u_bbr.lt_epoch = hpts->saved_prev_slot;
        log.u_bbr.pkts_out = hpts->p_delayed_by;
        log.u_bbr.lost = hpts->p_hpts_sleep_time;
        log.u_bbr.pacing_gain = hpts->p_cpu;
        log.u_bbr.pkt_epoch = hpts->p_runningslot;
        log.u_bbr.use_lt_bw = 1;
        TCP_LOG_EVENTP(tp, NULL,
                       &tp->t_inpcb->inp_socket->so_rcv,
                       &tp->t_inpcb->inp_socket->so_snd,
                       BBR_LOG_HPTSDIAG, 0,
                       0, &log, false, tv);
}

static void
tcp_wakehpts(struct tcp_hpts_entry *hpts)
{
        HPTS_MTX_ASSERT(hpts);

        if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
                hpts->p_direct_wake = 0;
                return;
        }
        if (hpts->p_hpts_wake_scheduled == 0) {
                hpts->p_hpts_wake_scheduled = 1;
                swi_sched(hpts->ie_cookie, 0);
        }
}

static void
hpts_timeout_swi(void *arg)
{
        struct tcp_hpts_entry *hpts;

        hpts = (struct tcp_hpts_entry *)arg;
        swi_sched(hpts->ie_cookie, 0);
}

static inline void
hpts_sane_pace_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int clear)
{
        HPTS_MTX_ASSERT(hpts);
        KASSERT(hpts->p_cpu == inp->inp_hpts_cpu, ("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
        KASSERT(inp->inp_in_hpts != 0, ("%s: hpts:%p inp:%p not on the hpts?", __FUNCTION__, hpts, inp));
        TAILQ_REMOVE(head, inp, inp_hpts);
        hpts->p_on_queue_cnt--;
        KASSERT(hpts->p_on_queue_cnt >= 0,
                ("Hpts goes negative inp:%p hpts:%p",
                 inp, hpts));
        if (clear) {
                inp->inp_hpts_request = 0;
                inp->inp_in_hpts = 0;
        }
}

static inline void
hpts_sane_pace_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int line, int noref)
{
        HPTS_MTX_ASSERT(hpts);
        KASSERT(hpts->p_cpu == inp->inp_hpts_cpu,
                ("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
        KASSERT(((noref == 1) && (inp->inp_in_hpts == 1)) ||
                ((noref == 0) && (inp->inp_in_hpts == 0)),
                ("%s: hpts:%p inp:%p already on the hpts?",
                 __FUNCTION__, hpts, inp));
        TAILQ_INSERT_TAIL(head, inp, inp_hpts);
        inp->inp_in_hpts = 1;
        hpts->p_on_queue_cnt++;
        if (noref == 0) {
                in_pcbref(inp);
        }
}

static inline void
hpts_sane_input_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, int clear)
{
        HPTS_MTX_ASSERT(hpts);
        KASSERT(hpts->p_cpu == inp->inp_hpts_cpu,
                ("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
        KASSERT(inp->inp_in_input != 0,
                ("%s: hpts:%p inp:%p not on the input hpts?", __FUNCTION__, hpts, inp));
        TAILQ_REMOVE(&hpts->p_input, inp, inp_input);
        hpts->p_on_inqueue_cnt--;
        KASSERT(hpts->p_on_inqueue_cnt >= 0,
                ("Hpts in goes negative inp:%p hpts:%p",
                 inp, hpts));
        KASSERT((((TAILQ_EMPTY(&hpts->p_input) != 0) && (hpts->p_on_inqueue_cnt == 0)) ||
                 ((TAILQ_EMPTY(&hpts->p_input) == 0) && (hpts->p_on_inqueue_cnt > 0))),
                ("%s hpts:%p input cnt (p_on_inqueue):%d and queue state mismatch",
                 __FUNCTION__, hpts, hpts->p_on_inqueue_cnt));
        if (clear)
                inp->inp_in_input = 0;
}

static inline void
hpts_sane_input_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, int line)
{
        HPTS_MTX_ASSERT(hpts);
        KASSERT(hpts->p_cpu == inp->inp_hpts_cpu,
                ("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp));
        KASSERT(inp->inp_in_input == 0,
                ("%s: hpts:%p inp:%p already on the input hpts?", __FUNCTION__, hpts, inp));
        TAILQ_INSERT_TAIL(&hpts->p_input, inp, inp_input);
        inp->inp_in_input = 1;
        hpts->p_on_inqueue_cnt++;
        in_pcbref(inp);
}

struct tcp_hpts_entry *
tcp_cur_hpts(struct inpcb *inp)
{
        int32_t hpts_num;
        struct tcp_hpts_entry *hpts;

        hpts_num = inp->inp_hpts_cpu;
        hpts = tcp_pace.rp_ent[hpts_num];
        return (hpts);
}

struct tcp_hpts_entry *
tcp_hpts_lock(struct inpcb *inp)
{
        struct tcp_hpts_entry *hpts;
        int32_t hpts_num;

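        /*
         * The inp's assigned hpts can change while we wait for the
         * mutex, so after acquiring it, re-check the assignment and
         * retry if it moved.
         */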
again:
        hpts_num = inp->inp_hpts_cpu;
        hpts = tcp_pace.rp_ent[hpts_num];
        KASSERT(mtx_owned(&hpts->p_mtx) == 0,
                ("Hpts:%p owns mtx prior-to lock line:%d",
                 hpts, __LINE__));
        mtx_lock(&hpts->p_mtx);
        if (hpts_num != inp->inp_hpts_cpu) {
                mtx_unlock(&hpts->p_mtx);
                goto again;
        }
        return (hpts);
}

struct tcp_hpts_entry *
tcp_input_lock(struct inpcb *inp)
{
        struct tcp_hpts_entry *hpts;
        int32_t hpts_num;

again:
        hpts_num = inp->inp_input_cpu;
        hpts = tcp_pace.rp_ent[hpts_num];
        KASSERT(mtx_owned(&hpts->p_mtx) == 0,
                ("Hpts:%p owns mtx prior-to lock line:%d",
                 hpts, __LINE__));
        mtx_lock(&hpts->p_mtx);
        if (hpts_num != inp->inp_input_cpu) {
                mtx_unlock(&hpts->p_mtx);
                goto again;
        }
        return (hpts);
}

static void
tcp_remove_hpts_ref(struct inpcb *inp, struct tcp_hpts_entry *hpts, int line)
{
        int32_t add_freed;
        int32_t ret;

        if (inp->inp_flags2 & INP_FREED) {
                /*
                 * Need to play a special trick so that in_pcbrele_wlocked
                 * does not return 1 when it really should have returned 0.
                 */
                add_freed = 1;
                inp->inp_flags2 &= ~INP_FREED;
        } else {
                add_freed = 0;
        }
#ifndef INP_REF_DEBUG
        ret = in_pcbrele_wlocked(inp);
#else
        ret = __in_pcbrele_wlocked(inp, line);
#endif
        KASSERT(ret != 1, ("inpcb:%p release ret 1", inp));
        if (add_freed) {
                inp->inp_flags2 |= INP_FREED;
        }
}

static void
tcp_hpts_remove_locked_output(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
{
        if (inp->inp_in_hpts) {
                hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], 1);
                tcp_remove_hpts_ref(inp, hpts, line);
        }
}

static void
tcp_hpts_remove_locked_input(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
{
        HPTS_MTX_ASSERT(hpts);
        if (inp->inp_in_input) {
                hpts_sane_input_remove(hpts, inp, 1);
                tcp_remove_hpts_ref(inp, hpts, line);
        }
}

/*
 * Called normally with the INP_LOCKED, but that
 * does not matter; the hpts lock is the key,
 * and the lock order allows us to hold the
 * INP lock and then get the hpts lock.
 *
 * Valid values in the flags are
 * HPTS_REMOVE_OUTPUT - remove from the output of the hpts.
 * HPTS_REMOVE_INPUT - remove from the input of the hpts.
 * Note that you can use one or both values together
 * and get two actions.
 */
void
__tcp_hpts_remove(struct inpcb *inp, int32_t flags, int32_t line)
{
        struct tcp_hpts_entry *hpts;

        INP_WLOCK_ASSERT(inp);
        if (flags & HPTS_REMOVE_OUTPUT) {
                hpts = tcp_hpts_lock(inp);
                tcp_hpts_remove_locked_output(hpts, inp, flags, line);
                mtx_unlock(&hpts->p_mtx);
        }
        if (flags & HPTS_REMOVE_INPUT) {
                hpts = tcp_input_lock(inp);
                tcp_hpts_remove_locked_input(hpts, inp, flags, line);
                mtx_unlock(&hpts->p_mtx);
        }
}

static inline int
hpts_slot(uint32_t wheel_slot, uint32_t plus)
{
        /*
         * Given a slot on the wheel, what slot
         * is that plus 'plus' slots out?
         */
        KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid tick %u not on wheel", wheel_slot));
        return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
}

static inline int
tick_to_wheel(uint32_t cts_in_wticks)
{
        /*
         * Given a timestamp in wheel ticks (to get real time
         * one would multiply by the number of ticks in a slot,
         * 10 by default), map it onto our limited
         * space wheel.
         */
        return (cts_in_wticks % NUM_OF_HPTSI_SLOTS);
}

static inline int
hpts_slots_diff(int prev_slot, int slot_now)
{
        /*
         * Given two slots that are someplace
         * on our wheel, how far are they apart?
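         *
         * A worked example (pure arithmetic): with prev_slot of
         * NUM_OF_HPTSI_SLOTS - 8 and slot_now of 10, the wheel has
         * wrapped, so the distance is 8 + 10 = 18 slots.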
         */
        if (slot_now > prev_slot)
                return (slot_now - prev_slot);
        else if (slot_now == prev_slot)
                /*
                 * Special case, same means we can go all of our
                 * wheel less one slot.
                 */
                return (NUM_OF_HPTSI_SLOTS - 1);
        else
                return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
}

/*
 * Given a slot on the wheel that is the current time
 * mapped to the wheel (wheel_slot), what is the maximum
 * distance forward that can be obtained without
 * wrapping past either prev_slot or running_slot
 * depending on the hpts state? Also, if passed
 * a uint32_t *, fill it with the slot location.
 *
 * Note if you do not give this function the current
 * time (that you think it is) mapped to the wheel slot
 * then the results will not be what you expect and
 * could lead to invalid inserts.
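 *
 * A worked example under assumed values: if the pacer is asleep
 * (not active) with p_prev_slot == 100 and the caller's current
 * wheel_slot is 110, then *target_slot is set to slot 99, the
 * distance already travelled is 10 slots, and NUM_OF_HPTSI_SLOTS - 10
 * slots remain usable.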
 */
static inline int32_t
max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
{
        uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;

        if ((hpts->p_hpts_active == 1) &&
            (hpts->p_wheel_complete == 0)) {
                end_slot = hpts->p_runningslot;
                /* Back up one tick */
                if (end_slot == 0)
                        end_slot = NUM_OF_HPTSI_SLOTS - 1;
                else
                        end_slot--;
                if (target_slot)
                        *target_slot = end_slot;
        } else {
                /*
                 * For the case where we are
                 * not active, or we have
                 * completed the pass over
                 * the wheel, we can use the
                 * prev tick and subtract one from it. This puts us
                 * as far out as possible on the wheel.
                 */
                end_slot = hpts->p_prev_slot;
                if (end_slot == 0)
                        end_slot = NUM_OF_HPTSI_SLOTS - 1;
                else
                        end_slot--;
                if (target_slot)
                        *target_slot = end_slot;
                /*
                 * Now we have close to the full wheel left minus the
                 * time it has been since the pacer went to sleep. Note
                 * that wheel_slot, passed in, should be the current time
                 * from the perspective of the caller, mapped to the wheel.
                 */
                if (hpts->p_prev_slot != wheel_slot)
                        dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
                else
                        dis_to_travel = 1;
                /*
                 * dis_to_travel in this case is the space from when the
                 * pacer stopped (p_prev_slot) to where our wheel_slot
                 * is now. To know how many slots we can put it in we
                 * subtract from the wheel size. We would not want
                 * to place something after p_prev_slot or it will
                 * get run too soon.
                 */
                return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
        }
        /*
         * So how many slots are there between p_runningslot -> p_cur_slot?
         * That is what is currently unavailable for insertion. Special
         * case when we are at the last slot, this gets 1, so that
         * the answer to how many slots are available is all but 1.
         */
        if (hpts->p_runningslot == hpts->p_cur_slot)
                dis_to_travel = 1;
        else
                dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
        /*
         * How long has the pacer been running?
         */
        if (hpts->p_cur_slot != wheel_slot) {
                /* The pacer is a bit late */
                pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
        } else {
                /* The pacer is right on time, now == pacer's start time */
                pacer_to_now = 0;
        }
        /*
         * To get the number left that we can insert into, we simply
         * subtract the distance the pacer has to run from how
         * many slots there are.
         */
        avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
        /*
         * Now how many of those will we eat due to the pacer's
         * start time (p_cur_slot) being behind the
         * real time (wheel_slot)?
         */
        if (avail_on_wheel <= pacer_to_now) {
                /*
                 * Wheel wrap; we can't fit on the wheel. That
                 * is unusual, the system must be way overloaded!
                 * Insert into the assured slot, and return the
                 * special value 0.
                 */
                counter_u64_add(combined_wheel_wrap, 1);
                *target_slot = hpts->p_nxt_slot;
                return (0);
        } else {
                /*
                 * We know how many slots are open
                 * on the wheel (the reverse of what
                 * is left to run). Take away the time
                 * from the pacer's start to now (wheel_slot)
                 * and that tells you how many slots are
                 * open that can be inserted into and that won't
                 * be touched by the pacer until later.
                 */
                return (avail_on_wheel - pacer_to_now);
        }
}

static int
tcp_queue_to_hpts_immediate_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line, int32_t noref)
{
        uint32_t need_wake = 0;

        HPTS_MTX_ASSERT(hpts);
        if (inp->inp_in_hpts == 0) {
                /* Ok we need to set it on the hpts in the current slot */
                inp->inp_hpts_request = 0;
                if ((hpts->p_hpts_active == 0) ||
                    (hpts->p_wheel_complete)) {
                        /*
                         * A sleeping hpts we want to run in the next slot;
                         * note that in this state p_prev_slot == p_cur_slot.
                         */
                        inp->inp_hptsslot = hpts_slot(hpts->p_prev_slot, 1);
                        if ((hpts->p_on_min_sleep == 0) && (hpts->p_hpts_active == 0))
                                need_wake = 1;
                } else if ((void *)inp == hpts->p_inp) {
                        /*
                         * The hpts system is running and the caller
                         * was awoken by the hpts system.
                         * We can't allow you to go into the same slot we
                         * are in (we don't want a loop :-D).
                         */
                        inp->inp_hptsslot = hpts->p_nxt_slot;
                } else
                        inp->inp_hptsslot = hpts->p_runningslot;
                hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
                if (need_wake) {
                        /*
                         * Activate the hpts if it is sleeping and its
                         * timeout is not 1.
                         */
                        hpts->p_direct_wake = 1;
                        tcp_wakehpts(hpts);
                }
        }
        return (need_wake);
}

int
__tcp_queue_to_hpts_immediate(struct inpcb *inp, int32_t line)
{
        int32_t ret;
        struct tcp_hpts_entry *hpts;

        INP_WLOCK_ASSERT(inp);
        hpts = tcp_hpts_lock(inp);
        ret = tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
        mtx_unlock(&hpts->p_mtx);
        return (ret);
}

#ifdef INVARIANTS
static void
check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t inp_hptsslot, int line)
{
        /*
         * Sanity checks for the pacer with invariants
         * on insert.
         */
        KASSERT(inp_hptsslot < NUM_OF_HPTSI_SLOTS,
                ("hpts:%p inp:%p slot:%d > max",
                 hpts, inp, inp_hptsslot));
        if ((hpts->p_hpts_active) &&
            (hpts->p_wheel_complete == 0)) {
                /*
                 * If the pacer is processing an arc
                 * of the wheel, we need to make
                 * sure we are not inserting within
                 * that arc.
                 */
                int distance, yet_to_run;

                distance = hpts_slots_diff(hpts->p_runningslot, inp_hptsslot);
                if (hpts->p_runningslot != hpts->p_cur_slot)
                        yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
                else
                        yet_to_run = 0; /* processing last slot */
                KASSERT(yet_to_run <= distance,
                        ("hpts:%p inp:%p slot:%d distance:%d yet_to_run:%d rs:%d cs:%d",
                         hpts, inp, inp_hptsslot,
                         distance, yet_to_run,
                         hpts->p_runningslot, hpts->p_cur_slot));
        }
}
#endif

static void
tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t slot, int32_t line,
                       struct hpts_diag *diag, struct timeval *tv)
{
        uint32_t need_new_to = 0;
        uint32_t wheel_cts;
        int32_t wheel_slot, maxslots, last_slot;
        int cpu;
        int8_t need_wakeup = 0;

        HPTS_MTX_ASSERT(hpts);
        if (diag) {
                memset(diag, 0, sizeof(struct hpts_diag));
                diag->p_hpts_active = hpts->p_hpts_active;
                diag->p_prev_slot = hpts->p_prev_slot;
                diag->p_runningslot = hpts->p_runningslot;
                diag->p_nxt_slot = hpts->p_nxt_slot;
                diag->p_cur_slot = hpts->p_cur_slot;
                diag->p_curtick = hpts->p_curtick;
                diag->p_lasttick = hpts->p_lasttick;
                diag->slot_req = slot;
                diag->p_on_min_sleep = hpts->p_on_min_sleep;
                diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
        }
        KASSERT(inp->inp_in_hpts == 0, ("Hpts:%p tp:%p already on hpts and add?", hpts, inp));
        if (slot == 0) {
                /* Immediate */
                tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
                return;
        }
        /* Get the current time relative to the wheel */
        wheel_cts = tcp_tv_to_hptstick(tv);
        /* Map it onto the wheel */
        wheel_slot = tick_to_wheel(wheel_cts);
        /* Now what's the max we can place it at? */
        maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
        if (diag) {
                diag->wheel_slot = wheel_slot;
                diag->maxslots = maxslots;
                diag->wheel_cts = wheel_cts;
        }
        if (maxslots == 0) {
                /* The pacer is in a wheel wrap behind, yikes! */
                if (slot > 1) {
                        /*
                         * Reduce by 1 to prevent a forever loop in
                         * case something else is wrong. Note this
                         * probably does not hurt because, if the pacer
                         * truly is that far behind, we will be
                         * > 1 second late in calling anyway.
                         */
                        slot--;
                }
                inp->inp_hptsslot = last_slot;
                inp->inp_hpts_request = slot;
        } else if (maxslots >= slot) {
                /* It all fits on the wheel */
                inp->inp_hpts_request = 0;
                inp->inp_hptsslot = hpts_slot(wheel_slot, slot);
        } else {
                /* It does not fit */
                inp->inp_hpts_request = slot - maxslots;
                inp->inp_hptsslot = last_slot;
        }
        if (diag) {
                diag->slot_remaining = inp->inp_hpts_request;
                diag->inp_hptsslot = inp->inp_hptsslot;
        }
#ifdef INVARIANTS
        check_if_slot_would_be_wrong(hpts, inp, inp->inp_hptsslot, line);
#endif
        hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, 0);
        if ((hpts->p_hpts_active == 0) &&
            (inp->inp_hpts_request == 0) &&
            (hpts->p_on_min_sleep == 0)) {
                /*
                 * The hpts is sleeping and NOT on a minimum
                 * sleep time; we need to figure out where
                 * it will wake up and whether we need to
                 * reschedule its time-out.
                 */
                uint32_t have_slept, yet_to_sleep;

                /* Now do we need to restart the hpts's timer? */
                have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
                if (have_slept < hpts->p_hpts_sleep_time)
                        yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
                else {
                        /* We are over-due */
                        yet_to_sleep = 0;
                        need_wakeup = 1;
                }
                if (diag) {
                        diag->have_slept = have_slept;
                        diag->yet_to_sleep = yet_to_sleep;
                }
                if (yet_to_sleep &&
                    (yet_to_sleep > slot)) {
                        /*
                         * We need to reschedule the hpts's time-out.
                         */
                        hpts->p_hpts_sleep_time = slot;
                        need_new_to = slot * HPTS_TICKS_PER_SLOT;
                }
        }
        /*
         * Now how long is the hpts sleeping for? If active is 1, it's
         * up and ticking and we do nothing; otherwise we may need to
         * reschedule its callout if need_new_to is set from above.
         */
        if (need_wakeup) {
                hpts->p_direct_wake = 1;
                tcp_wakehpts(hpts);
                if (diag) {
                        diag->need_new_to = 0;
                        diag->co_ret = 0xffff0000;
                }
        } else if (need_new_to) {
                int32_t co_ret;
                struct timeval tv;
                sbintime_t sb;

                tv.tv_sec = 0;
                tv.tv_usec = 0;
                while (need_new_to > HPTS_USEC_IN_SEC) {
                        tv.tv_sec++;
                        need_new_to -= HPTS_USEC_IN_SEC;
                }
                tv.tv_usec = need_new_to;
                sb = tvtosbt(tv);
                cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
                co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
                                              hpts_timeout_swi, hpts, cpu,
                                              (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
                if (diag) {
                        diag->need_new_to = need_new_to;
                        diag->co_ret = co_ret;
                }
        }
}

uint32_t
tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag)
{
        struct tcp_hpts_entry *hpts;
        uint32_t slot_on;
        struct timeval tv;

        /*
         * We now return the next-slot the hpts will be on, beyond its
         * current run (if up) or where it was when it stopped if it is
         * sleeping.
         */
        INP_WLOCK_ASSERT(inp);
        hpts = tcp_hpts_lock(inp);
        microuptime(&tv);
        tcp_hpts_insert_locked(hpts, inp, slot, line, diag, &tv);
        slot_on = hpts->p_nxt_slot;
        mtx_unlock(&hpts->p_mtx);
        return (slot_on);
}

uint32_t
__tcp_hpts_insert(struct inpcb *inp, uint32_t slot, int32_t line)
{
        return (tcp_hpts_insert_diag(inp, slot, line, NULL));
}

int
__tcp_queue_to_input_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line)
{
        int32_t retval = 0;

        HPTS_MTX_ASSERT(hpts);
        if (inp->inp_in_input == 0) {
                /* Ok we need to set it on the hpts in the current slot */
                hpts_sane_input_insert(hpts, inp, line);
                retval = 1;
                if ((hpts->p_hpts_active == 0) &&
                    (hpts->p_on_min_sleep == 0)) {
                        /*
                         * Activate the hpts if it is sleeping.
                         */
                        retval = 2;
                        hpts->p_direct_wake = 1;
                        tcp_wakehpts(hpts);
                }
        } else if ((hpts->p_hpts_active == 0) &&
                   (hpts->p_on_min_sleep == 0)) {
                retval = 4;
                hpts->p_direct_wake = 1;
                tcp_wakehpts(hpts);
        }
        return (retval);
}

int32_t
__tcp_queue_to_input(struct inpcb *inp, int line)
{
        struct tcp_hpts_entry *hpts;
        int32_t ret;

        hpts = tcp_input_lock(inp);
        ret = __tcp_queue_to_input_locked(inp, hpts, line);
        mtx_unlock(&hpts->p_mtx);
        return (ret);
}

void
__tcp_set_inp_to_drop(struct inpcb *inp, uint16_t reason, int32_t line)
{
        struct tcp_hpts_entry *hpts;
        struct tcpcb *tp;

        tp = intotcpcb(inp);
        hpts = tcp_input_lock(tp->t_inpcb);
        if (inp->inp_in_input == 0) {
                /* Ok we need to set it on the hpts in the current slot */
                hpts_sane_input_insert(hpts, inp, line);
                if ((hpts->p_hpts_active == 0) &&
                    (hpts->p_on_min_sleep == 0)) {
                        /*
                         * Activate the hpts if it is sleeping.
                         */
                        hpts->p_direct_wake = 1;
                        tcp_wakehpts(hpts);
                }
        } else if ((hpts->p_hpts_active == 0) &&
                   (hpts->p_on_min_sleep == 0)) {
                hpts->p_direct_wake = 1;
                tcp_wakehpts(hpts);
        }
        inp->inp_hpts_drop_reas = reason;
        mtx_unlock(&hpts->p_mtx);
}

uint16_t
hpts_random_cpu(struct inpcb *inp)
{
        /*
         * No flow type is set; distribute the load randomly.
         */
        uint16_t cpuid;
        uint32_t ran;

        /*
         * If one has been set, use it, i.e. we want both in and out on the
         * same hpts.
         */
        if (inp->inp_input_cpu_set) {
                return (inp->inp_input_cpu);
        } else if (inp->inp_hpts_cpu_set) {
                return (inp->inp_hpts_cpu);
        }
        /* Nothing set, use a random number */
        ran = arc4random();
        cpuid = (((ran & 0xffff) % mp_ncpus) % tcp_pace.rp_num_hptss);
        return (cpuid);
}

static uint16_t
hpts_cpuid(struct inpcb *inp, int *failed)
{
        u_int cpuid;
#if !defined(RSS) && defined(NUMA)
        struct hpts_domain_info *di;
#endif

        *failed = 0;
        /*
         * If one has been set, use it, i.e. we want both in and out on the
         * same hpts.
         */
        if (inp->inp_input_cpu_set) {
                return (inp->inp_input_cpu);
        } else if (inp->inp_hpts_cpu_set) {
                return (inp->inp_hpts_cpu);
        }
        /*
         * If we are using the irq cpu set by LRO or
         * the driver then it overrides all other domains.
         */
        if (tcp_use_irq_cpu) {
                if (inp->inp_irq_cpu_set == 0) {
                        *failed = 1;
                        return (0);
                }
                return (inp->inp_irq_cpu);
        }
        /* If one is set the other must be the same */
#ifdef RSS
        cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
        if (cpuid == NETISR_CPUID_NONE)
                return (hpts_random_cpu(inp));
        else
                return (cpuid);
#else
        /*
         * We don't have a flowid -> cpuid mapping, so cheat and just map
         * unknown cpuids to a random hpts. Not the best, but apparently
         * better than defaulting to swi 0.
         */
        if (inp->inp_flowtype == M_HASHTYPE_NONE) {
                counter_u64_add(cpu_uses_random, 1);
                return (hpts_random_cpu(inp));
        }
        /*
         * Hash to a thread based on the flowid.  If we are using numa,
         * then restrict the hash to the numa domain where the inp lives.
         */
#ifdef NUMA
        if (tcp_bind_threads == 2 && inp->inp_numa_domain != M_NODOM) {
                di = &hpts_domains[inp->inp_numa_domain];
                cpuid = di->cpu[inp->inp_flowid % di->count];
        } else
#endif
                cpuid = inp->inp_flowid % mp_ncpus;
        counter_u64_add(cpu_uses_flowid, 1);
        return (cpuid);
#endif
}

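/*
 * Free the chain of queued input packets (linked through m_nextpkt)
 * hanging off the connection at tp->t_in_pkt, clearing the head
 * pointer first.
 */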
static void
tcp_drop_in_pkts(struct tcpcb *tp)
{
        struct mbuf *m, *n;

        m = tp->t_in_pkt;
        if (m)
                n = m->m_nextpkt;
        else
                n = NULL;
        tp->t_in_pkt = NULL;
        while (m) {
                m_freem(m);
                m = n;
                if (m)
                        n = m->m_nextpkt;
        }
}

/*
 * Do NOT try to optimize the processing of inp's
 * by first pulling off all the inp's into a temporary
 * list (e.g. TAILQ_CONCAT). If you do that, the subtle
 * interactions of switching CPU's will kill you because of
 * problems in the linked-list manipulation. Basically
 * you would switch cpu's with the hpts mutex locked,
 * but then while you were processing one of the inp's,
 * some other one that you switched would get a new
 * packet on the different CPU. It would insert it
 * on the new hpts's input list. Creating a temporary
 * link in the inp will not fix it either, since
 * the other hpts will be doing the same thing and
 * you will both end up using the temporary link.
 *
 * You will die in an ASSERT for tailq corruption if you
 * run INVARIANTS, or you will die horribly without
 * INVARIANTS in some unknown way with a corrupt linked
 * list.
 */
static void
tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv)
{
        struct tcpcb *tp;
        struct inpcb *inp;
        uint16_t drop_reason;
        int16_t set_cpu;
        uint32_t did_prefetch = 0;
        int dropped;

        HPTS_MTX_ASSERT(hpts);
        NET_EPOCH_ASSERT();

        while ((inp = TAILQ_FIRST(&hpts->p_input)) != NULL) {
                HPTS_MTX_ASSERT(hpts);
                hpts_sane_input_remove(hpts, inp, 0);
                if (inp->inp_input_cpu_set == 0) {
                        set_cpu = 1;
                } else {
                        set_cpu = 0;
                }
                hpts->p_inp = inp;
                drop_reason = inp->inp_hpts_drop_reas;
                inp->inp_in_input = 0;
                mtx_unlock(&hpts->p_mtx);
                INP_WLOCK(inp);
#ifdef VIMAGE
                CURVNET_SET(inp->inp_vnet);
#endif
                if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
                    (inp->inp_flags2 & INP_FREED)) {
out:
                        hpts->p_inp = NULL;
                        if (in_pcbrele_wlocked(inp) == 0) {
                                INP_WUNLOCK(inp);
                        }
#ifdef VIMAGE
                        CURVNET_RESTORE();
#endif
                        mtx_lock(&hpts->p_mtx);
                        continue;
                }
                tp = intotcpcb(inp);
                if ((tp == NULL) || (tp->t_inpcb == NULL)) {
                        goto out;
                }
                if (drop_reason) {
                        /* This tcb is being destroyed for drop_reason */
                        tcp_drop_in_pkts(tp);
                        tp = tcp_drop(tp, drop_reason);
                        if (tp == NULL) {
                                INP_WLOCK(inp);
                        }
                        if (in_pcbrele_wlocked(inp) == 0)
                                INP_WUNLOCK(inp);
#ifdef VIMAGE
                        CURVNET_RESTORE();
#endif
                        mtx_lock(&hpts->p_mtx);
                        continue;
                }
                if (set_cpu) {
                        /*
                         * Setup so the next time we will move to the right
                         * CPU. This should be a rare event. It will
                         * sometimes happen when we are the client side
                         * (usually not the server). Somehow tcp_output()
                         * gets called before tcp_do_segment() sets the
                         * initial state. This means the r_cpu and r_hpts_cpu
                         * are 0. We get on the hpts, and then tcp_input()
                         * gets called setting up the r_cpu to the correct
                         * value. The hpts goes off and sees the mis-match.
                         * We simply correct it here and the CPU will switch
                         * to the new hpts the next time the tcb gets added
                         * to the hpts (not this time) :-)
                         */
                        tcp_set_hpts(inp);
                }
                if (tp->t_fb_ptr != NULL) {
                        kern_prefetch(tp->t_fb_ptr, &did_prefetch);
                        did_prefetch = 1;
                }
                if ((tp->t_fb->tfb_do_queued_segments != NULL) && tp->t_in_pkt) {
                        if (inp->inp_in_input)
                                tcp_hpts_remove(inp, HPTS_REMOVE_INPUT);
                        dropped = (*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0);
                        if (dropped) {
                                /* Re-acquire the wlock so we can release the reference */
                                INP_WLOCK(inp);
                        }
                } else if (tp->t_in_pkt) {
                        /*
                         * We reach here only if we had a
                         * stack that supported INP_SUPPORTS_MBUFQ
                         * and then somehow switched to a stack that
                         * does not. The packets are basically stranded
                         * and would hang with the connection until
                         * cleanup without this code. It's not the
                         * best way, but I know of no other way to
                         * handle it, since the stack needs functions
                         * it does not have in order to handle queued
                         * packets.
                         */
                        tcp_drop_in_pkts(tp);
                }
                if (in_pcbrele_wlocked(inp) == 0)
                        INP_WUNLOCK(inp);
                INP_UNLOCK_ASSERT(inp);
#ifdef VIMAGE
                CURVNET_RESTORE();
#endif
                mtx_lock(&hpts->p_mtx);
                hpts->p_inp = NULL;
        }
}

1380 static void
1381 tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
1382 {
1383         uint32_t t = 0, i, fnd = 0;
1384
1385         if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
1386                 /*
1387                  * Find next slot that is occupied and use that to
1388                  * be the sleep time.
1389                  */
1390                 for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
1391                         if (TAILQ_EMPTY(&hpts->p_hptss[t]) == 0) {
1392                                 fnd = 1;
1393                                 break;
1394                         }
1395                         t = (t + 1) % NUM_OF_HPTSI_SLOTS;
1396                 }
1397                 KASSERT(fnd != 0, ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
1398                 hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
1399         } else {
1400                 /* No one on the wheel; sleep for the maximum (hpts_sleep_max) */
1401                 hpts->p_hpts_sleep_time = hpts_sleep_max;
1402         }
1403 }
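/*
 * Note on units (illustrative): p_hpts_sleep_time is a slot count, not
 * microseconds. With the 10 usec slot granularity referenced elsewhere in
 * this file, callers convert it with
 *
 *     tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
 *
 * so a computed value of 50 means roughly a 500 usec sleep.
 */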
1404
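/*
 * The core wheel run. Walk every slot between p_prev_slot and the slot
 * mapped from the current tick, calling tcp_output() (and any queued
 * mbuf input) for each inp found, looping via "again" when enough time
 * elapsed during processing that more slots became due. Returns the
 * slot distance between the first endpoint processed and the exit
 * position (a rough measure of how much work ran) when an endpoint was
 * seen, otherwise 0.
 */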
1405 static int32_t
1406 tcp_hptsi(struct tcp_hpts_entry *hpts, int from_callout)
1407 {
1408         struct tcpcb *tp;
1409         struct inpcb *inp = NULL, *ninp;
1410         struct timeval tv;
1411         uint64_t total_slots_processed = 0;
1412         int32_t slots_to_run, i, error;
1413         int32_t paced_cnt = 0;
1414         int32_t loop_cnt = 0;
1415         int32_t did_prefetch = 0;
1416         int32_t prefetch_ninp = 0;
1417         int32_t prefetch_tp = 0;
1418         int32_t wrap_loop_cnt = 0;
1419         int32_t slot_pos_of_endpoint = 0;
1420         int32_t orig_exit_slot;
1421         int16_t set_cpu;
1422         int8_t completed_measure = 0, seen_endpoint = 0;
1423
1424         HPTS_MTX_ASSERT(hpts);
1425         NET_EPOCH_ASSERT();
1426         /* record previous info for any logging */
1427         hpts->saved_lasttick = hpts->p_lasttick;
1428         hpts->saved_curtick = hpts->p_curtick;
1429         hpts->saved_curslot = hpts->p_cur_slot;
1430         hpts->saved_prev_slot = hpts->p_prev_slot;
1431
1432         hpts->p_lasttick = hpts->p_curtick;
1433         hpts->p_curtick = tcp_gethptstick(&tv);
1434         cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1435         orig_exit_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1436         if ((hpts->p_on_queue_cnt == 0) ||
1437             (hpts->p_lasttick == hpts->p_curtick)) {
1438                 /*
1439                  * No time has yet passed,
1440                  * or nothing to do.
1441                  */
1442                 hpts->p_prev_slot = hpts->p_cur_slot;
1443                 hpts->p_lasttick = hpts->p_curtick;
1444                 goto no_run;
1445         }
1446 again:
1447         hpts->p_wheel_complete = 0;
1448         HPTS_MTX_ASSERT(hpts);
1449         slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
1450         if (((hpts->p_curtick - hpts->p_lasttick) >
1451              ((NUM_OF_HPTSI_SLOTS-1) * HPTS_TICKS_PER_SLOT)) &&
1452             (hpts->p_on_queue_cnt != 0)) {
1453                 /*
1454                  * Wheel wrap is occurring; basically we
1455                  * are behind, and the distance between
1456                  * runs has spread so much that it has exceeded
1457                  * the time on the wheel (1.024 seconds). This
1458                  * is ugly and should NOT be happening. We
1459                  * need to run the entire wheel. We last processed
1460                  * p_prev_slot, so that needs to be the last slot
1461                  * we run. The next slot after that is reserved
1462                  * as the first slot for new entries, and after
1463                  * it the running position starts. The problem is
1464                  * that the reserved "not yet" slot does not really
1465                  * exist, and there may be inp's in it that need
1466                  * running. We can merge those into the
1467                  * first slot at the head.
1468                  */
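                /*
                 * Illustrative example with hypothetical slot numbers
                 * (assuming the 10 usec slots / 1.024 second wheel noted
                 * above): if p_prev_slot is 7, slot 8 becomes p_nxt_slot
                 * (reserved for new insertions) and slot 9 becomes
                 * p_runningslot. Anything already queued in slot 8 is
                 * concatenated onto slot 9 below so it still runs, and
                 * slots_to_run is forced to NUM_OF_HPTSI_SLOTS - 1 so
                 * the whole wheel is swept.
                 */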
1469                 wrap_loop_cnt++;
1470                 hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
1471                 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
1472                 /*
1473                  * Adjust p_cur_slot to be where we are starting from
1474                  * hopefully we will catch up (fat chance if something
1475                  * is broken this bad :( )
1476                  */
1477                 hpts->p_cur_slot = hpts->p_prev_slot;
1478                 /*
1479                  * The next slot has entries to run too, and that would
1480                  * be where we would normally start; let's move them into
1481                  * the slot after it (p_prev_slot + 2) so that we will
1482                  * still run them. The extra 10 usec of lateness (from
1483                  * being put behind) does not really matter in this situation.
1484                  */
1485 #ifdef INVARIANTS
1486                 /*
1487                  * To prevent a panic we need to update the inpslot to the
1488                  * new location. This is safe since it takes both the
1489                  * INP lock and the pacer mutex to change the inp_hptsslot.
1490                  */
1491                 TAILQ_FOREACH(inp, &hpts->p_hptss[hpts->p_nxt_slot], inp_hpts) {
1492                         inp->inp_hptsslot = hpts->p_runningslot;
1493                 }
1494 #endif
1495                 TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot],
1496                              &hpts->p_hptss[hpts->p_nxt_slot], inp_hpts);
1497                 slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
1498                 counter_u64_add(wheel_wrap, 1);
1499         } else {
1500                 /*
1501                  * The nxt slot is always the one just before p_runningslot,
1502                  * though it is not usually used unless we are doing a wheel wrap.
1503                  */
1504                 hpts->p_nxt_slot = hpts->p_prev_slot;
1505                 hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
1506         }
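        /*
         * Example with hypothetical numbers: if p_prev_slot is 100 and
         * p_cur_slot is 105, slots_to_run comes out as 5 and the loop
         * below starts at p_runningslot = 101, sweeping slots 101-105.
         */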
1507         KASSERT((((TAILQ_EMPTY(&hpts->p_input) != 0) && (hpts->p_on_inqueue_cnt == 0)) ||
1508                  ((TAILQ_EMPTY(&hpts->p_input) == 0) && (hpts->p_on_inqueue_cnt > 0))),
1509                 ("%s hpts:%p in_hpts cnt:%d and queue state mismatch",
1510                  __FUNCTION__, hpts, hpts->p_on_inqueue_cnt));
1511         HPTS_MTX_ASSERT(hpts);
1512         if (hpts->p_on_queue_cnt == 0) {
1513                 goto no_one;
1514         }
1515         HPTS_MTX_ASSERT(hpts);
1516         for (i = 0; i < slots_to_run; i++) {
1517                 /*
1518                  * Calculate our delay; if there are no extra ticks there
1519                  * was no delay (i.e. if slots_to_run == 1, no delay).
1520                  */
1521                 hpts->p_delayed_by = (slots_to_run - (i + 1)) * HPTS_TICKS_PER_SLOT;
1522                 HPTS_MTX_ASSERT(hpts);
1523                 while ((inp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_runningslot])) != NULL) {
1524                         HPTS_MTX_ASSERT(hpts);
1525                         /* For debugging */
1526                         if (seen_endpoint == 0) {
1527                                 seen_endpoint = 1;
1528                                 orig_exit_slot = slot_pos_of_endpoint = hpts->p_runningslot;
1529                         } else if (completed_measure == 0) {
1530                                 /* Record the new position */
1531                                 orig_exit_slot = hpts->p_runningslot;
1532                         }
1533                         total_slots_processed++;
1534                         hpts->p_inp = inp;
1535                         paced_cnt++;
1536                         KASSERT(hpts->p_runningslot == inp->inp_hptsslot,
1537                                 ("Hpts:%p inp:%p slot mis-aligned %u vs %u",
1538                                  hpts, inp, hpts->p_runningslot, inp->inp_hptsslot));
1539                         /* Now pull it */
1540                         if (inp->inp_hpts_cpu_set == 0) {
1541                                 set_cpu = 1;
1542                         } else {
1543                                 set_cpu = 0;
1544                         }
1545                         hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[hpts->p_runningslot], 0);
1546                         if ((ninp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_runningslot])) != NULL) {
1547                                 /* We prefetch the next inp if possible */
1548                                 kern_prefetch(ninp, &prefetch_ninp);
1549                                 prefetch_ninp = 1;
1550                         }
1551                         if (inp->inp_hpts_request) {
1552                                 /*
1553                                  * This entry is deferred further out in
1554                                  * time than our wheel had available.
1555                                  * Push it back on the wheel, or run it
1556                                  * now, depending on the time remaining.
1557                                  */
1558                                 uint32_t maxslots, last_slot, remaining_slots;
1559
1560                                 remaining_slots = slots_to_run - (i + 1);
1561                                 if (inp->inp_hpts_request > remaining_slots) {
1562                                         /*
1563                                          * How far out can we go?
1564                                          */
1565                                         maxslots = max_slots_available(hpts, hpts->p_cur_slot, &last_slot);
1566                                         if (maxslots >= inp->inp_hpts_request) {
1567                                                 /* We can finally place it to be processed */
1568                                                 inp->inp_hptsslot = hpts_slot(hpts->p_runningslot, inp->inp_hpts_request);
1569                                                 inp->inp_hpts_request = 0;
1570                                         } else {
1571                                                 /* Work off some more time */
1572                                                 inp->inp_hptsslot = last_slot;
1573                                                 inp->inp_hpts_request -= maxslots;
1574                                         }
1575                                         hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], __LINE__, 1);
1576                                         hpts->p_inp = NULL;
1577                                         continue;
1578                                 }
1579                                 inp->inp_hpts_request = 0;
1580                                 /* Fall through, we will do it now */
1581                         }
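                        /*
                         * Worked example (hypothetical numbers): an inp
                         * that asked to be 150,000 slots out when only
                         * 90,000 were available is parked at last_slot
                         * with inp_hpts_request reduced to 60,000; each
                         * later pass works off more of the request until
                         * it fits on the wheel and finally runs.
                         */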
1582                         /*
1583                          * We clear the hpts flag here after dealing with
1584                          * remaining slots. This way anyone looking with the
1585                          * TCB lock will see it is on the hpts until just
1586                          * before we unlock.
1587                          */
1588                         inp->inp_in_hpts = 0;
1589                         mtx_unlock(&hpts->p_mtx);
1590                         INP_WLOCK(inp);
1591                         if (in_pcbrele_wlocked(inp)) {
1592                                 mtx_lock(&hpts->p_mtx);
1593                                 hpts->p_inp = NULL;
1594                                 continue;
1595                         }
1596                         if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
1597                             (inp->inp_flags2 & INP_FREED)) {
1598                         out_now:
1599                                 KASSERT(mtx_owned(&hpts->p_mtx) == 0,
1600                                         ("Hpts:%p owns mtx prior-to lock line:%d",
1601                                          hpts, __LINE__));
1602                                 INP_WUNLOCK(inp);
1603                                 mtx_lock(&hpts->p_mtx);
1604                                 hpts->p_inp = NULL;
1605                                 continue;
1606                         }
1607                         tp = intotcpcb(inp);
1608                         if ((tp == NULL) || (tp->t_inpcb == NULL)) {
1609                                 goto out_now;
1610                         }
1611                         if (set_cpu) {
1612                                 /*
1613                                  * Setup so the next time we will move to
1614                                  * the right CPU. This should be a rare
1615                          * event. It will sometimes happen when we
1616                          * are the client side (usually not the
1617                          * server). Somehow tcp_output() gets called
1618                          * before tcp_do_segment() sets the
1619                          * initial state. This means r_cpu and
1620                          * r_hpts_cpu are 0. We get on the hpts, and
1621                          * then tcp_input() gets called setting up
1622                          * r_cpu to the correct value. The hpts
1623                          * goes off and sees the mismatch. We
1624                          * simply correct it here and the CPU will
1625                          * switch to the new hpts next time the tcb
1626                          * gets added to the hpts (not this one)
1627                                  * :-)
1628                                  */
1629                                 tcp_set_hpts(inp);
1630                         }
1631 #ifdef VIMAGE
1632                         CURVNET_SET(inp->inp_vnet);
1633 #endif
1634                         /* Let's do any logging that we might want to */
1635                         if (hpts_does_tp_logging && (tp->t_logstate != TCP_LOG_STATE_OFF)) {
1636                                 tcp_hpts_log(hpts, tp, &tv, slots_to_run, i, from_callout);
1637                         }
1638                         /*
1639                          * There is a hole here. We get the refcnt on the
1640                          * inp so it will still be preserved, but to make
1641                          * sure we can get at the INP we need to hold the p_mtx
1642                          * above while we pull out the tp/inp. As long as
1643                          * fini gets the lock first, we are assured of having
1644                          * a sane INP we can lock and test.
1645                          */
1646                         KASSERT(mtx_owned(&hpts->p_mtx) == 0,
1647                                 ("Hpts:%p owns mtx prior-to tcp_output call line:%d",
1648                                  hpts, __LINE__));
1649
1650                         if (tp->t_fb_ptr != NULL) {
1651                                 kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1652                                 did_prefetch = 1;
1653                         }
1654                         if ((inp->inp_flags2 & INP_SUPPORTS_MBUFQ) && tp->t_in_pkt) {
1655                                 error = (*tp->t_fb->tfb_do_queued_segments)(inp->inp_socket, tp, 0);
1656                                 if (error) {
1657                                         /* The input killed the connection */
1658                                         goto skip_pacing;
1659                                 }
1660                         }
1661                         inp->inp_hpts_calls = 1;
1662                         error = tp->t_fb->tfb_tcp_output(tp);
1663                         inp->inp_hpts_calls = 0;
1664                         if (ninp && ninp->inp_ppcb) {
1665                                 /*
1666                                  * If we have a nxt inp, see if we can
1667                                  * prefetch its ppcb. Note this may seem
1668                                  * "risky" since we have no locks (other
1669                                  * than the previous inp) and there is no
1670                                  * assurance that ninp was not pulled while
1671                                  * we were processing inp and freed. If this
1672                                  * occurred it could mean that either:
1673                                  *
1674                                  * a) It's NULL (which is fine, we won't go
1675                                  * here) <or> b) It's valid (which is cool, we
1676                                  * will prefetch it) <or> c) The inp got
1677                                  * freed back to the slab which was
1678                                  * reallocated. Then the piece of memory was
1679                                  * re-used and something else (not an
1680                                  * address) is in inp_ppcb. If that occurs
1681                                  * we don't crash, but take a TLB shootdown
1682                                  * performance hit (same as if it was NULL
1683                                  * and we tried to pre-fetch it).
1684                                  *
1685                                  * Considering that the likelihood of <c> is
1686                                  * quite rare, we will take the risk of doing
1687                                  * this. If performance drops after testing
1688                                  * we can always take this out. NB: the
1689                                  * kern_prefetch on amd64 actually has
1690                                  * protection against a bad address now via
1691                                  * the DMAP_() tests. This will prevent the
1692                                  * TLB hit, and instead, if <c> occurs, just
1693                                  * cause us to load the cache with a useless
1694                                  * address (to us).
1695                                  */
1696                                 kern_prefetch(ninp->inp_ppcb, &prefetch_tp);
1697                                 prefetch_tp = 1;
1698                         }
1699                         INP_WUNLOCK(inp);
1700                 skip_pacing:
1701 #ifdef VIMAGE
1702                         CURVNET_RESTORE();
1703 #endif
1704                         INP_UNLOCK_ASSERT(inp);
1705                         KASSERT(mtx_owned(&hpts->p_mtx) == 0,
1706                                 ("Hpts:%p owns mtx prior-to lock line:%d",
1707                                  hpts, __LINE__));
1708                         mtx_lock(&hpts->p_mtx);
1709                         hpts->p_inp = NULL;
1710                 }
1711                 if (seen_endpoint) {
1712                         /*
1713                          * We now have an accurate distance between
1714                          * slot_pos_of_endpoint <-> orig_exit_slot
1715                          * to tell us how late we were, orig_exit_slot
1716                          * is where we calculated the end of our cycle to
1717                          * be when we first entered.
1718                          */
1719                         completed_measure = 1;
1720                 }
1721                 HPTS_MTX_ASSERT(hpts);
1722                 hpts->p_inp = NULL;
1723                 hpts->p_runningslot++;
1724                 if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
1725                         hpts->p_runningslot = 0;
1726                 }
1727         }
1728 no_one:
1729         HPTS_MTX_ASSERT(hpts);
1730         hpts->p_delayed_by = 0;
1731         /*
1732          * Check to see if we took an excess amount of time and need to run
1733          * more ticks (if we did not hit ENOBUFS).
1734          */
1735         KASSERT((((TAILQ_EMPTY(&hpts->p_input) != 0) && (hpts->p_on_inqueue_cnt == 0)) ||
1736                  ((TAILQ_EMPTY(&hpts->p_input) == 0) && (hpts->p_on_inqueue_cnt > 0))),
1737                 ("%s hpts:%p in_hpts cnt:%d queue state mismatch",
1738                  __FUNCTION__, hpts, hpts->p_on_inqueue_cnt));
1739         hpts->p_prev_slot = hpts->p_cur_slot;
1740         hpts->p_lasttick = hpts->p_curtick;
1741         if ((from_callout == 0) || (loop_cnt > max_pacer_loops)) {
1742                 /*
1743                  * Something is seriously slow: we have
1744                  * looped through processing the wheel
1745                  * max_pacer_loops times, and each time
1746                  * we cleared what needed to run we
1747                  * still had more to do. That means
1748                  * the system is hopelessly behind and
1749                  * can never catch up :(
1750                  *
1751                  * We will just lie to this thread
1752                  * and let it think p_curtick is
1753                  * correct. When it next awakens
1754                  * it will find itself further behind.
1755                  */
1756                 if (from_callout)
1757                         counter_u64_add(hpts_hopelessly_behind, 1);
1758                 goto no_run;
1759         }
1760         hpts->p_curtick = tcp_gethptstick(&tv);
1761         hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1762         if (seen_endpoint == 0) {
1763                 /* We saw no endpoint but we may be looping */
1764                 orig_exit_slot = hpts->p_cur_slot;
1765         }
1766         if ((wrap_loop_cnt < 2) &&
1767             (hpts->p_lasttick != hpts->p_curtick)) {
1768                 counter_u64_add(hpts_loops, 1);
1769                 loop_cnt++;
1770                 goto again;
1771         }
1772 no_run:
1773         cts_last_ran[hpts->p_num] = tcp_tv_to_usectick(&tv);
1774         /*
1775          * Set the flag to tell any slot input that
1776          * happens during input processing that
1777          * we are done.
1778          */
1779         hpts->p_wheel_complete = 1;
1780         /*
1781          * Run any input that may be there and was not
1782          * covered while running the wheel.
1783          */
1784         if (!TAILQ_EMPTY(&hpts->p_input)) {
1785                 tcp_input_data(hpts, &tv);
1786                 /*
1787                  * Now did we spend too long running input and need to run more ticks?
1788                  * Note that if wrap_loop_cnt < 2 then we should have the conditions
1789                  * in the KASSERTs true. But if the wheel is behind, i.e. wrap_loop_cnt
1790                  * is 2 or more, then the conditions most likely are *not* true. Also,
1791                  * if we are not called from the callout, we don't run the wheel multiple
1792                  * times, so the slots may not align either.
1793                  */
1794                 KASSERT(((hpts->p_prev_slot == hpts->p_cur_slot) ||
1795                          (wrap_loop_cnt >= 2) || (from_callout == 0)),
1796                         ("H:%p p_prev_slot:%u not equal to p_cur_slot:%u", hpts,
1797                          hpts->p_prev_slot, hpts->p_cur_slot));
1798                 KASSERT(((hpts->p_lasttick == hpts->p_curtick)
1799                          || (wrap_loop_cnt >= 2) || (from_callout == 0)),
1800                         ("H:%p p_lasttick:%u not equal to p_curtick:%u", hpts,
1801                          hpts->p_lasttick, hpts->p_curtick));
1802                 if (from_callout && (hpts->p_lasttick != hpts->p_curtick)) {
1803                         hpts->p_curtick = tcp_gethptstick(&tv);
1804                         counter_u64_add(hpts_loops, 1);
1805                         hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
1806                         goto again;
1807                 }
1808         }
1809         if (from_callout) {
1810                 tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);
1811         }
1812         if (seen_endpoint)
1813                 return (hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
1814         else
1815                 return (0);
1816 }
1817
1818 void
1819 __tcp_set_hpts(struct inpcb *inp, int32_t line)
1820 {
1821         struct tcp_hpts_entry *hpts;
1822         int failed;
1823
1824         INP_WLOCK_ASSERT(inp);
1825         hpts = tcp_hpts_lock(inp);
1826         if ((inp->inp_in_hpts == 0) &&
1827             (inp->inp_hpts_cpu_set == 0)) {
1828                 inp->inp_hpts_cpu = hpts_cpuid(inp, &failed);
1829                 if (failed == 0)
1830                         inp->inp_hpts_cpu_set = 1;
1831         }
1832         mtx_unlock(&hpts->p_mtx);
1833         hpts = tcp_input_lock(inp);
1834         if ((inp->inp_input_cpu_set == 0) &&
1835             (inp->inp_in_input == 0)) {
1836                 inp->inp_input_cpu = hpts_cpuid(inp, &failed);
1837                 if (failed == 0)
1838                         inp->inp_input_cpu_set = 1;
1839         }
1840         mtx_unlock(&hpts->p_mtx);
1841 }
1842
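/*
 * Report how far behind (in usec) the pacer assigned to this inp is;
 * p_delayed_by is the slots-behind count times HPTS_TICKS_PER_SLOT,
 * set while tcp_hptsi() works through overdue slots.
 */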
1843 uint16_t
1844 tcp_hpts_delayedby(struct inpcb *inp)
{
1845         return (tcp_pace.rp_ent[inp->inp_hpts_cpu]->p_delayed_by);
1846 }
1847
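/*
 * Opportunistic pacer entry used outside the callout path (e.g. from
 * syscall return or an LRO flush): try to take the pacer and run it
 * now, quietly backing off if it is already active or the mutex is
 * contended, then retune the dynamic sleep interval from the amount of
 * work the run performed.
 */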
1848 static void
1849 __tcp_run_hpts(struct tcp_hpts_entry *hpts)
1850 {
1851         int ticks_ran;
1852
1853         if (hpts->p_hpts_active) {
1854                 /* Already active */
1855                 return;
1856         }
1857         if (mtx_trylock(&hpts->p_mtx) == 0) {
1858                 /* Someone else got the lock */
1859                 return;
1860         }
1861         if (hpts->p_hpts_active)
1862                 goto out_with_mtx;
1863         hpts->syscall_cnt++;
1864         counter_u64_add(hpts_direct_call, 1);
1865         hpts->p_hpts_active = 1;
1866         ticks_ran = tcp_hptsi(hpts, 0);
1867         /* We may want to adjust the sleep values here */
1868         if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
1869                 if (ticks_ran > ticks_indicate_less_sleep) {
1870                         struct timeval tv;
1871                         sbintime_t sb;
1872                         int cpu;
1873
1874                         hpts->p_mysleep.tv_usec /= 2;
1875                         if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
1876                                 hpts->p_mysleep.tv_usec = dynamic_min_sleep;
1877                         /* Reschedule with the new timeout value */
1878                         tcp_hpts_set_max_sleep(hpts, 0);
                        /* Zero tv_sec; tvtosbt() below reads the whole timeval. */
                        tv.tv_sec = 0;
1879                         tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
1880                         /* Validate it is in the right range */
1881                         if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
1882                                 hpts->overidden_sleep = tv.tv_usec;
1883                                 tv.tv_usec = hpts->p_mysleep.tv_usec;
1884                         } else if (tv.tv_usec > dynamic_max_sleep) {
1885                                 /* Let's not let sleep get above this value */
1886                                 hpts->overidden_sleep = tv.tv_usec;
1887                                 tv.tv_usec = dynamic_max_sleep;
1888                         }
1889                         /*
1890                          * In this mode the timer is a backstop to
1891                          * all the userret/lro_flushes so we use
1892                          * the dynamic value and set the on_min_sleep
1893                          * flag so we will not be awoken.
1894                          */
1895                         sb = tvtosbt(tv);
1896                         cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
1897                         /* Store off to make visible the actual sleep time */
1898                         hpts->sleeping = tv.tv_usec;
1899                         callout_reset_sbt_on(&hpts->co, sb, 0,
1900                                              hpts_timeout_swi, hpts, cpu,
1901                                              (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1902                 } else if (ticks_ran < ticks_indicate_more_sleep) {
1903                         /* For the longer sleep, don't reschedule hpts */
1904                         hpts->p_mysleep.tv_usec *= 2;
1905                         if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
1906                                 hpts->p_mysleep.tv_usec = dynamic_max_sleep;
1907                 }
1908                 hpts->p_on_min_sleep = 1;
1909         }
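        /*
         * Net effect of the adjustment above (illustrative): busy runs
         * halve p_mysleep.tv_usec toward dynamic_min_sleep, quiet runs
         * double it toward dynamic_max_sleep, so the pacer settles at a
         * rate where the userret/LRO calls keep up with the load.
         */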
1910         hpts->p_hpts_active = 0;
1911 out_with_mtx:
1912         HPTS_MTX_ASSERT(hpts);
1913         mtx_unlock(&hpts->p_mtx);
1914 }
1915
1916 static struct tcp_hpts_entry *
1917 tcp_choose_hpts_to_run(void)
1918 {
1919         int i, oldest_idx;
1920         uint32_t cts, time_since_ran, calc;
1921
1922         if ((hpts_uses_oldest == 0) ||
1923             ((hpts_uses_oldest > 1) &&
1924              (tcp_pace.rp_ent[(tcp_pace.rp_num_hptss-1)]->p_on_queue_cnt >= hpts_uses_oldest))) {
1925                 /*
1926                  * We have either disabled the feature (0), or
1927                  * we have crossed over the oldest threshold on the
1928                  * last hpts. We use the last one for simplification
1929                  * since we don't want to use the first one (it may
1930                  * have starting connections that have not settled
1931                  * on the cpu yet).
1932                  */
1933                 return (tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
1934         }
1935         /* Let's find the oldest hpts to attempt to run */
1936         cts = tcp_get_usecs(NULL);
1937         time_since_ran = 0;
1938         oldest_idx = -1;
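        /*
         * Scan for the pacer that has gone longest without running;
         * TSTMP_GT guards against a cts_last_ran value that is (modulo
         * timestamp wrap) ahead of the current time.
         */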
1939         for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1940                 if (TSTMP_GT(cts, cts_last_ran[i]))
1941                         calc = cts - cts_last_ran[i];
1942                 else
1943                         calc = 0;
1944                 if (calc > time_since_ran) {
1945                         oldest_idx = i;
1946                         time_since_ran = calc;
1947                 }
1948         }
1949         if (oldest_idx >= 0)
1950                 return (tcp_pace.rp_ent[oldest_idx]);
1951         else
1952                 return (tcp_pace.rp_ent[(curcpu % tcp_pace.rp_num_hptss)]);
1953 }
1954
1956 void
1957 tcp_run_hpts(void)
1958 {
1959         struct tcp_hpts_entry *hpts;
1960         struct epoch_tracker et;
1961
1962         NET_EPOCH_ENTER(et);
1963         hpts = tcp_choose_hpts_to_run();
1964         __tcp_run_hpts(hpts);
1965         NET_EPOCH_EXIT(et);
1966 }
1967
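/*
 * SWI handler for one pacer: determine why we were woken (direct wake
 * vs. callout timeout), run the wheel once via tcp_hptsi(), then choose
 * a new sleep interval based on how much work ran and re-arm the
 * callout.
 */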
1969 static void
1970 tcp_hpts_thread(void *ctx)
1971 {
1972         struct tcp_hpts_entry *hpts;
1973         struct epoch_tracker et;
1974         struct timeval tv;
1975         sbintime_t sb;
1976         int cpu, ticks_ran;
1977
1978         hpts = (struct tcp_hpts_entry *)ctx;
1979         mtx_lock(&hpts->p_mtx);
1980         if (hpts->p_direct_wake) {
1981                 /* Signaled by input or output with low occupancy count. */
1982                 callout_stop(&hpts->co);
1983                 counter_u64_add(hpts_direct_awakening, 1);
1984         } else {
1985                 /* Timed out, the normal case. */
1986                 counter_u64_add(hpts_wake_timeout, 1);
1987                 if (callout_pending(&hpts->co) ||
1988                     !callout_active(&hpts->co)) {
1989                         mtx_unlock(&hpts->p_mtx);
1990                         return;
1991                 }
1992         }
1993         callout_deactivate(&hpts->co);
1994         hpts->p_hpts_wake_scheduled = 0;
        /* Zero tv_sec up front; the sleep paths below only set tv_usec. */
        tv.tv_sec = 0;
1995         NET_EPOCH_ENTER(et);
1996         if (hpts->p_hpts_active) {
1997                 /*
1998                  * We are active already. This means that a syscall
1999                  * trap or LRO is running on behalf of hpts. In that case
2000                  * we need to double our timeout since there seems to be
2001                  * enough activity in the system that we don't need to
2002                  * run as often (if we were not directly woken).
2003                  */
2004                 if (hpts->p_direct_wake == 0) {
2005                         counter_u64_add(hpts_back_tosleep, 1);
2006                         if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
2007                                 hpts->p_mysleep.tv_usec *= 2;
2008                                 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
2009                                         hpts->p_mysleep.tv_usec = dynamic_max_sleep;
2010                                 tv.tv_usec = hpts->p_mysleep.tv_usec;
2011                                 hpts->p_on_min_sleep = 1;
2012                         } else {
2013                                 /*
2014                                  * Here we have low count on the wheel, but
2015                                  * somehow we still collided with one of the
2016                          * connections. Let's go back to sleep for a
2017                                  * min sleep time, but clear the flag so we
2018                                  * can be awoken by insert.
2019                                  */
2020                                 hpts->p_on_min_sleep = 0;
2021                                 tv.tv_usec = tcp_min_hptsi_time;
2022                         }
2023                 } else {
2024                         /*
2025                          * We were directly woken, most likely to reset
2026                          * the callout time.
2027                          */
2028                         tv.tv_sec = 0;
2029                         tv.tv_usec = hpts->p_mysleep.tv_usec;
2030                 }
2031                 goto back_to_sleep;
2032         }
2033         hpts->sleeping = 0;
2034         hpts->p_hpts_active = 1;
2035         ticks_ran = tcp_hptsi(hpts, 1);
2036         tv.tv_sec = 0;
2037         tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
2038         if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
2039                 if (hpts->p_direct_wake == 0) {
2040                         /*
2041                          * Only adjust sleep time if we were
2042                          * called from the callout, i.e. direct_wake == 0.
2043                          */
2044                         if (ticks_ran < ticks_indicate_more_sleep) {
2045                                 hpts->p_mysleep.tv_usec *= 2;
2046                                 if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
2047                                         hpts->p_mysleep.tv_usec = dynamic_max_sleep;
2048                         } else if (ticks_ran > ticks_indicate_less_sleep) {
2049                                 hpts->p_mysleep.tv_usec /= 2;
2050                                 if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
2051                                         hpts->p_mysleep.tv_usec = dynamic_min_sleep;
2052                         }
2053                 }
2054                 if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
2055                         hpts->overidden_sleep = tv.tv_usec;
2056                         tv.tv_usec = hpts->p_mysleep.tv_usec;
2057                 } else if (tv.tv_usec > dynamic_max_sleep) {
2058                         /* Let's not let sleep get above this value */
2059                         hpts->overidden_sleep = tv.tv_usec;
2060                         tv.tv_usec = dynamic_max_sleep;
2061                 }
2062                 /*
2063                  * In this mode the timer is a backstop to
2064                  * all the userret/lro_flushes so we use
2065                  * the dynamic value and set the on_min_sleep
2066                  * flag so we will not be awoken.
2067                  */
2068                 hpts->p_on_min_sleep = 1;
2069         } else if (hpts->p_on_queue_cnt == 0) {
2070                 /*
2071                  * No one on the wheel, please wake us up
2072                  * if you insert on the wheel.
2073                  */
2074                 hpts->p_on_min_sleep = 0;
2075                 hpts->overidden_sleep = 0;
2076         } else {
2077                 /*
2078                  * We hit here when we have a low number of
2079                  * clients on the wheel (our else clause).
2080                  * We may need to go on min sleep; if we set
2081                  * the flag, we will not be awoken when someone
2082                  * is inserted ahead of us. Clearing the flag
2083                  * means we can be awoken. This is "old mode"
2084                  * where the timer is what runs hpts mainly.
2085                  */
2086                 if (tv.tv_usec < tcp_min_hptsi_time) {
2087                         /*
2088                          * Yes on min sleep, which means
2089                          * we cannot be awoken.
2090                          */
2091                         hpts->overidden_sleep = tv.tv_usec;
2092                         tv.tv_usec = tcp_min_hptsi_time;
2093                         hpts->p_on_min_sleep = 1;
2094                 } else {
2095                         /* Clear the min sleep flag */
2096                         hpts->overidden_sleep = 0;
2097                         hpts->p_on_min_sleep = 0;
2098                 }
2099         }
2100         HPTS_MTX_ASSERT(hpts);
2101         hpts->p_hpts_active = 0;
2102 back_to_sleep:
2103         hpts->p_direct_wake = 0;
2104         sb = tvtosbt(tv);
2105         cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
2106         /* Store off to make visible the actual sleep time */
2107         hpts->sleeping = tv.tv_usec;
2108         callout_reset_sbt_on(&hpts->co, sb, 0,
2109                              hpts_timeout_swi, hpts, cpu,
2110                              (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
2111         NET_EPOCH_EXIT(et);
2112         mtx_unlock(&hpts->p_mtx);
2113 }
2114
2115 #undef  timersub
2116
2117 static void
2118 tcp_init_hptsi(void *st)
2119 {
2120         int32_t i, j, error, bound = 0, created = 0;
2121         size_t sz, asz;
2122         struct timeval tv;
2123         sbintime_t sb;
2124         struct tcp_hpts_entry *hpts;
2125         struct pcpu *pc;
2126         cpuset_t cs;
2127         char unit[16];
2128         uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
2129         int count, domain, cpu;
2130
2131         tcp_pace.rp_proc = NULL;
2132         tcp_pace.rp_num_hptss = ncpus;
2133         hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
2134         hpts_loops = counter_u64_alloc(M_WAITOK);
2135         back_tosleep = counter_u64_alloc(M_WAITOK);
2136         combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
2137         wheel_wrap = counter_u64_alloc(M_WAITOK);
2138         hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
2139         hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
2140         hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
2141         hpts_direct_call = counter_u64_alloc(M_WAITOK);
2142         cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
2143         cpu_uses_random = counter_u64_alloc(M_WAITOK);
2144
2146         sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
2147         tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
2148         sz = (sizeof(uint32_t) * tcp_pace.rp_num_hptss);
2149         cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
2150         asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
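        /*
         * Each pacer gets its own wheel: asz covers one struct hptsh
         * queue head per wheel slot (NUM_OF_HPTSI_SLOTS of them), all
         * initialized empty in the loop below.
         */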
2151         for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
2152                 tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
2153                     M_TCPHPTS, M_WAITOK | M_ZERO);
2154                 tcp_pace.rp_ent[i]->p_hptss = malloc(asz,
2155                     M_TCPHPTS, M_WAITOK);
2156                 hpts = tcp_pace.rp_ent[i];
2157                 /*
2158                  * Init all the hpts structures that are not specifically
2159                  * zero'd by the allocations. Also let's attach them to the
2160                  * appropriate sysctl block.
2161                  */
2162                 mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
2163                     "hpts", MTX_DEF | MTX_DUPOK);
2164                 TAILQ_INIT(&hpts->p_input);
2165                 for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
2166                         TAILQ_INIT(&hpts->p_hptss[j]);
2167                 }
2168                 sysctl_ctx_init(&hpts->hpts_ctx);
2169                 sprintf(unit, "%d", i);
2170                 hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
2171                     SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
2172                     OID_AUTO,
2173                     unit,
2174                     CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
2175                     "");
2176                 SYSCTL_ADD_INT(&hpts->hpts_ctx,
2177                     SYSCTL_CHILDREN(hpts->hpts_root),
2178                     OID_AUTO, "in_qcnt", CTLFLAG_RD,
2179                     &hpts->p_on_inqueue_cnt, 0,
2180                     "Count TCB's awaiting input processing");
2181                 SYSCTL_ADD_INT(&hpts->hpts_ctx,
2182                     SYSCTL_CHILDREN(hpts->hpts_root),
2183                     OID_AUTO, "out_qcnt", CTLFLAG_RD,
2184                     &hpts->p_on_queue_cnt, 0,
2185                     "Count TCB's awaiting output processing");
2186                 SYSCTL_ADD_U16(&hpts->hpts_ctx,
2187                     SYSCTL_CHILDREN(hpts->hpts_root),
2188                     OID_AUTO, "active", CTLFLAG_RD,
2189                     &hpts->p_hpts_active, 0,
2190                     "Is the hpts active");
2191                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
2192                     SYSCTL_CHILDREN(hpts->hpts_root),
2193                     OID_AUTO, "curslot", CTLFLAG_RD,
2194                     &hpts->p_cur_slot, 0,
2195                     "What the currently running pacer's goal slot is");
2196                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
2197                     SYSCTL_CHILDREN(hpts->hpts_root),
2198                     OID_AUTO, "runtick", CTLFLAG_RD,
2199                     &hpts->p_runningslot, 0,
2200                     "What the running pacer's current slot is");
2201                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
2202                     SYSCTL_CHILDREN(hpts->hpts_root),
2203                     OID_AUTO, "curtick", CTLFLAG_RD,
2204                     &hpts->p_curtick, 0,
2205                     "What the running pacer's last tick mapped to the wheel was");
2206                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
2207                     SYSCTL_CHILDREN(hpts->hpts_root),
2208                     OID_AUTO, "lastran", CTLFLAG_RD,
2209                     &cts_last_ran[i], 0,
2210                     "The last usec tick that this hpts ran");
2211                 SYSCTL_ADD_LONG(&hpts->hpts_ctx,
2212                     SYSCTL_CHILDREN(hpts->hpts_root),
2213                     OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
2214                     &hpts->p_mysleep.tv_usec,
2215                     "What the running pacer is using for p_mysleep.tv_usec");
2216                 SYSCTL_ADD_U64(&hpts->hpts_ctx,
2217                     SYSCTL_CHILDREN(hpts->hpts_root),
2218                     OID_AUTO, "now_sleeping", CTLFLAG_RD,
2219                     &hpts->sleeping, 0,
2220                     "What the running pacer is actually sleeping for");
2221                 SYSCTL_ADD_U64(&hpts->hpts_ctx,
2222                     SYSCTL_CHILDREN(hpts->hpts_root),
2223                     OID_AUTO, "syscall_cnt", CTLFLAG_RD,
2224                     &hpts->syscall_cnt, 0,
2225                     "How many times we had syscalls on this hpts");
2226
2227                 hpts->p_hpts_sleep_time = hpts_sleep_max;
2228                 hpts->p_num = i;
2229                 hpts->p_curtick = tcp_gethptstick(&tv);
2230                 cts_last_ran[i] = tcp_tv_to_usectick(&tv);
2231                 hpts->p_prev_slot = hpts->p_cur_slot = tick_to_wheel(hpts->p_curtick);
2232                 hpts->p_cpu = 0xffff;
2233                 hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
2234                 callout_init(&hpts->co, 1);
2235         }
2236
2237         /* Don't try to bind to NUMA domains if we don't have any */
2238         if (vm_ndomains == 1 && tcp_bind_threads == 2)
2239                 tcp_bind_threads = 0;
2240
2241         /*
2242          * Now let's start ithreads to handle the hpts entries.
2243          */
2244         for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
2245                 hpts = tcp_pace.rp_ent[i];
2246                 hpts->p_cpu = i;
2247                 error = swi_add(&hpts->ie, "hpts",
2248                     tcp_hpts_thread, (void *)hpts,
2249                     SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
2250                 KASSERT(error == 0,
2251                         ("Can't add hpts:%p i:%d err:%d",
2252                          hpts, i, error));
2253                 created++;
2254                 hpts->p_mysleep.tv_sec = 0;
2255                 hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
2256                 if (tcp_bind_threads == 1) {
2257                         if (intr_event_bind(hpts->ie, i) == 0)
2258                                 bound++;
2259                 } else if (tcp_bind_threads == 2) {
2260                         pc = pcpu_find(i);
2261                         domain = pc->pc_domain;
2262                         CPU_COPY(&cpuset_domain[domain], &cs);
2263                         if (intr_event_bind_ithread_cpuset(hpts->ie, &cs)
2264                             == 0) {
2265                                 bound++;
2266                                 count = hpts_domains[domain].count;
2267                                 hpts_domains[domain].cpu[count] = i;
2268                                 hpts_domains[domain].count++;
2269                         }
2270                 }
2271                 tv.tv_sec = 0;
2272                 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
2273                 hpts->sleeping = tv.tv_usec;
2274                 sb = tvtosbt(tv);
2275                 cpu = (tcp_bind_threads || hpts_use_assigned_cpu) ? hpts->p_cpu : curcpu;
2276                 callout_reset_sbt_on(&hpts->co, sb, 0,
2277                                      hpts_timeout_swi, hpts, cpu,
2278                                      (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
2279         }
2280         /*
2281          * If we somehow have an empty domain, fall back to choosing
2282          * among all hpts threads.
2283          */
2284         for (i = 0; i < vm_ndomains; i++) {
2285                 if (hpts_domains[i].count == 0) {
2286                         tcp_bind_threads = 0;
2287                         break;
2288                 }
2289         }
2290         printf("TCP Hpts created %d swi interrupt threads and bound %d to %s\n",
2291             created, bound,
2292             tcp_bind_threads == 2 ? "NUMA domains" : "cpus");
2293 #ifdef INVARIANTS
2294         printf("HPTS is in INVARIANT mode!!\n");
2295 #endif
2296 }
2297
2298 SYSINIT(tcphptsi, SI_SUB_SOFTINTR, SI_ORDER_ANY, tcp_init_hptsi, NULL);
2299 MODULE_VERSION(tcphpts, 1);