sys/netinet/tcp_hpts.c (FreeBSD)
1 /*-
2  * Copyright (c) 2016-2018 Netflix Inc.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  */
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28
29 #include "opt_inet.h"
30 #include "opt_inet6.h"
31 #include "opt_ipsec.h"
32 #include "opt_tcpdebug.h"
33 /**
34  * Some notes about usage.
35  *
36  * The tcp_hpts system is designed to provide a high precision timer
37  * system for tcp. Its main purpose is to provide a mechanism for 
38  * pacing packets out onto the wire. It can be used in two ways
39  * by a given TCP stack (and those two methods can be used simultaneously).
40  *
41  * First, and probably the main thing it is used for by Rack and BBR, it can
42  * be used to call tcp_output() of a transport stack at some time in the future.
43  * The normal way this is done is that tcp_output() of the stack schedules
44  * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
45  * slot is the time from now that the stack wants to be called but it
46  * must be converted to tcp_hpts's notion of slot. This is done with
47  * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
48  * call from the tcp_output() routine might look like:
49  *
50  * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550));
51  *
52  * The above would schedule tcp_output() to be called in 550 microseconds.
53  * Note that if using this mechanism the stack will want to add, near
54  * its top, a check to prevent unwanted calls (from user land or the
55  * arrival of incoming ACKs). So it would add something like:
56  *
57  * if (inp->inp_in_hpts)
58  *    return;
59  *
60  * to prevent output processing until the time allotted has gone by.
61  * Of course this is a bare-bones example and the stack will probably
62  * have more considerations than just the above.
63  *
64  * Now the tcp_hpts system will call tcp_output in one of two forms.
65  * It will first check to see if the stack has defined a
66  * tfb_tcp_output_wtime() function; if so, that is the routine it
67  * will call. If that function is not defined, then it will call the
68  * tfb_tcp_output() function. The only difference between these
69  * two calls is that the former passes the time in to the function,
70  * so the function does not have to access the time (which tcp_hpts
71  * already has). What these functions do is of course totally up
72  * to the individual tcp stack.
73  *
74  * Now the second facility (actually two facilities, I guess :D) that
75  * the tcp_hpts system provides is the ability to either abort
76  * a connection (later) or process input on a connection.
77  * Why would you want to do this? To keep processor locality.
78  *
79  * So in order to use the input redirection facility the
80  * stack changes its tcp_do_segment() routine so that, instead
81  * of processing the data, it calls the function:
82  *
83  * tcp_queue_pkt_to_input()
84  *
85  * You will note that the arguments to this function look
86  * a lot like tcp_do_segment()'s arguments. This function
87  * ensures that the tcp_hpts system will
88  * call the function tfb_tcp_hpts_do_segment() from the
89  * correct CPU. Note that multiple calls can get pushed
90  * into the tcp_hpts system; this will be indicated by
91  * the next-to-last argument to tfb_tcp_hpts_do_segment()
92  * (nxt_pkt). If nxt_pkt is 1 then another packet is
93  * coming. If nxt_pkt is 0 then this is the last call
94  * that the tcp_hpts system has available for the tcp stack.
95  * 
96  * The other point of the input system is to be able to safely
97  * drop a tcp connection without worrying about the recursive 
98  * locking that may be occurring on the INP_WLOCK. So if
99  * a stack wants to drop a connection it calls:
100  *
101  *     tcp_set_inp_to_drop(tp, ETIMEDOUT)
102  * 
103  * to schedule the tcp_hpts system to call
104  * 
105  *    tcp_drop(tp, drop_reason)
106  *
107  * at a future point. This is quite handy to prevent locking
108  * issues when dropping connections.
109  *
110  */
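/*
 * To tie the above together: a minimal sketch, assuming a hypothetical
 * stack whose output routine is wired up as its tfb_tcp_output handler.
 * The function name and the 550 usec delay are illustrative only; note
 * that the insert call (see __tcp_hpts_insert() below) takes the
 * connection's inpcb:
 *
 * static int
 * example_stack_output(struct tcpcb *tp)
 * {
 *         struct inpcb *inp = tp->t_inpcb;
 *
 *         INP_WLOCK_ASSERT(inp);
 *         if (inp->inp_in_hpts)
 *                 return (0);        -- a paced call is already pending
 *         ... transmit whatever the pacing budget allows right now ...
 *         tcp_hpts_insert(inp, HPTS_USEC_TO_SLOTS(550));
 *         return (0);
 * }
 */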
111
112 #include <sys/param.h>
113 #include <sys/bus.h>
114 #include <sys/interrupt.h>
115 #include <sys/module.h>
116 #include <sys/kernel.h>
117 #include <sys/hhook.h>
118 #include <sys/malloc.h>
119 #include <sys/mbuf.h>
120 #include <sys/proc.h>           /* for proc0 declaration */
121 #include <sys/socket.h>
122 #include <sys/socketvar.h>
123 #include <sys/sysctl.h>
124 #include <sys/systm.h>
125 #include <sys/refcount.h>
126 #include <sys/sched.h>
127 #include <sys/queue.h>
128 #include <sys/smp.h>
129 #include <sys/counter.h>
130 #include <sys/time.h>
131 #include <sys/kthread.h>
132 #include <sys/kern_prefetch.h>
133
134 #include <vm/uma.h>
135
136 #include <net/route.h>
137 #include <net/vnet.h>
138
139 #define TCPSTATES               /* for logging */
140
141 #include <netinet/in.h>
142 #include <netinet/in_kdtrace.h>
143 #include <netinet/in_pcb.h>
144 #include <netinet/ip.h>
145 #include <netinet/ip_icmp.h>    /* required for icmp_var.h */
146 #include <netinet/icmp_var.h>   /* for ICMP_BANDLIM */
147 #include <netinet/ip_var.h>
148 #include <netinet/ip6.h>
149 #include <netinet6/in6_pcb.h>
150 #include <netinet6/ip6_var.h>
151 #define TCPOUTFLAGS
152 #include <netinet/tcp.h>
153 #include <netinet/tcp_fsm.h>
154 #include <netinet/tcp_seq.h>
155 #include <netinet/tcp_timer.h>
156 #include <netinet/tcp_var.h>
157 #include <netinet/tcpip.h>
158 #include <netinet/cc/cc.h>
159 #include <netinet/tcp_hpts.h>
160
161 #ifdef TCPDEBUG
162 #include <netinet/tcp_debug.h>
163 #endif                          /* TCPDEBUG */
164 #ifdef TCP_OFFLOAD
165 #include <netinet/tcp_offload.h>
166 #endif
167
168 #ifdef IPSEC
169 #include <netipsec/ipsec.h>
170 #include <netipsec/ipsec6.h>
171 #endif                          /* IPSEC */
172 #include "opt_rss.h"
173
174 MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
175 #ifdef RSS
176 static int tcp_bind_threads = 1;
177 #else
178 static int tcp_bind_threads = 0;
179 #endif
180 TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
181
182 static uint32_t tcp_hpts_logging_size = DEFAULT_HPTS_LOG;
183
184 TUNABLE_INT("net.inet.tcp.hpts_logging_sz", &tcp_hpts_logging_size);
185
186 static struct tcp_hptsi tcp_pace;
187
188 static int
189 tcp_hptsi_lock_inpinfo(struct inpcb *inp,
190     struct tcpcb **tp);
191 static void tcp_wakehpts(struct tcp_hpts_entry *p);
192 static void tcp_wakeinput(struct tcp_hpts_entry *p);
193 static void tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv);
194 static void tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick);
195 static void tcp_hpts_thread(void *ctx);
196 static void tcp_init_hptsi(void *st);
197
198 int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
199 static int32_t tcp_hpts_callout_skip_swi = 0;
200
201 SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW, 0, "TCP Hpts controls");
202
203 #define timersub(tvp, uvp, vvp)                                         \
204         do {                                                            \
205                 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;          \
206                 (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;       \
207                 if ((vvp)->tv_usec < 0) {                               \
208                         (vvp)->tv_sec--;                                \
209                         (vvp)->tv_usec += 1000000;                      \
210                 }                                                       \
211         } while (0)
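/*
 * Worked example of the timersub() macro above, with two hypothetical
 * struct timeval samples now = {2, 100000} and then = {1, 900000}:
 * timersub(&now, &then, &delta) borrows from tv_sec and leaves the
 * elapsed time in delta = {0, 200000}, i.e. 200000 usecs.
 */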
212
213 static int32_t logging_on = 0;
214 static int32_t hpts_sleep_max = (NUM_OF_HPTSI_SLOTS - 2);
215 static int32_t tcp_hpts_precision = 120;
216
217 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
218     &tcp_hpts_precision, 120,
219     "Value for PRE() precision of callout");
220
221 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
222     &logging_on, 0,
223     "Turn on logging if compiled in");
224
225 counter_u64_t hpts_loops;
226
227 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, loops, CTLFLAG_RD,
228     &hpts_loops, "Number of times hpts had to loop to catch up");
229
230 counter_u64_t back_tosleep;
231
232 SYSCTL_COUNTER_U64(_net_inet_tcp_hpts, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
233     &back_tosleep, "Number of times hpts found no tcbs");
234
235 static int32_t in_newts_every_tcb = 0;
236
237 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tsperpcb, CTLFLAG_RW,
238     &in_newts_every_tcb, 0,
239     "Do we have a new cts every tcb we process for input");
240 static int32_t in_ts_percision = 0;
241
242 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, in_tspercision, CTLFLAG_RW,
243     &in_ts_percision, 0,
244     "Do we use a precise timestamp for clients on input");
245 static int32_t out_newts_every_tcb = 0;
246
247 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tsperpcb, CTLFLAG_RW,
248     &out_newts_every_tcb, 0,
249     "Do we have a new cts every tcb we process for output");
250 static int32_t out_ts_percision = 0;
251
252 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, out_tspercision, CTLFLAG_RW,
253     &out_ts_percision, 0,
254     "Do we use a precise timestamp for every output cts");
255
256 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, maxsleep, CTLFLAG_RW,
257     &hpts_sleep_max, 0,
258     "The maximum time the hpts will sleep <1 - 254>");
259
260 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, minsleep, CTLFLAG_RW,
261     &tcp_min_hptsi_time, 0,
262     "The minimum time the hpts must sleep before processing more slots");
263
264 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, skip_swi, CTLFLAG_RW,
265     &tcp_hpts_callout_skip_swi, 0,
266     "Do we have the callout call directly to the hpts?");
267
268 static void
269 __tcp_hpts_log_it(struct tcp_hpts_entry *hpts, struct inpcb *inp, int event, uint32_t slot,
270     uint32_t ticknow, int32_t line)
271 {
272         struct hpts_log *pl;
273
274         HPTS_MTX_ASSERT(hpts);
275         if (hpts->p_log == NULL)
276                 return;
277         pl = &hpts->p_log[hpts->p_log_at];
278         hpts->p_log_at++;
279         if (hpts->p_log_at >= hpts->p_logsize) {
280                 hpts->p_log_at = 0;
281                 hpts->p_log_wrapped = 1;
282         }
283         pl->inp = inp;
284         if (inp) {
285                 pl->t_paceslot = inp->inp_hptsslot;
286                 pl->t_hptsreq = inp->inp_hpts_request;
287                 pl->p_onhpts = inp->inp_in_hpts;
288                 pl->p_oninput = inp->inp_in_input;
289         } else {
290                 pl->t_paceslot = 0;
291                 pl->t_hptsreq = 0;
292                 pl->p_onhpts = 0;
293                 pl->p_oninput = 0;
294         }
295         pl->is_notempty = 1;
296         pl->event = event;
297         pl->line = line;
298         pl->cts = tcp_get_usecs(NULL);
299         pl->p_curtick = hpts->p_curtick;
300         pl->p_prevtick = hpts->p_prevtick;
301         pl->p_on_queue_cnt = hpts->p_on_queue_cnt;
302         pl->ticknow = ticknow;
303         pl->slot_req = slot;
304         pl->p_nxt_slot = hpts->p_nxt_slot;
305         pl->p_cur_slot = hpts->p_cur_slot;
306         pl->p_hpts_sleep_time = hpts->p_hpts_sleep_time;
307         pl->p_flags = (hpts->p_cpu & 0x7f);
308         pl->p_flags <<= 7;
309         pl->p_flags |= (hpts->p_num & 0x7f);
310         pl->p_flags <<= 2;
311         if (hpts->p_hpts_active) {
312                 pl->p_flags |= HPTS_HPTS_ACTIVE;
313         }
314 }
315
316 #define tcp_hpts_log_it(a, b, c, d, e) __tcp_hpts_log_it(a, b, c, d, e, __LINE__)
317
318 static void
319 hpts_timeout_swi(void *arg)
320 {
321         struct tcp_hpts_entry *hpts;
322
323         hpts = (struct tcp_hpts_entry *)arg;
324         swi_sched(hpts->ie_cookie, 0);
325 }
326
327 static void
328 hpts_timeout_dir(void *arg)
329 {
330         tcp_hpts_thread(arg);
331 }
332
333 static inline void
334 hpts_sane_pace_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int clear)
335 {
336 #ifdef INVARIANTS
337         if (mtx_owned(&hpts->p_mtx) == 0) {
338                 /* We don't own the mutex? */
339                 panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
340         }
341         if (hpts->p_cpu != inp->inp_hpts_cpu) {
342                 /* It is not the right cpu/mutex? */
343                 panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
344         }
345         if (inp->inp_in_hpts == 0) {
346                 /* We are not on the hpts? */
347                 panic("%s: hpts:%p inp:%p not on the hpts?", __FUNCTION__, hpts, inp);
348         }
349         if (TAILQ_EMPTY(head) &&
350             (hpts->p_on_queue_cnt != 0)) {
351                 /* We should not be empty with a queue count */
352                 panic("%s hpts:%p hpts bucket empty but cnt:%d",
353                     __FUNCTION__, hpts, hpts->p_on_queue_cnt);
354         }
355 #endif
356         TAILQ_REMOVE(head, inp, inp_hpts);
357         hpts->p_on_queue_cnt--;
358         if (hpts->p_on_queue_cnt < 0) {
359                 /* Count should not go negative .. */
360 #ifdef INVARIANTS
361                 panic("Hpts goes negative inp:%p hpts:%p",
362                     inp, hpts);
363 #endif
364                 hpts->p_on_queue_cnt = 0;
365         }
366         if (clear) {
367                 inp->inp_hpts_request = 0;
368                 inp->inp_in_hpts = 0;
369         }
370 }
371
372 static inline void
373 hpts_sane_pace_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, struct hptsh *head, int line, int noref)
374 {
375 #ifdef INVARIANTS
376         if (mtx_owned(&hpts->p_mtx) == 0) {
377                 /* We don't own the mutex? */
378                 panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
379         }
380         if (hpts->p_cpu != inp->inp_hpts_cpu) {
381                 /* It is not the right cpu/mutex? */
382                 panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
383         }
384         if ((noref == 0) && (inp->inp_in_hpts == 1)) {
385                 /* We are already on the hpts? */
386                 panic("%s: hpts:%p inp:%p already on the hpts?", __FUNCTION__, hpts, inp);
387         }
388 #endif
389         TAILQ_INSERT_TAIL(head, inp, inp_hpts);
390         inp->inp_in_hpts = 1;
391         hpts->p_on_queue_cnt++;
392         if (noref == 0) {
393                 in_pcbref(inp);
394         }
395 }
396
397 static inline void
398 hpts_sane_input_remove(struct tcp_hpts_entry *hpts, struct inpcb *inp, int clear)
399 {
400 #ifdef INVARIANTS
401         if (mtx_owned(&hpts->p_mtx) == 0) {
402                 /* We don't own the mutex? */
403                 panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
404         }
405         if (hpts->p_cpu != inp->inp_input_cpu) {
406                 /* It is not the right cpu/mutex? */
407                 panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
408         }
409         if (inp->inp_in_input == 0) {
410                 /* We are not on the input hpts? */
411                 panic("%s: hpts:%p inp:%p not on the input hpts?", __FUNCTION__, hpts, inp);
412         }
413 #endif
414         TAILQ_REMOVE(&hpts->p_input, inp, inp_input);
415         hpts->p_on_inqueue_cnt--;
416         if (hpts->p_on_inqueue_cnt < 0) {
417 #ifdef INVARIANTS
418                 panic("Hpts in goes negative inp:%p hpts:%p",
419                     inp, hpts);
420 #endif
421                 hpts->p_on_inqueue_cnt = 0;
422         }
423 #ifdef INVARIANTS
424         if (TAILQ_EMPTY(&hpts->p_input) &&
425             (hpts->p_on_inqueue_cnt != 0)) {
426                 /* We should not be empty with a queue count */
427                 panic("%s hpts:%p in_hpts input empty but cnt:%d",
428                     __FUNCTION__, hpts, hpts->p_on_inqueue_cnt);
429         }
430 #endif
431         if (clear)
432                 inp->inp_in_input = 0;
433 }
434
435 static inline void
436 hpts_sane_input_insert(struct tcp_hpts_entry *hpts, struct inpcb *inp, int line)
437 {
438 #ifdef INVARIANTS
439         if (mtx_owned(&hpts->p_mtx) == 0) {
440                 /* We don't own the mutex? */
441                 panic("%s: hpts:%p inp:%p no hpts mutex", __FUNCTION__, hpts, inp);
442         }
443         if (hpts->p_cpu != inp->inp_input_cpu) {
444                 /* It is not the right cpu/mutex? */
445                 panic("%s: hpts:%p inp:%p incorrect CPU", __FUNCTION__, hpts, inp);
446         }
447         if (inp->inp_in_input == 1) {
448                 /* We are already on the input hpts? */
449                 panic("%s: hpts:%p inp:%p already on the input hpts?", __FUNCTION__, hpts, inp);
450         }
451 #endif
452         TAILQ_INSERT_TAIL(&hpts->p_input, inp, inp_input);
453         inp->inp_in_input = 1;
454         hpts->p_on_inqueue_cnt++;
455         in_pcbref(inp);
456 }
457
458 static int
459 sysctl_tcp_hpts_log(SYSCTL_HANDLER_ARGS)
460 {
461         struct tcp_hpts_entry *hpts;
462         size_t sz;
463         int32_t logging_was, i;
464         int32_t error = 0;
465
466         /*
467          * HACK: Turn off logging so no locks are required; this really needs
468          * a memory barrier :)
469          */
470         logging_was = logging_on;
471         logging_on = 0;
472         if (!req->oldptr) {
473                 /* How much? */
474                 sz = 0;
475                 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
476                         hpts = tcp_pace.rp_ent[i];
477                         if (hpts->p_log == NULL)
478                                 continue;
479                         sz += (sizeof(struct hpts_log) * hpts->p_logsize);
480                 }
481                 error = SYSCTL_OUT(req, 0, sz);
482         } else {
483                 for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
484                         hpts = tcp_pace.rp_ent[i];
485                         if (hpts->p_log == NULL)
486                                 continue;
487                         if (hpts->p_log_wrapped)
488                                 sz = (sizeof(struct hpts_log) * hpts->p_logsize);
489                         else
490                                 sz = (sizeof(struct hpts_log) * hpts->p_log_at);
491                         error = SYSCTL_OUT(req, hpts->p_log, sz);
492                 }
493         }
494         logging_on = logging_was;
495         return error;
496 }
497
498 SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, log, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
499     0, 0, sysctl_tcp_hpts_log, "A", "tcp hptsi log");
500
501
502 /*
503  * Try to get the INP_INFO lock.
504  *
505  * This function always succeeds in getting the lock. It will clear
506  * *tpp and return (1) if something critical changed while the inpcb
507  * was unlocked. Otherwise, it will leave *tpp unchanged and return (0).
508  *
509  * This function relies on the fact that the hpts always holds a
510  * reference on the inpcb while the segment is on the hptsi wheel and
511  * in the input queue.
512  *
513  */
514 static int
515 tcp_hptsi_lock_inpinfo(struct inpcb *inp, struct tcpcb **tpp)
516 {
517         struct tcp_function_block *tfb;
518         struct tcpcb *tp;
519         void *ptr;
520
521         /* Try the easy way. */
522         if (INP_INFO_TRY_RLOCK(&V_tcbinfo))
523                 return (0);
524
525         /*
526          * OK, let's try the hard way. We'll save the function pointer block
527          * to make sure that doesn't change while we aren't holding the
528          * lock.
529          */
530         tp = *tpp;
531         tfb = tp->t_fb;
532         ptr = tp->t_fb_ptr;
533         INP_WUNLOCK(inp);
534         INP_INFO_RLOCK(&V_tcbinfo);
535         INP_WLOCK(inp);
536         /* If the session went away, return an error. */
537         if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
538             (inp->inp_flags2 & INP_FREED)) {
539                 *tpp = NULL;
540                 return (1);
541         }
542         /*
543          * If the function block or stack-specific data block changed,
544          * report an error.
545          */
546         tp = intotcpcb(inp);
547         if ((tp->t_fb != tfb) && (tp->t_fb_ptr != ptr)) {
548                 *tpp = NULL;
549                 return (1);
550         }
551         return (0);
552 }
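/*
 * A sketch of the calling pattern for the helper above, mirroring how
 * tcp_input_data() uses it below: the caller enters holding the INP
 * write lock, may briefly lose it inside the call, and must bail out
 * when the tcpcb pointer has been cleared (the "out" label is the
 * caller's own):
 *
 *         if (tcp_hptsi_lock_inpinfo(inp, &tp)) {
 *                 -- tp is now NULL; the connection changed or went
 *                 -- away while the locks were juggled, so give up.
 *                 goto out;
 *         }
 *         -- Otherwise the INP_INFO read lock and the INP write lock
 *         -- are both held and tp is unchanged.
 */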
553
554
555 static void
556 tcp_wakehpts(struct tcp_hpts_entry *hpts)
557 {
558         HPTS_MTX_ASSERT(hpts);
559         swi_sched(hpts->ie_cookie, 0);
560         if (hpts->p_hpts_active == 2) {
561                 /* Rare: sleeping on an ENOBUF */
562                 wakeup_one(hpts);
563         }
564 }
565
566 static void
567 tcp_wakeinput(struct tcp_hpts_entry *hpts)
568 {
569         HPTS_MTX_ASSERT(hpts);
570         swi_sched(hpts->ie_cookie, 0);
571         if (hpts->p_hpts_active == 2) {
572                 /* Rare: sleeping on an ENOBUF */
573                 wakeup_one(hpts);
574         }
575 }
576
577 struct tcp_hpts_entry *
578 tcp_cur_hpts(struct inpcb *inp)
579 {
580         int32_t hpts_num;
581         struct tcp_hpts_entry *hpts;
582
583         hpts_num = inp->inp_hpts_cpu;
584         hpts = tcp_pace.rp_ent[hpts_num];
585         return (hpts);
586 }
587
588 struct tcp_hpts_entry *
589 tcp_hpts_lock(struct inpcb *inp)
590 {
591         struct tcp_hpts_entry *hpts;
592         int32_t hpts_num;
593
594 again:
595         hpts_num = inp->inp_hpts_cpu;
596         hpts = tcp_pace.rp_ent[hpts_num];
597 #ifdef INVARIANTS
598         if (mtx_owned(&hpts->p_mtx)) {
599                 panic("Hpts:%p owns mtx prior-to lock line:%d",
600                     hpts, __LINE__);
601         }
602 #endif
603         mtx_lock(&hpts->p_mtx);
604         if (hpts_num != inp->inp_hpts_cpu) {
605                 mtx_unlock(&hpts->p_mtx);
606                 goto again;
607         }
608         return (hpts);
609 }
610
611 struct tcp_hpts_entry *
612 tcp_input_lock(struct inpcb *inp)
613 {
614         struct tcp_hpts_entry *hpts;
615         int32_t hpts_num;
616
617 again:
618         hpts_num = inp->inp_input_cpu;
619         hpts = tcp_pace.rp_ent[hpts_num];
620 #ifdef INVARIANTS
621         if (mtx_owned(&hpts->p_mtx)) {
622                 panic("Hpts:%p owns mtx prior-to lock line:%d",
623                     hpts, __LINE__);
624         }
625 #endif
626         mtx_lock(&hpts->p_mtx);
627         if (hpts_num != inp->inp_input_cpu) {
628                 mtx_unlock(&hpts->p_mtx);
629                 goto again;
630         }
631         return (hpts);
632 }
633
634 static void
635 tcp_remove_hpts_ref(struct inpcb *inp, struct tcp_hpts_entry *hpts, int line)
636 {
637         int32_t add_freed;
638
639         if (inp->inp_flags2 & INP_FREED) {
640                 /*
641                  * Need to play a special trick so that in_pcbrele_wlocked
642                  * does not return 1 when it really should have returned 0.
643                  */
644                 add_freed = 1;
645                 inp->inp_flags2 &= ~INP_FREED;
646         } else {
647                 add_freed = 0;
648         }
649 #ifndef INP_REF_DEBUG
650         if (in_pcbrele_wlocked(inp)) {
651                 /*
652                  * This should not happen. We have the inpcb referred to by
653                  * the main socket (why we are called) and the hpts. It
654                  * should always return 0.
655                  */
656                 panic("inpcb:%p release ret 1",
657                     inp);
658         }
659 #else
660         if (__in_pcbrele_wlocked(inp, line)) {
661                 /*
662                  * This should not happen. We have the inpcb referred to by
663                  * the main socket (why we are called) and the hpts. It
664                  * should always return 0.
665                  */
666                 panic("inpcb:%p release ret 1",
667                     inp);
668         }
669 #endif
670         if (add_freed) {
671                 inp->inp_flags2 |= INP_FREED;
672         }
673 }
674
675 static void
676 tcp_hpts_remove_locked_output(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
677 {
678         if (inp->inp_in_hpts) {
679                 hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], 1);
680                 tcp_remove_hpts_ref(inp, hpts, line);
681         }
682 }
683
684 static void
685 tcp_hpts_remove_locked_input(struct tcp_hpts_entry *hpts, struct inpcb *inp, int32_t flags, int32_t line)
686 {
687         HPTS_MTX_ASSERT(hpts);
688         if (inp->inp_in_input) {
689                 hpts_sane_input_remove(hpts, inp, 1);
690                 tcp_remove_hpts_ref(inp, hpts, line);
691         }
692 }
693
694 /*
695  * Called normally with the INP write lock held, but that
696  * does not matter; the hpts lock is the key.  The
697  * lock order allows us to hold the
698  * INP lock and then acquire the hpts lock.
699  *
700  * Valid values in the flags are:
701  * HPTS_REMOVE_OUTPUT - remove from the output wheel of the hpts.
702  * HPTS_REMOVE_INPUT - remove from the input queue of the hpts.
703  * Note that you can OR both values together and get both
704  * actions.
705  */
706 void
707 __tcp_hpts_remove(struct inpcb *inp, int32_t flags, int32_t line)
708 {
709         struct tcp_hpts_entry *hpts;
710
711         INP_WLOCK_ASSERT(inp);
712         if (flags & HPTS_REMOVE_OUTPUT) {
713                 hpts = tcp_hpts_lock(inp);
714                 tcp_hpts_remove_locked_output(hpts, inp, flags, line);
715                 mtx_unlock(&hpts->p_mtx);
716         }
717         if (flags & HPTS_REMOVE_INPUT) {
718                 hpts = tcp_input_lock(inp);
719                 tcp_hpts_remove_locked_input(hpts, inp, flags, line);
720                 mtx_unlock(&hpts->p_mtx);
721         }
722 }
723
724 static inline int
725 hpts_tick(struct tcp_hpts_entry *hpts, int32_t plus)
726 {
727         return ((hpts->p_prevtick + plus) % NUM_OF_HPTSI_SLOTS);
728 }
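/*
 * For example, with plus = 2 and p_prevtick sitting on the last slot of
 * the wheel, hpts_tick() wraps around:
 * ((NUM_OF_HPTSI_SLOTS - 1) + 2) % NUM_OF_HPTSI_SLOTS == 1.
 */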
729
730 static int
731 tcp_queue_to_hpts_immediate_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line, int32_t noref)
732 {
733         int32_t need_wake = 0;
734         uint32_t ticknow = 0;
735
736         HPTS_MTX_ASSERT(hpts);
737         if (inp->inp_in_hpts == 0) {
738                 /* Ok we need to set it on the hpts in the current slot */
739                 if (hpts->p_hpts_active == 0) {
740                         /* A sleeping hpts we want in next slot to run */
741                         if (logging_on) {
742                                 tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, 0,
743                                     hpts_tick(hpts, 1));
744                         }
745                         inp->inp_hptsslot = hpts_tick(hpts, 1);
746                         inp->inp_hpts_request = 0;
747                         if (logging_on) {
748                                 tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEP_BEFORE, 1, ticknow);
749                         }
750                         need_wake = 1;
751                 } else if ((void *)inp == hpts->p_inp) {
752                         /*
753                          * We can't allow you to go into the same slot we
754                          * are in. We must put you out.
755                          */
756                         inp->inp_hptsslot = hpts->p_nxt_slot;
757                 } else
758                         inp->inp_hptsslot = hpts->p_cur_slot;
759                 hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
760                 inp->inp_hpts_request = 0;
761                 if (logging_on) {
762                         tcp_hpts_log_it(hpts, inp, HPTSLOG_IMMEDIATE, 0, 0);
763                 }
764                 if (need_wake) {
765                         /*
766                          * Activate the hpts if it is sleeping and its
767                          * timeout is not 1.
768                          */
769                         if (logging_on) {
770                                 tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_HPTS, 0, ticknow);
771                         }
772                         hpts->p_direct_wake = 1;
773                         tcp_wakehpts(hpts);
774                 }
775         }
776         return (need_wake);
777 }
778
779 int
780 __tcp_queue_to_hpts_immediate(struct inpcb *inp, int32_t line)
781 {
782         int32_t ret;
783         struct tcp_hpts_entry *hpts;
784
785         INP_WLOCK_ASSERT(inp);
786         hpts = tcp_hpts_lock(inp);
787         ret = tcp_queue_to_hpts_immediate_locked(inp, hpts, line, 0);
788         mtx_unlock(&hpts->p_mtx);
789         return (ret);
790 }
791
792 static void
793 tcp_hpts_insert_locked(struct tcp_hpts_entry *hpts, struct inpcb *inp, uint32_t slot, uint32_t cts, int32_t line,
794     struct hpts_diag *diag, int32_t noref)
795 {
796         int32_t need_new_to = 0;
797         int32_t need_wakeup = 0;
798         uint32_t largest_slot;
799         uint32_t ticknow = 0;
800         uint32_t slot_calc;
801
802         HPTS_MTX_ASSERT(hpts);
803         if (diag) {
804                 memset(diag, 0, sizeof(struct hpts_diag));
805                 diag->p_hpts_active = hpts->p_hpts_active;
806                 diag->p_nxt_slot = hpts->p_nxt_slot;
807                 diag->p_cur_slot = hpts->p_cur_slot;
808                 diag->slot_req = slot;
809         }
810         if ((inp->inp_in_hpts == 0) || noref) {
811                 inp->inp_hpts_request = slot;
812                 if (slot == 0) {
813                         /* Immediate */
814                         tcp_queue_to_hpts_immediate_locked(inp, hpts, line, noref);
815                         return;
816                 }
817                 if (hpts->p_hpts_active) {
818                         /*
819                          * It's slot - 1, since nxt_slot is the next tick that
820                          * will go off while the hpts is awake
821                          */
822                         if (logging_on) {
823                                 tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_NORMAL, slot, 0);
824                         }
825                         /*
826                          * We want to make sure that we don't place an inp in
827                          * the range of p_cur_slot <-> p_nxt_slot. If we
828                          * take from p_nxt_slot to the end, plus p_cur_slot
829                          * and then take away 2, we will know how many is
830                          * the max slots we can use.
831                          */
832                         if (hpts->p_nxt_slot > hpts->p_cur_slot) {
833                                 /*
834                                  * Non-wrap case nxt_slot <-> cur_slot we
835                                  * don't want to land in. So the diff gives
836                                  * us what is taken away from the number of
837                                  * slots.
838                                  */
839                                 largest_slot = NUM_OF_HPTSI_SLOTS - (hpts->p_nxt_slot - hpts->p_cur_slot);
840                         } else if (hpts->p_nxt_slot == hpts->p_cur_slot) {
841                                 largest_slot = NUM_OF_HPTSI_SLOTS - 2;
842                         } else {
843                                 /*
844                                  * Wrap case so the diff gives us the number
845                                  * of slots that we can land in.
846                                  */
847                                 largest_slot = hpts->p_cur_slot - hpts->p_nxt_slot;
848                         }
849                         /*
850                          * We take away two so we never have a problem (20
851                          * usecs) out of 1024000 usecs
852                          */
853                         largest_slot -= 2;
854                         if (inp->inp_hpts_request > largest_slot) {
855                                 /*
856                                  * Restrict max jump of slots and remember
857                                  * leftover
858                                  */
859                                 slot = largest_slot;
860                                 inp->inp_hpts_request -= largest_slot;
861                         } else {
862                                 /* This one will run when we hit it */
863                                 inp->inp_hpts_request = 0;
864                         }
865                         if (hpts->p_nxt_slot == hpts->p_cur_slot)
866                                 slot_calc = (hpts->p_nxt_slot + slot) % NUM_OF_HPTSI_SLOTS;
867                         else
868                                 slot_calc = (hpts->p_nxt_slot + slot - 1) % NUM_OF_HPTSI_SLOTS;
869                         if (slot_calc == hpts->p_cur_slot) {
870 #ifdef INVARIANTS
871                                 /* TSNH */
872                                 panic("Hpts:%p impossible slot calculation slot_calc:%u slot:%u largest:%u\n",
873                                     hpts, slot_calc, slot, largest_slot);
874 #endif
875                                 if (slot_calc)
876                                         slot_calc--;
877                                 else
878                                         slot_calc = NUM_OF_HPTSI_SLOTS - 1;
879                         }
880                         inp->inp_hptsslot = slot_calc;
881                         if (diag) {
882                                 diag->inp_hptsslot = inp->inp_hptsslot;
883                         }
884                 } else {
885                          * The hpts is sleeping; we need to figure out where
886                          * it will wake up and whether we need to reschedule
887                          * its timeout.
888                          * its time-out.
889                          */
890                         uint32_t have_slept, yet_to_sleep;
891                         uint32_t slot_now;
892                         struct timeval tv;
893
894                         ticknow = tcp_gethptstick(&tv);
895                         slot_now = ticknow % NUM_OF_HPTSI_SLOTS;
896                         /*
897                          * The user wants to be inserted at (slot_now +
898                          * slot) % NUM_OF_HPTSI_SLOTS, so let's set that up.
899                          */
900                         largest_slot = NUM_OF_HPTSI_SLOTS - 2;
901                         if (inp->inp_hpts_request > largest_slot) {
902                                 /* Adjust the residual in inp_hpts_request */
903                                 slot = largest_slot;
904                                 inp->inp_hpts_request -= largest_slot;
905                         } else {
906                                 /* No residual it all fits */
907                                 inp->inp_hpts_request = 0;
908                         }
909                         inp->inp_hptsslot = (slot_now + slot) % NUM_OF_HPTSI_SLOTS;
910                         if (diag) {
911                                 diag->slot_now = slot_now;
912                                 diag->inp_hptsslot = inp->inp_hptsslot;
913                                 diag->p_on_min_sleep = hpts->p_on_min_sleep;
914                         }
915                         if (logging_on) {
916                                 tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERT_SLEEPER, slot, ticknow);
917                         }
918                         /* Now do we need to restart the hpts's timer? */
919                         if (TSTMP_GT(ticknow, hpts->p_curtick))
920                                 have_slept = ticknow - hpts->p_curtick;
921                         else
922                                 have_slept = 0;
923                         if (have_slept < hpts->p_hpts_sleep_time) {
924                                 /* This should be what happens */
925                                 yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
926                         } else {
927                                 /* We are overdue */
928                                 yet_to_sleep = 0;
929                                 need_wakeup = 1;
930                         }
931                         if (diag) {
932                                 diag->have_slept = have_slept;
933                                 diag->yet_to_sleep = yet_to_sleep;
934                                 diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
935                         }
936                         if ((hpts->p_on_min_sleep == 0) && (yet_to_sleep > slot)) {
937                                 /*
938                                  * We need to reschedule the hpts's timeout.
939                                  */
940                                 hpts->p_hpts_sleep_time = slot;
941                                 need_new_to = slot * HPTS_TICKS_PER_USEC;
942                         }
943                 }
944                 hpts_sane_pace_insert(hpts, inp, &hpts->p_hptss[inp->inp_hptsslot], line, noref);
945                 if (logging_on) {
946                         tcp_hpts_log_it(hpts, inp, HPTSLOG_INSERTED, slot, ticknow);
947                 }
948                 /*
949                  * Now, how long is the hpts sleeping for? If active is 1, it is
950                  * up and ticking and we do nothing; otherwise we may need to
951                  * reschedule its callout if need_new_to was set above.
952                  */
953                 if (need_wakeup) {
954                         if (logging_on) {
955                                 tcp_hpts_log_it(hpts, inp, HPTSLOG_RESCHEDULE, 1, 0);
956                         }
957                         hpts->p_direct_wake = 1;
958                         tcp_wakehpts(hpts);
959                         if (diag) {
960                                 diag->need_new_to = 0;
961                                 diag->co_ret = 0xffff0000;
962                         }
963                 } else if (need_new_to) {
964                         int32_t co_ret;
965                         struct timeval tv;
966                         sbintime_t sb;
967
968                         tv.tv_sec = 0;
969                         tv.tv_usec = 0;
970                         while (need_new_to > HPTS_USEC_IN_SEC) {
971                                 tv.tv_sec++;
972                                 need_new_to -= HPTS_USEC_IN_SEC;
973                         }
974                         tv.tv_usec = need_new_to;
975                         sb = tvtosbt(tv);
976                         if (tcp_hpts_callout_skip_swi == 0) {
977                                 co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
978                                     hpts_timeout_swi, hpts, hpts->p_cpu,
979                                     (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
980                         } else {
981                                 co_ret = callout_reset_sbt_on(&hpts->co, sb, 0,
982                                     hpts_timeout_dir, hpts,
983                                     hpts->p_cpu,
984                                     C_PREL(tcp_hpts_precision));
985                         }
986                         if (diag) {
987                                 diag->need_new_to = need_new_to;
988                                 diag->co_ret = co_ret;
989                         }
990                 }
991         } else {
992 #ifdef INVARIANTS
993                 panic("Hpts:%p tp:%p already on hpts and add?", hpts, inp);
994 #endif
995         }
996 }
997
998 uint32_t
999 tcp_hpts_insert_diag(struct inpcb *inp, uint32_t slot, int32_t line, struct hpts_diag *diag){
1000         struct tcp_hpts_entry *hpts;
1001         uint32_t slot_on, cts;
1002         struct timeval tv;
1003
1004         /*
1005          * We now return the next-slot the hpts will be on, beyond its
1006          * current run (if up) or where it was when it stopped if it is
1007          * sleeping.
1008          */
1009         INP_WLOCK_ASSERT(inp);
1010         hpts = tcp_hpts_lock(inp);
1011         if (in_ts_percision)
1012                 microuptime(&tv);
1013         else
1014                 getmicrouptime(&tv);
1015         cts = tcp_tv_to_usectick(&tv);
1016         tcp_hpts_insert_locked(hpts, inp, slot, cts, line, diag, 0);
1017         slot_on = hpts->p_nxt_slot;
1018         mtx_unlock(&hpts->p_mtx);
1019         return (slot_on);
1020 }
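/*
 * A hedged example of using the diagnostic variant above; the local
 * variable names are hypothetical and HPTS_MS_TO_SLOTS is the
 * conversion macro mentioned in the usage notes at the top of the file:
 *
 *         struct hpts_diag diag;
 *         uint32_t nxt;
 *
 *         nxt = tcp_hpts_insert_diag(inp, HPTS_MS_TO_SLOTS(2),
 *             __LINE__, &diag);
 *         -- diag.inp_hptsslot is the wheel slot the inp landed in,
 *         -- diag.slot_req is the slot count that was requested, and
 *         -- nxt is the next slot the hpts will run (p_nxt_slot).
 */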
1021
1022 uint32_t
1023 __tcp_hpts_insert(struct inpcb *inp, uint32_t slot, int32_t line){
1024         return (tcp_hpts_insert_diag(inp, slot, line, NULL));
1025 }
1026
1027 int
1028 __tcp_queue_to_input_locked(struct inpcb *inp, struct tcp_hpts_entry *hpts, int32_t line)
1029 {
1030         int32_t retval = 0;
1031
1032         HPTS_MTX_ASSERT(hpts);
1033         if (inp->inp_in_input == 0) {
1034                 /* Ok we need to set it on the hpts in the current slot */
1035                 hpts_sane_input_insert(hpts, inp, line);
1036                 retval = 1;
1037                 if (hpts->p_hpts_active == 0) {
1038                         /*
1039                          * Activate the hpts if it is sleeping.
1040                          */
1041                         if (logging_on) {
1042                                 tcp_hpts_log_it(hpts, inp, HPTSLOG_WAKEUP_INPUT, 0, 0);
1043                         }
1044                         retval = 2;
1045                         hpts->p_direct_wake = 1;
1046                         tcp_wakeinput(hpts);
1047                 }
1048         } else if (hpts->p_hpts_active == 0) {
1049                 retval = 4;
1050                 hpts->p_direct_wake = 1;
1051                 tcp_wakeinput(hpts);
1052         }
1053         return (retval);
1054 }
1055
1056 void
1057 tcp_queue_pkt_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
1058     int32_t tlen, int32_t drop_hdrlen, uint8_t iptos, uint8_t ti_locked)
1059 {
1060         /* Setup packet for input first */
1061         INP_WLOCK_ASSERT(tp->t_inpcb);
1062         m->m_pkthdr.pace_thoff = (uint16_t) ((caddr_t)th - mtod(m, caddr_t));
1063         m->m_pkthdr.pace_tlen = (uint16_t) tlen;
1064         m->m_pkthdr.pace_drphdrlen = drop_hdrlen;
1065         m->m_pkthdr.pace_tos = iptos;
1066         m->m_pkthdr.pace_lock = (uint8_t) ti_locked;
1067         if (tp->t_in_pkt == NULL) {
1068                 tp->t_in_pkt = m;
1069                 tp->t_tail_pkt = m;
1070         } else {
1071                 tp->t_tail_pkt->m_nextpkt = m;
1072                 tp->t_tail_pkt = m;
1073         }
1074 }
1075
1076
1077 int32_t
1078 __tcp_queue_to_input(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th,
1079     int32_t tlen, int32_t drop_hdrlen, uint8_t iptos, uint8_t ti_locked, int32_t line){
1080         struct tcp_hpts_entry *hpts;
1081         int32_t ret;
1082
1083         tcp_queue_pkt_to_input(tp, m, th, tlen, drop_hdrlen, iptos, ti_locked);
1084         hpts = tcp_input_lock(tp->t_inpcb);
1085         ret = __tcp_queue_to_input_locked(tp->t_inpcb, hpts, line);
1086         mtx_unlock(&hpts->p_mtx);
1087         return (ret);
1088 }
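/*
 * For illustration, a sketch of the input redirection described in the
 * block comment at the top of the file: a stack's do_segment handler
 * queues the segment to the hpts instead of processing it inline, and
 * the real work happens later in its tfb_tcp_hpts_do_segment() on the
 * connection's hpts CPU.  The surrounding function is hypothetical;
 * the queueing call is the one defined above:
 *
 * static void
 * example_stack_do_segment(struct mbuf *m, struct tcphdr *th,
 *     struct socket *so, struct tcpcb *tp, int32_t drop_hdrlen,
 *     int32_t tlen, uint8_t iptos, uint8_t ti_locked)
 * {
 *         __tcp_queue_to_input(tp, m, th, tlen, drop_hdrlen, iptos,
 *             ti_locked, __LINE__);
 * }
 */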
1089
1090 void
1091 __tcp_set_inp_to_drop(struct inpcb *inp, uint16_t reason, int32_t line)
1092 {
1093         struct tcp_hpts_entry *hpts;
1094         struct tcpcb *tp;
1095
1096         tp = intotcpcb(inp);
1097         hpts = tcp_input_lock(tp->t_inpcb);
1098         if (inp->inp_in_input == 0) {
1099                 /* Ok we need to set it on the hpts in the current slot */
1100                 hpts_sane_input_insert(hpts, inp, line);
1101                 if (hpts->p_hpts_active == 0) {
1102                         /*
1103                          * Activate the hpts if it is sleeping.
1104                          */
1105                         hpts->p_direct_wake = 1;
1106                         tcp_wakeinput(hpts);
1107                 }
1108         } else if (hpts->p_hpts_active == 0) {
1109                 hpts->p_direct_wake = 1;
1110                 tcp_wakeinput(hpts);
1111         }
1112         inp->inp_hpts_drop_reas = reason;
1113         mtx_unlock(&hpts->p_mtx);
1114 }
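/*
 * And the deferred-drop pattern from the usage notes at the top of the
 * file: rather than calling tcp_drop() while the INP_WLOCK may already
 * be held recursively, a stack marks the connection and lets the hpts
 * input pass perform the drop.  A one-line sketch (the function above
 * takes the connection's inpcb):
 *
 *         __tcp_set_inp_to_drop(tp->t_inpcb, ETIMEDOUT, __LINE__);
 */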
1115
1116 static uint16_t
1117 hpts_random_cpu(struct inpcb *inp){
1118         /*
1119          * No flow type is set; distribute the load randomly.
1120          */
1121         uint16_t cpuid;
1122         uint32_t ran;
1123
1124         /*
1125          * If one has been set, use it; i.e., we want both input and output on the
1126          * same hpts.
1127          */
1128         if (inp->inp_input_cpu_set) {
1129                 return (inp->inp_input_cpu);
1130         } else if (inp->inp_hpts_cpu_set) {
1131                 return (inp->inp_hpts_cpu);
1132         }
1133         /* Nothing is set; use a random number */
1134         ran = arc4random();
1135         cpuid = (ran & 0xffff) % mp_ncpus;
1136         return (cpuid);
1137 }
1138
1139 static uint16_t
1140 hpts_cpuid(struct inpcb *inp){
1141         uint16_t cpuid;
1142
1143
1144         /*
1145          * If one has been set, use it; i.e., we want both input and output on the
1146          * same hpts.
1147          */
1148         if (inp->inp_input_cpu_set) {
1149                 return (inp->inp_input_cpu);
1150         } else if (inp->inp_hpts_cpu_set) {
1151                 return (inp->inp_hpts_cpu);
1152         }
1153         /* If one is set the other must be the same */
1154 #ifdef  RSS
1155         cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
1156         if (cpuid == NETISR_CPUID_NONE)
1157                 return (hpts_random_cpu(inp));
1158         else
1159                 return (cpuid);
1160 #else
1161         /*
1162          * We don't have a flowid -> cpuid mapping, so cheat and just map
1163          * unknown cpuids to curcpu.  Not the best, but apparently better
1164          * than defaulting to swi 0.
1165          */
1166         if (inp->inp_flowtype != M_HASHTYPE_NONE) {
1167                 cpuid = inp->inp_flowid % mp_ncpus;
1168                 return (cpuid);
1169         }
1170         cpuid = hpts_random_cpu(inp);
1171         return (cpuid);
1172 #endif
1173 }
1174
1175 /*
1176  * Do NOT try to optimize the processing of inp's
1177  * by first pulling off all the inp's into a temporary
1178  * list (e.g. TAILQ_CONCAT). If you do that, the subtle
1179  * interactions of switching CPUs will kill you because of
1180  * problems in the linked list manipulation. Basically,
1181  * you would switch CPUs with the hpts mutex locked,
1182  * but then while you were processing one of the inp's,
1183  * some other one that you had switched would get a new
1184  * packet on the different CPU. It would be inserted
1185  * on the new hpts's input list. Creating a temporary
1186  * link in the inp will not fix it either, since
1187  * the other hpts will be doing the same thing and
1188  * you will both end up using the temporary link.
1189  *
1190  * You will die in an ASSERT for tailq corruption if you
1191  * run INVARIANTS, or you will die horribly without
1192  * INVARIANTS in some unknown way with a corrupt linked
1193  * list.
1194  */
1195 static void
1196 tcp_input_data(struct tcp_hpts_entry *hpts, struct timeval *tv)
1197 {
1198         struct mbuf *m, *n;
1199         struct tcpcb *tp;
1200         struct inpcb *inp;
1201         uint16_t drop_reason;
1202         int16_t set_cpu;
1203         uint32_t did_prefetch = 0;
1204         int32_t ti_locked = TI_UNLOCKED;
1205
1206         HPTS_MTX_ASSERT(hpts);
1207         while ((inp = TAILQ_FIRST(&hpts->p_input)) != NULL) {
1208                 HPTS_MTX_ASSERT(hpts);
1209                 hpts_sane_input_remove(hpts, inp, 0);
1210                 if (inp->inp_input_cpu_set == 0) {
1211                         set_cpu = 1;
1212                 } else {
1213                         set_cpu = 0;
1214                 }
1215                 hpts->p_inp = inp;
1216                 drop_reason = inp->inp_hpts_drop_reas;
1217                 inp->inp_in_input = 0;
1218                 tp = intotcpcb(inp);
1219                 mtx_unlock(&hpts->p_mtx);
1220                 CURVNET_SET(tp->t_vnet);
1221                 if (drop_reason) {
1222                         INP_INFO_RLOCK(&V_tcbinfo);
1223                         ti_locked = TI_RLOCKED;
1224                 } else {
1225                         ti_locked = TI_UNLOCKED;
1226                 }
1227                 INP_WLOCK(inp);
1228                 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
1229                     (inp->inp_flags2 & INP_FREED)) {
1230 out:
1231                         hpts->p_inp = NULL;
1232                         if (ti_locked == TI_RLOCKED) {
1233                                 INP_INFO_RUNLOCK(&V_tcbinfo);
1234                         }
1235                         if (in_pcbrele_wlocked(inp) == 0) {
1236                                 INP_WUNLOCK(inp);
1237                         }
1238                         ti_locked = TI_UNLOCKED;
1239                         CURVNET_RESTORE();
1240                         mtx_lock(&hpts->p_mtx);
1241                         continue;
1242                 }
1243                 if ((tp == NULL) || (tp->t_inpcb == NULL)) {
1244                         goto out;
1245                 }
1246                 if (drop_reason) {
1247                         /* This tcb is being destroyed for drop_reason */
1248                         m = tp->t_in_pkt;
1249                         if (m)
1250                                 n = m->m_nextpkt;
1251                         else
1252                                 n = NULL;
1253                         tp->t_in_pkt = NULL;
1254                         while (m) {
1255                                 m_freem(m);
1256                                 m = n;
1257                                 if (m)
1258                                         n = m->m_nextpkt;
1259                         }
1260                         tp = tcp_drop(tp, drop_reason);
1261                         INP_INFO_RUNLOCK(&V_tcbinfo);
1262                         if (tp == NULL) {
1263                                 INP_WLOCK(inp);
1264                         }
1265                         if (in_pcbrele_wlocked(inp) == 0)
1266                                 INP_WUNLOCK(inp);
1267                         CURVNET_RESTORE();
1268                         mtx_lock(&hpts->p_mtx);
1269                         continue;
1270                 }
1271                 if (set_cpu) {
1272                          * Set up so that next time we will move to the right
1273                          * CPU. This should be a rare event. It
1274                          * sometimes happens when we are the client side
1275                          * (usually not the server). Somehow tcp_output()
1276                          * gets called before tcp_do_segment() sets the
1277                          * initial state. This means the r_cpu and r_hpts_cpu
1278                          * are 0. We get on the hpts, and then tcp_input()
1279                          * gets called, setting up the r_cpu to the correct
1280                          * value. The hpts goes off and sees the mismatch.
1281                          * We simply correct it here and the CPU will switch
1282                          * to the new hpts next time the tcb gets added to
1283                          * the hpts (not this time) :-)
1284                          * the hpts (not this time) :-)
1285                          */
1286                         tcp_set_hpts(inp);
1287                 }
1288                 m = tp->t_in_pkt;
1289                 n = NULL;
1290                 if (m != NULL &&
1291                     (m->m_pkthdr.pace_lock == TI_RLOCKED ||
1292                     tp->t_state != TCPS_ESTABLISHED)) {
1293                         ti_locked = TI_RLOCKED;
1294                         if (tcp_hptsi_lock_inpinfo(inp, &tp)) {
1295                                 CURVNET_RESTORE();
1296                                 goto out;
1297                         }
1298                         m = tp->t_in_pkt;
1299                 }
1300                 if (in_newts_every_tcb) {
1301                         if (in_ts_percision)
1302                                 microuptime(tv);
1303                         else
1304                                 getmicrouptime(tv);
1305                 }
1306                 if (tp->t_fb_ptr != NULL) {
1307                         kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1308                         did_prefetch = 1;
1309                 }
1310                 /* Any input work to do, if so do it first */
1311                 if ((m != NULL) && (m == tp->t_in_pkt)) {
1312                         struct tcphdr *th;
1313                         int32_t tlen, drop_hdrlen, nxt_pkt;
1314                         uint8_t iptos;
1315
1316                         n = m->m_nextpkt;
1317                         tp->t_in_pkt = tp->t_tail_pkt = NULL;
1318                         while (m) {
1319                                 th = (struct tcphdr *)(mtod(m, caddr_t)+m->m_pkthdr.pace_thoff);
1320                                 tlen = m->m_pkthdr.pace_tlen;
1321                                 drop_hdrlen = m->m_pkthdr.pace_drphdrlen;
1322                                 iptos = m->m_pkthdr.pace_tos;
1323                                 m->m_nextpkt = NULL;
1324                                 if (n)
1325                                         nxt_pkt = 1;
1326                                 else
1327                                         nxt_pkt = 0;
1328                                 inp->inp_input_calls = 1;
1329                                 if (tp->t_fb->tfb_tcp_hpts_do_segment) {
1330                                         /* Use the hpts specific do_segment */
1331                                         (*tp->t_fb->tfb_tcp_hpts_do_segment) (m, th, inp->inp_socket,
1332                                             tp, drop_hdrlen,
1333                                             tlen, iptos, ti_locked, nxt_pkt, tv);
1334                                 } else {
1335                                         /* Use the default do_segment */
1336                                         (*tp->t_fb->tfb_tcp_do_segment) (m, th, inp->inp_socket,
1337                                             tp, drop_hdrlen,
1338                                             tlen, iptos, ti_locked);
1339                                 }
1340                                 /*
1341                                  * Do segment returns unlocked; we need the
1342                                  * lock again, but we also want some KASSERTs
1343                                  * here.
1344                                  */
1345                                 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1346                                 INP_UNLOCK_ASSERT(inp);
1347                                 m = n;
1348                                 if (m)
1349                                         n = m->m_nextpkt;
1350                                 if (m != NULL &&
1351                                     m->m_pkthdr.pace_lock == TI_RLOCKED) {
1352                                         INP_INFO_RLOCK(&V_tcbinfo);
1353                                         ti_locked = TI_RLOCKED;
1354                                 } else
1355                                         ti_locked = TI_UNLOCKED;
1356                                 INP_WLOCK(inp);
1357                                 /*
1358                                  * Since we have an opening here we must
1359                                  * re-check if the tcb went away while we
1360                                  * were getting the lock(s).
1361                                  */
1362                                 if ((inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) ||
1363                                     (inp->inp_flags2 & INP_FREED)) {
1364                         out_free:
1365                                         while (m) {
1366                                                 m_freem(m);
1367                                                 m = n;
1368                                                 if (m)
1369                                                         n = m->m_nextpkt;
1370                                         }
1371                                         goto out;
1372                                 }
1373                                 /*
1374                                  * Now that we hold the INP lock, check if
1375                                  * we need to upgrade our lock.
1376                                  */
1377                                 if (ti_locked == TI_UNLOCKED &&
1378                                     (tp->t_state != TCPS_ESTABLISHED)) {
1379                                         ti_locked = TI_RLOCKED;
1380                                         if (tcp_hptsi_lock_inpinfo(inp, &tp))
1381                                                 goto out_free;
1382                                 }
1383                         }       /** end while(m) */
1384                 }               /** end if ((m != NULL)  && (m == tp->t_in_pkt)) */
1385                 if (in_pcbrele_wlocked(inp) == 0)
1386                         INP_WUNLOCK(inp);
1387                 if (ti_locked == TI_RLOCKED)
1388                         INP_INFO_RUNLOCK(&V_tcbinfo);
1389                 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
1390                 INP_UNLOCK_ASSERT(inp);
1391                 ti_locked = TI_UNLOCKED;
1392                 mtx_lock(&hpts->p_mtx);
1393                 hpts->p_inp = NULL;
1394                 CURVNET_RESTORE();
1395         }
1396 }
1397
1398 static int
1399 tcp_hpts_est_run(struct tcp_hpts_entry *hpts)
1400 {
1401         int32_t ticks_to_run;
1402
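        /*
         * Work out how many ticks have passed since the last run, clamped
         * to the size of the wheel; -1 means no full tick has gone by.
         */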
1403         if (hpts->p_prevtick && (SEQ_GT(hpts->p_curtick, hpts->p_prevtick))) {
1404                 ticks_to_run = hpts->p_curtick - hpts->p_prevtick;
1405                 if (ticks_to_run >= (NUM_OF_HPTSI_SLOTS - 1)) {
1406                         ticks_to_run = NUM_OF_HPTSI_SLOTS - 2;
1407                 }
1408         } else {
1409                 if (hpts->p_prevtick == hpts->p_curtick) {
1410                         /* This happens when we get woken up right away */
1411                         return (-1);
1412                 }
1413                 ticks_to_run = 1;
1414         }
1415         /* Record where we will be once we catch up */
1416         hpts->p_nxt_slot = (hpts->p_cur_slot + ticks_to_run) % NUM_OF_HPTSI_SLOTS;
1417         if (hpts->p_nxt_slot == hpts->p_cur_slot) {
1418                 panic("Impossible math -- hpts:%p p_nxt_slot:%d p_cur_slot:%d ticks_to_run:%d",
1419                     hpts, hpts->p_nxt_slot, hpts->p_cur_slot, ticks_to_run);
1420         }
1421         return (ticks_to_run);
1422 }
1423
1424 static void
1425 tcp_hptsi(struct tcp_hpts_entry *hpts, struct timeval *ctick)
1426 {
1427         struct tcpcb *tp;
1428         struct inpcb *inp = NULL, *ninp;
1429         struct timeval tv;
1430         int32_t ticks_to_run, i, error, tick_now, interum_tick;
1431         int32_t paced_cnt = 0;
1432         int32_t did_prefetch = 0;
1433         int32_t prefetch_ninp = 0;
1434         int32_t prefetch_tp = 0;
1435         uint32_t cts;
1436         int16_t set_cpu;
1437
1438         HPTS_MTX_ASSERT(hpts);
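        /* Record the tick, usec time and wheel slot this pass starts from. */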
1439         hpts->p_curtick = tcp_tv_to_hptstick(ctick);
1440         cts = tcp_tv_to_usectick(ctick);
1441         memcpy(&tv, ctick, sizeof(struct timeval));
1442         hpts->p_cur_slot = hpts_tick(hpts, 1);
1443
1444         /* Figure out if we missed any ticks */
1445 again:
1446         HPTS_MTX_ASSERT(hpts);
1447         ticks_to_run = tcp_hpts_est_run(hpts);
1448         if (!TAILQ_EMPTY(&hpts->p_input)) {
1449                 tcp_input_data(hpts, &tv);
1450         }
1451 #ifdef INVARIANTS
1452         if (TAILQ_EMPTY(&hpts->p_input) &&
1453             (hpts->p_on_inqueue_cnt != 0)) {
1454                 panic("hpts:%p in_hpts input empty but cnt:%d",
1455                     hpts, hpts->p_on_inqueue_cnt);
1456         }
1457 #endif
1458         HPTS_MTX_ASSERT(hpts);
1459         /* Reset the ticks to run and the time, if we need to */
1460         interum_tick = tcp_gethptstick(&tv);
1461         if (interum_tick != hpts->p_curtick) {
1462                 /* Save off the new time we execute to */
1463                 *ctick = tv;
1464                 hpts->p_curtick = interum_tick;
1465                 cts = tcp_tv_to_usectick(&tv);
1466                 hpts->p_cur_slot = hpts_tick(hpts, 1);
1467                 ticks_to_run = tcp_hpts_est_run(hpts);
1468         }
1469         if (ticks_to_run == -1) {
1470                 goto no_run;
1471         }
1472         if (logging_on) {
1473                 tcp_hpts_log_it(hpts, inp, HPTSLOG_SETTORUN, ticks_to_run, 0);
1474         }
1475         if (hpts->p_on_queue_cnt == 0) {
1476                 goto no_one;
1477         }
1478         HPTS_MTX_ASSERT(hpts);
1479         for (i = 0; i < ticks_to_run; i++) {
1480                 /*
1481                  * Calculate our delay; if there are no extra ticks,
1482                  * there is no delay.
1483                  */
1484                 hpts->p_delayed_by = (ticks_to_run - (i + 1)) * HPTS_TICKS_PER_USEC;
1485                 HPTS_MTX_ASSERT(hpts);
1486                 while ((inp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
1487                         /* For debugging */
1488                         if (logging_on) {
1489                                 tcp_hpts_log_it(hpts, inp, HPTSLOG_HPTSI, ticks_to_run, i);
1490                         }
1491                         hpts->p_inp = inp;
1492                         paced_cnt++;
1493                         if (hpts->p_cur_slot != inp->inp_hptsslot) {
1494                                 panic("Hpts:%p inp:%p slot mis-aligned %u vs %u",
1495                                     hpts, inp, hpts->p_cur_slot, inp->inp_hptsslot);
1496                         }
1497                         /* Now pull it */
1498                         if (inp->inp_hpts_cpu_set == 0) {
1499                                 set_cpu = 1;
1500                         } else {
1501                                 set_cpu = 0;
1502                         }
1503                         hpts_sane_pace_remove(hpts, inp, &hpts->p_hptss[hpts->p_cur_slot], 0);
1504                         if ((ninp = TAILQ_FIRST(&hpts->p_hptss[hpts->p_cur_slot])) != NULL) {
1505                                 /* We prefetch the next inp if possible */
1506                                 kern_prefetch(ninp, &prefetch_ninp);
1507                                 prefetch_ninp = 1;
1508                         }
1509                         if (inp->inp_hpts_request) {
1510                                 /*
1511                                  * This guy is deferred out further in time
1512                                  * than our wheel had room for. Push him
1513                                  * back on the wheel.
1514                                  */
1515                                 int32_t remaining_slots;
1516
1517                                 remaining_slots = ticks_to_run - (i + 1);
1518                                 if (inp->inp_hpts_request > remaining_slots) {
1519                                         /*
1520                                          * Keep INVARIANTS happy by clearing
1521                                          * the flag
1522                                          */
1523                                         tcp_hpts_insert_locked(hpts, inp, inp->inp_hpts_request, cts, __LINE__, NULL, 1);
1524                                         hpts->p_inp = NULL;
1525                                         continue;
1526                                 }
1527                                 inp->inp_hpts_request = 0;
1528                         }
1529                         /*
1530                          * We clear the hpts flag here after dealing with
1531                          * remaining slots. This way anyone looking with the
1532                          * TCB lock will see it is on the hpts until just
1533                          * before we unlock.
1534                          */
1535                         inp->inp_in_hpts = 0;
1536                         mtx_unlock(&hpts->p_mtx);
1537                         INP_WLOCK(inp);
1538                         if (in_pcbrele_wlocked(inp)) {
1539                                 mtx_lock(&hpts->p_mtx);
1540                                 if (logging_on)
1541                                         tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 1);
1542                                 hpts->p_inp = NULL;
1543                                 continue;
1544                         }
1545                         if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
1546 out_now:
1547 #ifdef INVARIANTS
1548                                 if (mtx_owned(&hpts->p_mtx)) {
1549                                         panic("Hpts:%p owns mtx prior-to lock line:%d",
1550                                             hpts, __LINE__);
1551                                 }
1552 #endif
1553                                 INP_WUNLOCK(inp);
1554                                 mtx_lock(&hpts->p_mtx);
1555                                 if (logging_on)
1556                                         tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 3);
1557                                 hpts->p_inp = NULL;
1558                                 continue;
1559                         }
1560                         tp = intotcpcb(inp);
1561                         if ((tp == NULL) || (tp->t_inpcb == NULL)) {
1562                                 goto out_now;
1563                         }
1564                         if (set_cpu) {
1565                                 /*
1566                                  * Set up so that next time we will move to
1567                                  * the right CPU. This should be a rare
1568                                  * event. It sometimes happens when we are
1569                                  * the client side (usually not the
1570                                  * server). Somehow tcp_output() gets called
1571                                  * before tcp_do_segment() sets the
1572                                  * initial state. This means the r_cpu and
1573                                  * r_hpts_cpu are 0. We get on the hpts, and
1574                                  * then tcp_input() gets called setting
1575                                  * r_cpu to the correct value. The hpts
1576                                  * goes off and sees the mis-match. We
1577                                  * simply correct it here and the CPU will
1578                                  * switch to the new hpts next time the tcb
1579                                  * gets added to the hpts (not this one)
1580                                  * :-)
1581                                  */
1582                                 tcp_set_hpts(inp);
1583                         }
1584                         if (out_newts_every_tcb) {
1585                                 struct timeval sv;
1586
1587                                 if (out_ts_percision)
1588                                         microuptime(&sv);
1589                                 else
1590                                         getmicrouptime(&sv);
1591                                 cts = tcp_tv_to_usectick(&sv);
1592                         }
1593                         CURVNET_SET(tp->t_vnet);
1594                         /*
1595                          * There is a hole here: we hold a refcnt on the
1596                          * inp so it is preserved, but to make sure we can
1597                          * safely get at the INP we need to hold the p_mtx
1598                          * above while we pull out the tp/inp. As long as
1599                          * fini gets the lock first we are assured of
1600                          * having a sane INP we can lock and test.
1601                          */
1602 #ifdef INVARIANTS
1603                         if (mtx_owned(&hpts->p_mtx)) {
1604                                 panic("Hpts:%p owns mtx before tcp-output:%d",
1605                                     hpts, __LINE__);
1606                         }
1607 #endif
1608                         if (tp->t_fb_ptr != NULL) {
1609                                 kern_prefetch(tp->t_fb_ptr, &did_prefetch);
1610                                 did_prefetch = 1;
1611                         }
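                             /*
                              * Flag that this output call came from the hpts
                              * and hand the connection to the stack's output
                              * routine, preferring the time-aware variant
                              * when one is provided.
                              */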
1612                         inp->inp_hpts_calls = 1;
1613                         if (tp->t_fb->tfb_tcp_output_wtime != NULL) {
1614                                 error = (*tp->t_fb->tfb_tcp_output_wtime) (tp, &tv);
1615                         } else {
1616                                 error = tp->t_fb->tfb_tcp_output(tp);
1617                         }
1618                         if (ninp && ninp->inp_ppcb) {
1619                                 /*
1620                                  * If we have a nxt inp, see if we can
1621                                  * prefetch its ppcb. Note this may seem
1622                                  * "risky" since we have no locks (other
1623                                  * than the previous inp) and there is no
1624                                  * assurance that ninp was not pulled while
1625                                  * we were processing inp and freed. If this
1626                                  * occurred it could mean that either:
1627                                  *
1628                                  * a) It is NULL (which is fine, we won't go
1629                                  * there) <or> b) It is valid (which is cool,
1630                                  * we will prefetch it) <or> c) The inp got
1631                                  * freed back to the slab which was
1632                                  * reallocated. Then the piece of memory was
1633                                  * re-used and something else (not an
1634                                  * address) is in inp_ppcb. If that occurs
1635                                  * we don't crash, but take a TLB shootdown
1636                                  * performance hit (same as if it was NULL
1637                                  * and we tried to pre-fetch it).
1638                                  *
1639                                  * Considering that the likelihood of <c> is
1640                                  * quite rare we will take a risk on doing
1641                                  * this. If performance drops after testing
1642                                  * we can always take this out. NB: the
1643                                  * kern_prefetch on amd64 actually has
1644                                  * protection against a bad address now via
1645                                  * the DMAP_() tests. This will prevent the
1646                                  * TLB hit, and instead if <c> occurs just
1647                                  * cause us to load cache with a useless
1648                                  * address (to us).
1649                                  */
1650                                 kern_prefetch(ninp->inp_ppcb, &prefetch_tp);
1651                                 prefetch_tp = 1;
1652                         }
1653                         INP_WUNLOCK(inp);
1654                         INP_UNLOCK_ASSERT(inp);
1655                         CURVNET_RESTORE();
1656 #ifdef INVARIANTS
1657                         if (mtx_owned(&hpts->p_mtx)) {
1658                                 panic("Hpts:%p owns mtx prior-to lock line:%d",
1659                                     hpts, __LINE__);
1660                         }
1661 #endif
1662                         mtx_lock(&hpts->p_mtx);
1663                         if (logging_on)
1664                                 tcp_hpts_log_it(hpts, hpts->p_inp, HPTSLOG_INP_DONE, 0, 4);
1665                         hpts->p_inp = NULL;
1666                 }
1667                 HPTS_MTX_ASSERT(hpts);
1668                 hpts->p_inp = NULL;
1669                 hpts->p_cur_slot++;
1670                 if (hpts->p_cur_slot >= NUM_OF_HPTSI_SLOTS) {
1671                         hpts->p_cur_slot = 0;
1672                 }
1673         }
1674 no_one:
1675         HPTS_MTX_ASSERT(hpts);
1676         hpts->p_prevtick = hpts->p_curtick;
1677         hpts->p_delayed_by = 0;
1678         /*
1679          * Check to see if we took an excess amount of time and need to run
1680          * more ticks (if we did not hit ENOBUFS).
1681          */
1682         /* Re-run any input that may be there */
1683         (void)tcp_gethptstick(&tv);
1684         if (!TAILQ_EMPTY(&hpts->p_input)) {
1685                 tcp_input_data(hpts, &tv);
1686         }
1687 #ifdef INVARIANTS
1688         if (TAILQ_EMPTY(&hpts->p_input) &&
1689             (hpts->p_on_inqueue_cnt != 0)) {
1690                 panic("hpts:%p in_hpts input empty but cnt:%d",
1691                     hpts, hpts->p_on_inqueue_cnt);
1692         }
1693 #endif
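        /*
         * If we fell a full tick or more behind while working, loop back
         * and run the additional ticks.
         */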
1694         tick_now = tcp_gethptstick(&tv);
1695         if (SEQ_GT(tick_now, hpts->p_prevtick)) {
1696                 struct timeval res;
1697
1698                 /* Did we really spend a full tick or more in here? */
1699                 timersub(&tv, ctick, &res);
1700                 if (res.tv_sec || (res.tv_usec >= HPTS_TICKS_PER_USEC)) {
1701                         counter_u64_add(hpts_loops, 1);
1702                         if (logging_on) {
1703                                 tcp_hpts_log_it(hpts, inp, HPTSLOG_TOLONG, (uint32_t) res.tv_usec, tick_now);
1704                         }
1705                         *ctick = res;
1706                         hpts->p_curtick = tick_now;
1707                         goto again;
1708                 }
1709         }
1710 no_run:
1711         {
1712                 uint32_t t = 0, i, fnd = 0;
1713
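                     /*
                      * Work out how long this hpts may sleep before the
                      * wheel needs service again.
                      */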
1714                 if (hpts->p_on_queue_cnt) {
1715
1716
1717                         /*
1718                          * Find the next slot that is occupied and use its
1719                          * distance as the sleep time.
1720                          */
1721                         for (i = 1, t = hpts->p_nxt_slot; i < NUM_OF_HPTSI_SLOTS; i++) {
1722                                 if (TAILQ_EMPTY(&hpts->p_hptss[t]) == 0) {
1723                                         fnd = 1;
1724                                         break;
1725                                 }
1726                                 t = (t + 1) % NUM_OF_HPTSI_SLOTS;
1727                         }
1728                         if (fnd) {
1729                                 hpts->p_hpts_sleep_time = i;
1730                         } else {
1731                                 counter_u64_add(back_tosleep, 1);
1732 #ifdef INVARIANTS
1733                                 panic("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt);
1734 #endif
1735                                 hpts->p_on_queue_cnt = 0;
1736                                 goto non_found;
1737                         }
1738                         t++;
1739                 } else {
1740                         /* No one on the wheel; sleep for all but 2 slots */
1741 non_found:
1742                         if (hpts_sleep_max == 0)
1743                                 hpts_sleep_max = 1;
1744                         hpts->p_hpts_sleep_time = min((NUM_OF_HPTSI_SLOTS - 2), hpts_sleep_max);
1745                         t = 0;
1746                 }
1747                 if (logging_on) {
1748                         tcp_hpts_log_it(hpts, inp, HPTSLOG_SLEEPSET, t, (hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC));
1749                 }
1750         }
1751 }
1752
1753 void
1754 __tcp_set_hpts(struct inpcb *inp, int32_t line)
1755 {
1756         struct tcp_hpts_entry *hpts;
1757
1758         INP_WLOCK_ASSERT(inp);
1759         hpts = tcp_hpts_lock(inp);
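        /*
         * If the connection is not on the wheel and no hpts CPU has been
         * chosen yet, pick one now and latch it.
         */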
1760         if ((inp->inp_in_hpts == 0) &&
1761             (inp->inp_hpts_cpu_set == 0)) {
1762                 inp->inp_hpts_cpu = hpts_cpuid(inp);
1763                 inp->inp_hpts_cpu_set = 1;
1764         }
1765         mtx_unlock(&hpts->p_mtx);
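        /* Now do the same for the input-processing CPU. */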
1766         hpts = tcp_input_lock(inp);
1767         if ((inp->inp_input_cpu_set == 0) &&
1768             (inp->inp_in_input == 0)) {
1769                 inp->inp_input_cpu = hpts_cpuid(inp);
1770                 inp->inp_input_cpu_set = 1;
1771         }
1772         mtx_unlock(&hpts->p_mtx);
1773 }
1774
1775 uint16_t
1776 tcp_hpts_delayedby(struct inpcb *inp){
1777         return (tcp_pace.rp_ent[inp->inp_hpts_cpu]->p_delayed_by);
1778 }
1779
1780 static void
1781 tcp_hpts_thread(void *ctx)
1782 {
1783         struct tcp_hpts_entry *hpts;
1784         struct timeval tv;
1785         sbintime_t sb;
1786
1787         hpts = (struct tcp_hpts_entry *)ctx;
1788         mtx_lock(&hpts->p_mtx);
1789         if (hpts->p_direct_wake) {
1790                 /* Signaled by input */
1791                 if (logging_on)
1792                         tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 1, 1);
1793                 callout_stop(&hpts->co);
1794         } else {
1795                 /* Timed out */
1796                 if (callout_pending(&hpts->co) ||
1797                     !callout_active(&hpts->co)) {
1798                         if (logging_on)
1799                                 tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 2, 2);
1800                         mtx_unlock(&hpts->p_mtx);
1801                         return;
1802                 }
1803                 callout_deactivate(&hpts->co);
1804                 if (logging_on)
1805                         tcp_hpts_log_it(hpts, NULL, HPTSLOG_AWAKE, 3, 3);
1806         }
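        /*
         * Mark ourselves active and make one pass over the wheel at the
         * current tick.
         */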
1807         hpts->p_hpts_active = 1;
1808         (void)tcp_gethptstick(&tv);
1809         tcp_hptsi(hpts, &tv);
1810         HPTS_MTX_ASSERT(hpts);
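        /*
         * Turn the sleep time chosen by tcp_hptsi() into a timeval,
         * honoring the minimum sleep time if one is configured.
         */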
1811         tv.tv_sec = 0;
1812         tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
1813         if (tcp_min_hptsi_time && (tv.tv_usec < tcp_min_hptsi_time)) {
1814                 tv.tv_usec = tcp_min_hptsi_time;
1815                 hpts->p_on_min_sleep = 1;
1816         } else {
1817                 /* Clear the min sleep flag */
1818                 hpts->p_on_min_sleep = 0;
1819         }
1820         hpts->p_hpts_active = 0;
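        /*
         * Re-arm the callout for the next wakeup, going through the swi
         * handler unless we are configured to skip it.
         */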
1821         sb = tvtosbt(tv);
1822         if (tcp_hpts_callout_skip_swi == 0) {
1823                 callout_reset_sbt_on(&hpts->co, sb, 0,
1824                     hpts_timeout_swi, hpts, hpts->p_cpu,
1825                     (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1826         } else {
1827                 callout_reset_sbt_on(&hpts->co, sb, 0,
1828                     hpts_timeout_dir, hpts,
1829                     hpts->p_cpu,
1830                     C_PREL(tcp_hpts_precision));
1831         }
1832         hpts->p_direct_wake = 0;
1833         mtx_unlock(&hpts->p_mtx);
1834 }
1835
1836 #undef  timersub
1837
1838 static void
1839 tcp_init_hptsi(void *st)
1840 {
1841         int32_t i, j, error, bound = 0, created = 0;
1842         size_t sz, asz;
1843         struct timeval tv;
1844         sbintime_t sb;
1845         struct tcp_hpts_entry *hpts;
1846         char unit[16];
1847         uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
1848
1849         tcp_pace.rp_proc = NULL;
1850         tcp_pace.rp_num_hptss = ncpus;
1851         hpts_loops = counter_u64_alloc(M_WAITOK);
1852         back_tosleep = counter_u64_alloc(M_WAITOK);
1853
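        /*
         * Allocate the per-hpts entries (one per CPU) and a wheel of
         * NUM_OF_HPTSI_SLOTS slots for each entry.
         */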
1854         sz = (tcp_pace.rp_num_hptss * sizeof(struct tcp_hpts_entry *));
1855         tcp_pace.rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
1856         asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
1857         for (i = 0; i < tcp_pace.rp_num_hptss; i++) {
1858                 tcp_pace.rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
1859                     M_TCPHPTS, M_WAITOK | M_ZERO);
1860                 tcp_pace.rp_ent[i]->p_hptss = malloc(asz,
1861                     M_TCPHPTS, M_WAITOK);
1862                 hpts = tcp_pace.rp_ent[i];
1863                 /*
1864                  * Init all the hpts structures that are not specifically
1865                  * zero'd by the allocations. Also attach them to the
1866                  * appropriate sysctl block.
1867                  */
1868                 mtx_init(&hpts->p_mtx, "tcp_hpts_lck",
1869                     "hpts", MTX_DEF | MTX_DUPOK);
1870                 TAILQ_INIT(&hpts->p_input);
1871                 for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
1872                         TAILQ_INIT(&hpts->p_hptss[j]);
1873                 }
1874                 sysctl_ctx_init(&hpts->hpts_ctx);
1875                 sprintf(unit, "%d", i);
1876                 hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
1877                     SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
1878                     OID_AUTO,
1879                     unit,
1880                     CTLFLAG_RW, 0,
1881                     "");
1882                 SYSCTL_ADD_INT(&hpts->hpts_ctx,
1883                     SYSCTL_CHILDREN(hpts->hpts_root),
1884                     OID_AUTO, "in_qcnt", CTLFLAG_RD,
1885                     &hpts->p_on_inqueue_cnt, 0,
1886                     "Count TCB's awaiting input processing");
1887                 SYSCTL_ADD_INT(&hpts->hpts_ctx,
1888                     SYSCTL_CHILDREN(hpts->hpts_root),
1889                     OID_AUTO, "out_qcnt", CTLFLAG_RD,
1890                     &hpts->p_on_queue_cnt, 0,
1891                     "Count TCB's awaiting output processing");
1892                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1893                     SYSCTL_CHILDREN(hpts->hpts_root),
1894                     OID_AUTO, "active", CTLFLAG_RD,
1895                     &hpts->p_hpts_active, 0,
1896                     "Is the hpts active");
1897                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1898                     SYSCTL_CHILDREN(hpts->hpts_root),
1899                     OID_AUTO, "curslot", CTLFLAG_RD,
1900                     &hpts->p_cur_slot, 0,
1901                     "What the current slot is if active");
1902                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1903                     SYSCTL_CHILDREN(hpts->hpts_root),
1904                     OID_AUTO, "curtick", CTLFLAG_RD,
1905                     &hpts->p_curtick, 0,
1906                     "What the current tick is, if active");
1907                 SYSCTL_ADD_UINT(&hpts->hpts_ctx,
1908                     SYSCTL_CHILDREN(hpts->hpts_root),
1909                     OID_AUTO, "logsize", CTLFLAG_RD,
1910                     &hpts->p_logsize, 0,
1911                     "Hpts logging buffer size");
1912                 hpts->p_hpts_sleep_time = NUM_OF_HPTSI_SLOTS - 2;
1913                 hpts->p_num = i;
1914                 hpts->p_prevtick = hpts->p_curtick = tcp_gethptstick(&tv);
1915                 hpts->p_prevtick -= 1;
1916                 hpts->p_prevtick %= NUM_OF_HPTSI_SLOTS;
1917                 hpts->p_cpu = 0xffff;
1918                 hpts->p_nxt_slot = 1;
1919                 hpts->p_logsize = tcp_hpts_logging_size;
1920                 if (hpts->p_logsize) {
1921                         sz = (sizeof(struct hpts_log) * hpts->p_logsize);
1922                         hpts->p_log = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
1923                 }
1924                 callout_init(&hpts->co, 1);
1925         }
1926         /*
1927          * Now let's start ithreads to handle the hptss.
1928          */
1929         CPU_FOREACH(i) {
1930                 hpts = tcp_pace.rp_ent[i];
1931                 hpts->p_cpu = i;
1932                 error = swi_add(&hpts->ie, "hpts",
1933                     tcp_hpts_thread, (void *)hpts,
1934                     SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
1935                 if (error) {
1936                         panic("Can't add hpts:%p i:%d err:%d",
1937                             hpts, i, error);
1938                 }
1939                 created++;
1940                 if (tcp_bind_threads) {
1941                         if (intr_event_bind(hpts->ie, i) == 0)
1942                                 bound++;
1943                 }
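                     /* Arm the initial callout for this hpts. */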
1944                 tv.tv_sec = 0;
1945                 tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_USEC;
1946                 sb = tvtosbt(tv);
1947                 if (tcp_hpts_callout_skip_swi == 0) {
1948                         callout_reset_sbt_on(&hpts->co, sb, 0,
1949                             hpts_timeout_swi, hpts, hpts->p_cpu,
1950                             (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
1951                 } else {
1952                         callout_reset_sbt_on(&hpts->co, sb, 0,
1953                             hpts_timeout_dir, hpts,
1954                             hpts->p_cpu,
1955                             C_PREL(tcp_hpts_precision));
1956                 }
1957         }
1958         printf("TCP Hpts created %d swi interrupt threads and bound %d\n",
1959             created, bound);
1960         return;
1961 }
1962
1963 SYSINIT(tcphptsi, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, tcp_init_hptsi, NULL);
1964 MODULE_VERSION(tcphpts, 1);