1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote
16  *    products derived from this software without specific prior written
17  *    permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31
32 /*
33  * The tcp_hostcache moves the tcp-specific cached metrics from the routing
34  * table to a dedicated structure indexed by the remote IP address.  It keeps
35  * information on the measured TCP parameters of past TCP sessions to allow
36  * better initial start values to be used with later connections to/from the
37  * same source.  Depending on the network parameters (delay, max MTU,
38  * congestion window) between local and remote sites, this can lead to
39  * significant speed-ups for new TCP connections after the first one.
40  *
41  * Due to the tcp_hostcache, all TCP-specific metrics information in the
42  * routing table has been removed.  The inpcb no longer keeps a pointer to
43  * the routing entry, and protocol-initiated route cloning has been removed
44  * as well.  With these changes, the routing table has gone back to being
45  * more lightweight and only carries information related to packet forwarding.
46  *
47  * tcp_hostcache is designed for concurrent access under high contention in
48  * SMP environments.  Each bucket row has its own lock, so multiple lookups
49  * and modifications can proceed in parallel as long as they fall into
50  * different bucket rows.  If a request for insertion of a new
51  * record can't be satisfied, it simply returns an empty structure.  Nobody
52  * and nothing outside of tcp_hostcache.c will ever point directly to any
53  * entry in the tcp_hostcache.  All communication is done in an
54  * object-oriented way and only functions of tcp_hostcache will manipulate
55  * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
56  * concurrent access situations.  Since tcp_hostcache is only caching
57  * information, there are no fatal consequences if we either can't satisfy
58  * any particular request or have to drop/overwrite an existing entry because
59  * of bucket-limit memory constraints.
60  */
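/*
 * A minimal sketch of how a consumer is expected to use the external
 * interface (illustrative only; the real call sites live in the TCP input
 * and connection teardown paths, and "inp->inp_inc" merely stands in for
 * whatever struct in_conninfo the caller has at hand):
 *
 *	struct hc_metrics_lite metrics;
 *
 *	tcp_hc_get(&inp->inp_inc, &metrics);	zeroed fields mean "unknown"
 *	... seed the new connection from metrics ...
 *	tcp_hc_update(&inp->inp_inc, &metrics);	merge measured values back
 */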
61
62 /*
63  * Many thanks to jlemon for the basic structure of tcp_syncache, which is
64  * followed here.
65  */
66
67 #include <sys/cdefs.h>
68 __FBSDID("$FreeBSD$");
69
70 #include "opt_inet6.h"
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/jail.h>
75 #include <sys/kernel.h>
76 #include <sys/lock.h>
77 #include <sys/mutex.h>
78 #include <sys/malloc.h>
79 #include <sys/proc.h>
80 #include <sys/sbuf.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 #include <sys/sysctl.h>
84
85 #include <net/if.h>
86 #include <net/if_var.h>
87 #include <net/route.h>
88 #include <net/vnet.h>
89
90 #include <netinet/in.h>
91 #include <netinet/in_systm.h>
92 #include <netinet/ip.h>
93 #include <netinet/in_var.h>
94 #include <netinet/in_pcb.h>
95 #include <netinet/ip_var.h>
96 #ifdef INET6
97 #include <netinet/ip6.h>
98 #include <netinet6/ip6_var.h>
99 #endif
100 #include <netinet/tcp.h>
101 #include <netinet/tcp_var.h>
102 #include <netinet/tcp_hostcache.h>
103 #ifdef INET6
104 #include <netinet6/tcp6_var.h>
105 #endif
106
107 #include <vm/uma.h>
108
109 /* Arbitrary values */
110 #define TCP_HOSTCACHE_HASHSIZE          512
111 #define TCP_HOSTCACHE_BUCKETLIMIT       30
112 #define TCP_HOSTCACHE_EXPIRE            60*60   /* one hour */
113 #define TCP_HOSTCACHE_PRUNE             5*60    /* every 5 minutes */
114
115 VNET_DEFINE_STATIC(struct tcp_hostcache, tcp_hostcache);
116 #define V_tcp_hostcache         VNET(tcp_hostcache)
117
118 VNET_DEFINE_STATIC(struct callout, tcp_hc_callout);
119 #define V_tcp_hc_callout        VNET(tcp_hc_callout)
120
121 static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
122 static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
123 static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
124 static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
125 static void tcp_hc_purge_internal(int);
126 static void tcp_hc_purge(void *);
127
128 static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache, CTLFLAG_RW, 0,
129     "TCP Host cache");
130
131 VNET_DEFINE(int, tcp_use_hostcache) = 1;
132 #define V_tcp_use_hostcache  VNET(tcp_use_hostcache)
133 SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
134     &VNET_NAME(tcp_use_hostcache), 0,
135     "Enable the TCP hostcache");
136
137 SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
138     &VNET_NAME(tcp_hostcache.cache_limit), 0,
139     "Overall entry limit for hostcache");
140
141 SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
142     &VNET_NAME(tcp_hostcache.hashsize), 0,
143     "Size of TCP hostcache hashtable");
144
145 SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
146     CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
147     "Per-bucket hash limit for hostcache");
148
149 SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
150     &VNET_NAME(tcp_hostcache.cache_count), 0,
151     "Current number of entries in hostcache");
152
153 SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
154     &VNET_NAME(tcp_hostcache.expire), 0,
155     "Expire time of TCP hostcache entries");
156
157 SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_VNET | CTLFLAG_RW,
158     &VNET_NAME(tcp_hostcache.prune), 0,
159     "Time between purge runs");
160
161 SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
162     &VNET_NAME(tcp_hostcache.purgeall), 0,
163     "Expire all entires on next purge run");
164
165 SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
166     CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
167     sysctl_tcp_hc_list, "A", "List of all hostcache entries");
168
169 SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
170     CTLTYPE_INT | CTLFLAG_RW, NULL, 0,
171     sysctl_tcp_hc_purgenow, "I", "Immediately purge all entries");
172
173 static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");
174
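/*
 * Both hash macros fold the foreign address down to a bucket index; the
 * result is masked with hashmask, which is why the hash table size must be
 * a power of two.
 */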
175 #define HOSTCACHE_HASH(ip) \
176         (((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) &  \
177           V_tcp_hostcache.hashmask)
178
179 /* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
180 #define HOSTCACHE_HASH6(ip6)                            \
181         (((ip6)->s6_addr32[0] ^                         \
182           (ip6)->s6_addr32[1] ^                         \
183           (ip6)->s6_addr32[2] ^                         \
184           (ip6)->s6_addr32[3]) &                        \
185          V_tcp_hostcache.hashmask)
186
187 #define THC_LOCK(lp)            mtx_lock(lp)
188 #define THC_UNLOCK(lp)          mtx_unlock(lp)
189
190 void
191 tcp_hc_init(void)
192 {
193         u_int cache_limit;
194         int i;
195
196         /*
197          * Initialize hostcache structures.
198          */
199         atomic_store_int(&V_tcp_hostcache.cache_count, 0);
200         V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
201         V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
202         V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
203         V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
204
205         TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
206             &V_tcp_hostcache.hashsize);
207         if (!powerof2(V_tcp_hostcache.hashsize)) {
208                 printf("WARNING: hostcache hash size is not a power of 2.\n");
209                 V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
210         }
211         V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;
212
213         TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
214             &V_tcp_hostcache.bucket_limit);
215
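        /*
         * The overall cache limit defaults to hashsize * bucket_limit and
         * may be lowered, but never raised, by the cachelimit tunable.
         */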
216         cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
217         V_tcp_hostcache.cache_limit = cache_limit;
218         TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
219             &V_tcp_hostcache.cache_limit);
220         if (V_tcp_hostcache.cache_limit > cache_limit)
221                 V_tcp_hostcache.cache_limit = cache_limit;
222
223         /*
224          * Allocate the hash table.
225          */
226         V_tcp_hostcache.hashbase = (struct hc_head *)
227             malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
228                    M_HOSTCACHE, M_WAITOK | M_ZERO);
229
230         /*
231          * Initialize the hash buckets.
232          */
233         for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
234                 TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
235                 V_tcp_hostcache.hashbase[i].hch_length = 0;
236                 mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
237                           NULL, MTX_DEF);
238         }
239
240         /*
241          * Allocate the hostcache entries.
242          */
243         V_tcp_hostcache.zone =
244             uma_zcreate("hostcache", sizeof(struct hc_metrics),
245             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
246         uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
247
248         /*
249          * Set up periodic cache cleanup.
250          */
251         callout_init(&V_tcp_hc_callout, 1);
252         callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
253             tcp_hc_purge, curvnet);
254 }
255
256 #ifdef VIMAGE
257 void
258 tcp_hc_destroy(void)
259 {
260         int i;
261
262         callout_drain(&V_tcp_hc_callout);
263
264         /* Purge all hc entries. */
265         tcp_hc_purge_internal(1);
266
267         /* Free the uma zone and the allocated hash table. */
268         uma_zdestroy(V_tcp_hostcache.zone);
269
270         for (i = 0; i < V_tcp_hostcache.hashsize; i++)
271                 mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
272         free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
273 }
274 #endif
275
276 /*
277  * Internal function: look up an entry in the hostcache or return NULL.
278  *
279  * If an entry has been returned, the caller becomes responsible for
280  * unlocking the bucket row after he is done reading/modifying the entry.
281  */
282 static struct hc_metrics *
283 tcp_hc_lookup(struct in_conninfo *inc)
284 {
285         int hash;
286         struct hc_head *hc_head;
287         struct hc_metrics *hc_entry;
288
289         if (!V_tcp_use_hostcache)
290                 return NULL;
291
292         KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));
293
294         /*
295          * Hash the foreign ip address.
296          */
297         if (inc->inc_flags & INC_ISIPV6)
298                 hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
299         else
300                 hash = HOSTCACHE_HASH(&inc->inc_faddr);
301
302         hc_head = &V_tcp_hostcache.hashbase[hash];
303
304         /*
305          * Acquire lock for this bucket row; we release the lock if we don't
306          * find an entry, otherwise the caller has to unlock after he is
307          * done.
308          */
309         THC_LOCK(&hc_head->hch_mtx);
310
311         /*
312          * Iterate through entries in bucket row looking for a match.
313          */
314         TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
315                 if (inc->inc_flags & INC_ISIPV6) {
316                         /* XXX: check ip6_zoneid */
317                         if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
318                             sizeof(inc->inc6_faddr)) == 0)
319                                 return hc_entry;
320                 } else {
321                         if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
322                             sizeof(inc->inc_faddr)) == 0)
323                                 return hc_entry;
324                 }
325         }
326
327         /*
328          * We were unsuccessful and didn't find anything.
329          */
330         THC_UNLOCK(&hc_head->hch_mtx);
331         return NULL;
332 }
333
334 /*
335  * Internal function: insert an entry into the hostcache or return NULL if
336  * unable to allocate a new one.
337  *
338  * If an entry has been returned, the caller becomes responsible for
339  * unlocking the bucket row after he is done reading/modifying the entry.
340  */
341 static struct hc_metrics *
342 tcp_hc_insert(struct in_conninfo *inc)
343 {
344         int hash;
345         struct hc_head *hc_head;
346         struct hc_metrics *hc_entry;
347
348         if (!V_tcp_use_hostcache)
349                 return NULL;
350
351         KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));
352
353         /*
354          * Hash the foreign ip address.
355          */
356         if (inc->inc_flags & INC_ISIPV6)
357                 hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
358         else
359                 hash = HOSTCACHE_HASH(&inc->inc_faddr);
360
361         hc_head = &V_tcp_hostcache.hashbase[hash];
362
363         /*
364          * Acquire lock for this bucket row; we release the lock if we don't
365          * find an entry, otherwise the caller has to unlock after he is
366          * done.
367          */
368         THC_LOCK(&hc_head->hch_mtx);
369
370         /*
371          * If the bucket limit is reached, reuse the least-used element.
372          */
373         if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
374             atomic_load_int(&V_tcp_hostcache.cache_count) >= V_tcp_hostcache.cache_limit) {
375                 hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
376                 /*
377                  * At first we were dropping the last element, just to
378                  * reacquire it in the next two lines again, which isn't very
379                  * efficient.  Instead just reuse the least used element.
380                  * We may drop something that is still "in-use" but we can be
381                  * "lossy".
382                  * Just give up if this bucket row is empty and we don't have
383                  * anything to replace.
384                  */
385                 if (hc_entry == NULL) {
386                         THC_UNLOCK(&hc_head->hch_mtx);
387                         return NULL;
388                 }
389                 TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
390                 V_tcp_hostcache.hashbase[hash].hch_length--;
391                 atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
392                 TCPSTAT_INC(tcps_hc_bucketoverflow);
393 #if 0
394                 uma_zfree(V_tcp_hostcache.zone, hc_entry);
395 #endif
396         } else {
397                 /*
398                  * Allocate a new entry, or balk if not possible.
399                  */
400                 hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
401                 if (hc_entry == NULL) {
402                         THC_UNLOCK(&hc_head->hch_mtx);
403                         return NULL;
404                 }
405         }
406
407         /*
408          * Initialize basic information of hostcache entry.
409          */
410         bzero(hc_entry, sizeof(*hc_entry));
411         if (inc->inc_flags & INC_ISIPV6) {
412                 hc_entry->ip6 = inc->inc6_faddr;
413                 hc_entry->ip6_zoneid = inc->inc6_zoneid;
414         } else
415                 hc_entry->ip4 = inc->inc_faddr;
416         hc_entry->rmx_head = hc_head;
417         hc_entry->rmx_expire = V_tcp_hostcache.expire;
418
419         /*
420          * Put it upfront.
421          */
422         TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
423         V_tcp_hostcache.hashbase[hash].hch_length++;
424         atomic_add_int(&V_tcp_hostcache.cache_count, 1);
425         TCPSTAT_INC(tcps_hc_added);
426
427         return hc_entry;
428 }
429
430 /*
431  * External function: look up an entry in the hostcache and fill out the
432  * supplied TCP metrics structure.  The structure is zeroed when no entry is
433  * found; fields for which no value has been cached remain zero.
434  */
435 void
436 tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
437 {
438         struct hc_metrics *hc_entry;
439
440         if (!V_tcp_use_hostcache) {
441                 bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
442                 return;
443         }
444
445         /*
446          * Find the right bucket.
447          */
448         hc_entry = tcp_hc_lookup(inc);
449
450         /*
451          * If we don't have an existing object.
452          */
453         if (hc_entry == NULL) {
454                 bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
455                 return;
456         }
457         hc_entry->rmx_hits++;
458         hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
459
460         hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
461         hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
462         hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
463         hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
464         hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
465         hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
466         hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;
467
468         /*
469          * Unlock bucket row.
470          */
471         THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
472 }
473
474 /*
475  * External function: look up an entry in the hostcache and return the
476  * discovered path MTU.  Returns 0 if no entry is found or value is not
477  * set.
478  */
479 uint32_t
480 tcp_hc_getmtu(struct in_conninfo *inc)
481 {
482         struct hc_metrics *hc_entry;
483         uint32_t mtu;
484
485         if (!V_tcp_use_hostcache)
486                 return 0;
487
488         hc_entry = tcp_hc_lookup(inc);
489         if (hc_entry == NULL) {
490                 return 0;
491         }
492         hc_entry->rmx_hits++;
493         hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
494
495         mtu = hc_entry->rmx_mtu;
496         THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
497         return mtu;
498 }
499
500 /*
501  * External function: update the MTU value of an entry in the hostcache.
502  * Creates a new entry if none was found.
503  */
504 void
505 tcp_hc_updatemtu(struct in_conninfo *inc, uint32_t mtu)
506 {
507         struct hc_metrics *hc_entry;
508
509         if (!V_tcp_use_hostcache)
510                 return;
511
512         /*
513          * Find the right bucket.
514          */
515         hc_entry = tcp_hc_lookup(inc);
516
517         /*
518          * If we don't have an existing object, try to insert a new one.
519          */
520         if (hc_entry == NULL) {
521                 hc_entry = tcp_hc_insert(inc);
522                 if (hc_entry == NULL)
523                         return;
524         }
525         hc_entry->rmx_updates++;
526         hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
527
528         hc_entry->rmx_mtu = mtu;
529
530         /*
531          * Put it upfront so we find it faster next time.
532          */
533         TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
534         TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
535
536         /*
537          * Unlock bucket row.
538          */
539         THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
540 }
541
542 /*
543  * External function: update the TCP metrics of an entry in the hostcache.
544  * Creates a new entry if none was found.
545  */
546 void
547 tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
548 {
549         struct hc_metrics *hc_entry;
550
551         if (!V_tcp_use_hostcache)
552                 return;
553
554         hc_entry = tcp_hc_lookup(inc);
555         if (hc_entry == NULL) {
556                 hc_entry = tcp_hc_insert(inc);
557                 if (hc_entry == NULL)
558                         return;
559         }
560         hc_entry->rmx_updates++;
561         hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
562
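        /*
         * Merge each new sample with the cached value by straight 50/50
         * averaging; a zero sample means "no measurement" and leaves the
         * cached value untouched.
         */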
563         if (hcml->rmx_rtt != 0) {
564                 if (hc_entry->rmx_rtt == 0)
565                         hc_entry->rmx_rtt = hcml->rmx_rtt;
566                 else
567                         hc_entry->rmx_rtt = ((uint64_t)hc_entry->rmx_rtt +
568                             (uint64_t)hcml->rmx_rtt) / 2;
569                 TCPSTAT_INC(tcps_cachedrtt);
570         }
571         if (hcml->rmx_rttvar != 0) {
572                 if (hc_entry->rmx_rttvar == 0)
573                         hc_entry->rmx_rttvar = hcml->rmx_rttvar;
574                 else
575                         hc_entry->rmx_rttvar = ((uint64_t)hc_entry->rmx_rttvar +
576                             (uint64_t)hcml->rmx_rttvar) / 2;
577                 TCPSTAT_INC(tcps_cachedrttvar);
578         }
579         if (hcml->rmx_ssthresh != 0) {
580                 if (hc_entry->rmx_ssthresh == 0)
581                         hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
582                 else
583                         hc_entry->rmx_ssthresh =
584                             (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
585                 TCPSTAT_INC(tcps_cachedssthresh);
586         }
587         if (hcml->rmx_cwnd != 0) {
588                 if (hc_entry->rmx_cwnd == 0)
589                         hc_entry->rmx_cwnd = hcml->rmx_cwnd;
590                 else
591                         hc_entry->rmx_cwnd = ((uint64_t)hc_entry->rmx_cwnd +
592                             (uint64_t)hcml->rmx_cwnd) / 2;
593                 /* TCPSTAT_INC(tcps_cachedcwnd); */
594         }
595         if (hcml->rmx_sendpipe != 0) {
596                 if (hc_entry->rmx_sendpipe == 0)
597                         hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
598                 else
599                         hc_entry->rmx_sendpipe =
600                             ((uint64_t)hc_entry->rmx_sendpipe +
601                             (uint64_t)hcml->rmx_sendpipe) / 2;
602                 /* TCPSTAT_INC(tcps_cachedsendpipe); */
603         }
604         if (hcml->rmx_recvpipe != 0) {
605                 if (hc_entry->rmx_recvpipe == 0)
606                         hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
607                 else
608                         hc_entry->rmx_recvpipe =
609                             ((uint64_t)hc_entry->rmx_recvpipe +
610                             (uint64_t)hcml->rmx_recvpipe) / 2;
611                 /* TCPSTAT_INC(tcps_cachedrecvpipe); */
612         }
613
614         TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
615         TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
616         THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
617 }
618
619 /*
620  * Sysctl function: prints the list and values of all hostcache entries in
621  * unsorted order.
622  */
623 static int
624 sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
625 {
626         const int linesize = 128;
627         struct sbuf sb;
628         int i, error, len;
629         struct hc_metrics *hc_entry;
630         char ip4buf[INET_ADDRSTRLEN];
631 #ifdef INET6
632         char ip6buf[INET6_ADDRSTRLEN];
633 #endif
634
635         if (jailed_without_vnet(curthread->td_ucred) != 0)
636                 return (EPERM);
637
638         /* Optimize the buffer length query from sbin/sysctl. */
639         if (req->oldptr == NULL) {
640                 len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *
641                         linesize;
642                 return (SYSCTL_OUT(req, NULL, len));
643         }
644
645         error = sysctl_wire_old_buffer(req, 0);
646         if (error != 0) {
647                 return (error);
648         }
649
650         /* Use a buffer sized for one full bucket */
651         sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit *
652                 linesize, req);
653
654         sbuf_printf(&sb,
655                 "\nIP address        MTU SSTHRESH      RTT   RTTVAR "
656                 "    CWND SENDPIPE RECVPIPE HITS  UPD  EXP\n");
657         sbuf_drain(&sb);
658
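/*
 * Cached RTT values are stored in units of 1/(hz * TCP_RTT_SCALE) seconds;
 * multiplying by RTM_RTTUNIT / (hz * TCP_RTT_SCALE) below converts them to
 * microseconds, which msec() then rounds to milliseconds.
 */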
659 #define msec(u) (((u) + 500) / 1000)
660         for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
661                 THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
662                 TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
663                               rmx_q) {
664                         sbuf_printf(&sb,
665                             "%-15s %5u %8u %6lums %6lums %8u %8u %8u %4lu "
666                             "%4lu %4i\n",
667                             hc_entry->ip4.s_addr ?
668                                 inet_ntoa_r(hc_entry->ip4, ip4buf) :
669 #ifdef INET6
670                                 ip6_sprintf(ip6buf, &hc_entry->ip6),
671 #else
672                                 "IPv6?",
673 #endif
674                             hc_entry->rmx_mtu,
675                             hc_entry->rmx_ssthresh,
676                             msec((u_long)hc_entry->rmx_rtt *
677                                 (RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
678                             msec((u_long)hc_entry->rmx_rttvar *
679                                 (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
680                             hc_entry->rmx_cwnd,
681                             hc_entry->rmx_sendpipe,
682                             hc_entry->rmx_recvpipe,
683                             hc_entry->rmx_hits,
684                             hc_entry->rmx_updates,
685                             hc_entry->rmx_expire);
686                 }
687                 THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
688                 sbuf_drain(&sb);
689         }
690 #undef msec
691         error = sbuf_finish(&sb);
692         sbuf_delete(&sb);
693         return (error);
694 }
695
696 /*
697  * Caller has to make sure the curvnet is set properly.
698  */
699 static void
700 tcp_hc_purge_internal(int all)
701 {
702         struct hc_metrics *hc_entry, *hc_next;
703         int i;
704
705         for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
706                 THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
707                 TAILQ_FOREACH_SAFE(hc_entry,
708                     &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
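                        /*
                         * Entries age by one prune interval per pass; once
                         * rmx_expire has run down to zero (or a full purge
                         * was requested), the entry is reclaimed.
                         */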
709                         if (all || hc_entry->rmx_expire <= 0) {
710                                 TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
711                                               hc_entry, rmx_q);
712                                 uma_zfree(V_tcp_hostcache.zone, hc_entry);
713                                 V_tcp_hostcache.hashbase[i].hch_length--;
714                                 atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
715                         } else
716                                 hc_entry->rmx_expire -= V_tcp_hostcache.prune;
717                 }
718                 THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
719         }
720 }
721
722 /*
723  * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
724  * periodically from the callout.
725  */
726 static void
727 tcp_hc_purge(void *arg)
728 {
729         CURVNET_SET((struct vnet *) arg);
730         int all = 0;
731
732         if (V_tcp_hostcache.purgeall) {
733                 all = 1;
734                 V_tcp_hostcache.purgeall = 0;
735         }
736
737         tcp_hc_purge_internal(all);
738
739         callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
740             tcp_hc_purge, arg);
741         CURVNET_RESTORE();
742 }
743
744 /*
745  * Expire and purge all entries in hostcache immediately.
746  */
747 static int
748 sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS)
749 {
750         int error, val;
751
752         val = 0;
753         error = sysctl_handle_int(oidp, &val, 0, req);
754         if (error || !req->newptr)
755                 return (error);
756
757         tcp_hc_purge_internal(1);
758
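        /*
         * Restart the periodic purge callout so the next automatic run
         * happens a full prune interval after this manual purge.
         */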
759         callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
760             tcp_hc_purge, curvnet);
761
762         return (0);
763 }