/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, max MTU,
 * congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  All bucket rows have their own lock and
 * thus multiple lookups and modifications can be done at the same time as
 * long as they are in different bucket rows.  If a request for insertion of
 * a new record can't be satisfied, it simply returns an empty structure.
 * Nobody and nothing outside of tcp_hostcache.c will ever point directly to
 * any entry in the tcp_hostcache.  All communication is done in an
 * object-oriented way and only functions of tcp_hostcache will manipulate
 * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
 * concurrent access situations.  Since tcp_hostcache is only caching
 * information, there are no fatal consequences if we either can't satisfy
 * any particular request or have to drop/overwrite an existing entry because
 * of bucket limit memory constraints.
 *
 * Many thanks to jlemon for the basic structure of tcp_syncache, which is
 * being used here.
 */
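/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * in the TCP stack consume the cache roughly as follows, where `inc' is the
 * connection's struct in_conninfo:
 *
 *	struct hc_metrics_lite metrics;
 *
 *	tcp_hc_get(&inc, &metrics);	  // zeroed if nothing is cached
 *	if (metrics.rmx_rtt != 0)
 *		; // seed srtt from the cached value instead of defaults
 *	...
 *	tcp_hc_update(&inc, &metrics);	  // write back measured values
 */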
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hash.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif

#include <vm/uma.h>
TAILQ_HEAD(hc_qhead, hc_metrics);

struct hc_head {
	struct hc_qhead	hch_bucket;
	u_int		hch_length;
	struct mtx	hch_mtx;
};
struct hc_metrics {
	TAILQ_ENTRY(hc_metrics) rmx_q;
	struct hc_head	*rmx_head;	/* head of bucket tail queue */
	struct in_addr	ip4;		/* IP address */
	struct in6_addr	ip6;		/* IP6 address */
	uint32_t	ip6_zoneid;	/* IPv6 scope zone id */
	/* endpoint specific values for tcp */
	uint32_t	rmx_mtu;	/* MTU for this path */
	uint32_t	rmx_ssthresh;	/* outbound gateway buffer limit */
	uint32_t	rmx_rtt;	/* estimated round trip time */
	uint32_t	rmx_rttvar;	/* estimated rtt variance */
	uint32_t	rmx_cwnd;	/* congestion window */
	uint32_t	rmx_sendpipe;	/* outbound delay-bandwidth product */
	uint32_t	rmx_recvpipe;	/* inbound delay-bandwidth product */
	/* TCP hostcache internal data */
	int		rmx_expire;	/* lifetime for object */
#ifdef	TCP_HC_COUNTERS
	u_long		rmx_hits;	/* number of hits */
	u_long		rmx_updates;	/* number of updates */
#endif
};
struct tcp_hostcache {
	struct hc_head	*hashbase;
	uma_zone_t	zone;
	u_int		hashsize;
	u_int		hashmask;
	u_int		hashsalt;
	u_int		bucket_limit;
	u_int		cache_count;
	u_int		cache_limit;
	int		expire;
	int		prune;
	int		purgeall;
};
/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE		512
#define TCP_HOSTCACHE_BUCKETLIMIT	30
#define TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */
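/*
 * Note (illustrative): with the defaults above, tcp_hc_init() derives an
 * overall limit of hashsize * bucketlimit = 512 * 30 = 15360 entries
 * (see the cache_limit computation below).
 */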
VNET_DEFINE_STATIC(struct tcp_hostcache, tcp_hostcache);
#define V_tcp_hostcache		VNET(tcp_hostcache)

VNET_DEFINE_STATIC(struct callout, tcp_hc_callout);
#define V_tcp_hc_callout	VNET(tcp_hc_callout)
static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *, bool);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);
static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP Host cache");
VNET_DEFINE(int, tcp_use_hostcache) = 1;
#define V_tcp_use_hostcache	VNET(tcp_use_hostcache)
SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_use_hostcache), 0,
    "Enable the TCP hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entries on next purge run");
SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_list, "A",
    "List of all hostcache entries");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, histo,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
    0, 0, sysctl_tcp_hc_histo, "A",
    "Print a histogram of hostcache hashbucket utilization");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    NULL, 0, sysctl_tcp_hc_purgenow, "I",
    "Immediately purge all entries");
static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");
/* Use jenkins_hash32(), as in other parts of the tcp stack */
#define HOSTCACHE_HASH(ip) \
	(jenkins_hash32((uint32_t *)(ip), 1, V_tcp_hostcache.hashsalt) & \
	 V_tcp_hostcache.hashmask)

#define HOSTCACHE_HASH6(ip6) \
	(jenkins_hash32((uint32_t *)&((ip6)->s6_addr32[0]), 4, \
	 V_tcp_hostcache.hashsalt) & \
	 V_tcp_hostcache.hashmask)
#define THC_LOCK(h)	mtx_lock(&(h)->hch_mtx)
#define THC_UNLOCK(h)	mtx_unlock(&(h)->hch_mtx)
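/*
 * Locking sketch (illustrative): tcp_hc_lookup() and tcp_hc_insert() return
 * with the matching bucket row locked, so a caller unlocks via the entry's
 * back pointer when done, e.g.:
 *
 *	hc_entry = tcp_hc_lookup(inc, false);
 *	if (hc_entry != NULL) {
 *		... read or modify hc_entry ...
 *		THC_UNLOCK(hc_entry->rmx_head);
 *	}
 */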
/*
 * Initialize hostcache structures.
 */
void
tcp_hc_init(void)
{
	u_int cache_limit;
	int i;

	atomic_store_int(&V_tcp_hostcache.cache_count, 0);
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
	V_tcp_hostcache.hashsalt = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;

	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
	    M_HOSTCACHE, M_WAITOK | M_ZERO);

	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
		    NULL, MTX_DEF);
	}

	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);

	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, 1);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
}
#ifdef VIMAGE
void
tcp_hc_destroy(void)
{
	int i;

	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}
#endif
/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row when done reading or modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc, bool update)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("%s: NULL in_conninfo", __func__));

	/*
	 * Hash the foreign IP address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock when done.
	 */
	THC_LOCK(hc_head);

	/*
	 * Iterate through entries in bucket row looking for a match.
	 */
	TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (inc->inc_flags & INC_ISIPV6) {
			/* XXX: check ip6_zoneid */
			if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
			    sizeof(inc->inc6_faddr)) == 0)
				break;
		} else {
			if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
			    sizeof(inc->inc_faddr)) == 0)
				break;
		}
	}

	/*
	 * We were unsuccessful and didn't find anything.
	 */
	if (hc_entry == NULL) {
		THC_UNLOCK(hc_head);
		return (NULL);
	}

#ifdef	TCP_HC_COUNTERS
	if (update)
		hc_entry->rmx_updates++;
	else
		hc_entry->rmx_hits++;
#endif
	hc_entry->rmx_expire = V_tcp_hostcache.expire;

	return (hc_entry);
}
/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row when done reading or modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("%s: NULL in_conninfo", __func__));

	/*
	 * Hash the foreign IP address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock when done.
	 */
	THC_LOCK(hc_head);

	/*
	 * If the bucket limit is reached, reuse the least-used element.
	 */
	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
	    atomic_load_int(&V_tcp_hostcache.cache_count) >=
	    V_tcp_hostcache.cache_limit) {
		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
		/*
		 * At first we were dropping the last element, just to
		 * reacquire it in the next two lines again, which isn't very
		 * efficient.  Instead just reuse the least used element.
		 * We may drop something that is still "in-use" but we can be
		 * "lossy".
		 * Just give up if this bucket row is empty and we don't have
		 * anything to replace.
		 */
		if (hc_entry == NULL) {
			THC_UNLOCK(hc_head);
			return (NULL);
		}
		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
		KASSERT(V_tcp_hostcache.hashbase[hash].hch_length > 0 &&
		    V_tcp_hostcache.hashbase[hash].hch_length <=
		    V_tcp_hostcache.bucket_limit,
		    ("tcp_hostcache: bucket length range violated at %u: %u",
		    hash, V_tcp_hostcache.hashbase[hash].hch_length));
		V_tcp_hostcache.hashbase[hash].hch_length--;
		atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
		TCPSTAT_INC(tcps_hc_bucketoverflow);
#if 0
		uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
	} else {
		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(hc_head);
			return (NULL);
		}
	}

	/*
	 * Initialize basic information of hostcache entry.
	 */
	bzero(hc_entry, sizeof(*hc_entry));
	if (inc->inc_flags & INC_ISIPV6) {
		hc_entry->ip6 = inc->inc6_faddr;
		hc_entry->ip6_zoneid = inc->inc6_zoneid;
	} else
		hc_entry->ip4 = inc->inc_faddr;
	hc_entry->rmx_head = hc_head;
	hc_entry->rmx_expire = V_tcp_hostcache.expire;

	/*
	 * Put it upfront.
	 */
	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
	V_tcp_hostcache.hashbase[hash].hch_length++;
	KASSERT(V_tcp_hostcache.hashbase[hash].hch_length <=
	    V_tcp_hostcache.bucket_limit,
	    ("tcp_hostcache: bucket length too high at %u: %u",
	    hash, V_tcp_hostcache.hashbase[hash].hch_length));
	atomic_add_int(&V_tcp_hostcache.cache_count, 1);
	TCPSTAT_INC(tcps_hc_added);

	return (hc_entry);
}
/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  The structure is zeroed when no entry
 * is found or a value is not set.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc, false);

	/*
	 * If we don't have an existing object.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
	hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
	hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
	hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
	hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
	hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
	hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(hc_entry->rmx_head);
}
/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or the value is
 * not set.
 */
uint32_t
tcp_hc_getmtu(struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	uint32_t mtu;

	if (!V_tcp_use_hostcache)
		return (0);

	hc_entry = tcp_hc_lookup(inc, false);
	if (hc_entry == NULL) {
		return (0);
	}

	mtu = hc_entry->rmx_mtu;
	THC_UNLOCK(hc_entry->rmx_head);

	return (mtu);
}
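/*
 * Illustrative consumer (an assumption, not part of this file): MSS
 * computation can clamp its estimate with a cached path MTU.  `min_protoh'
 * here is a hypothetical name for the minimum protocol header overhead:
 *
 *	mtu = tcp_hc_getmtu(&inp->inp_inc);
 *	if (mtu != 0)
 *		mss = min(mss, mtu - min_protoh);
 */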
/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, uint32_t mtu)
{
	struct hc_metrics_lite hcml = { .rmx_mtu = mtu };

	tcp_hc_update(inc, &hcml);
}
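/*
 * Illustrative caller (an assumption based on the stack's PMTU handling,
 * not shown in this file): path MTU discovery code records a newly learned
 * MTU with, e.g., tcp_hc_updatemtu(&inp->inp_inc, mtu);
 */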
/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return;

	hc_entry = tcp_hc_lookup(inc, true);
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}

	if (hcml->rmx_mtu != 0) {
		hc_entry->rmx_mtu = hcml->rmx_mtu;
	}
	if (hcml->rmx_rtt != 0) {
		if (hc_entry->rmx_rtt == 0)
			hc_entry->rmx_rtt = hcml->rmx_rtt;
		else
			hc_entry->rmx_rtt = ((uint64_t)hc_entry->rmx_rtt +
			    (uint64_t)hcml->rmx_rtt) / 2;
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->rmx_rttvar != 0) {
		if (hc_entry->rmx_rttvar == 0)
			hc_entry->rmx_rttvar = hcml->rmx_rttvar;
		else
			hc_entry->rmx_rttvar = ((uint64_t)hc_entry->rmx_rttvar +
			    (uint64_t)hcml->rmx_rttvar) / 2;
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->rmx_ssthresh != 0) {
		if (hc_entry->rmx_ssthresh == 0)
			hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
		else
			hc_entry->rmx_ssthresh =
			    (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->rmx_cwnd != 0) {
		if (hc_entry->rmx_cwnd == 0)
			hc_entry->rmx_cwnd = hcml->rmx_cwnd;
		else
			hc_entry->rmx_cwnd = ((uint64_t)hc_entry->rmx_cwnd +
			    (uint64_t)hcml->rmx_cwnd) / 2;
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->rmx_sendpipe != 0) {
		if (hc_entry->rmx_sendpipe == 0)
			hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
		else
			hc_entry->rmx_sendpipe =
			    ((uint64_t)hc_entry->rmx_sendpipe +
			    (uint64_t)hcml->rmx_sendpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->rmx_recvpipe != 0) {
		if (hc_entry->rmx_recvpipe == 0)
			hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
		else
			hc_entry->rmx_recvpipe =
			    ((uint64_t)hc_entry->rmx_recvpipe +
			    (uint64_t)hcml->rmx_recvpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}

	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	THC_UNLOCK(hc_entry->rmx_head);
}
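/*
 * Note on the smoothing above: each already-cached metric is blended with
 * the new sample as new = (cached + sample) / 2.  For example, a cached RTT
 * of 100 ms updated with a 60 ms sample yields (100 + 60) / 2 = 80 ms.
 */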
/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 128;
	struct sbuf sb;
	int i, error, len;
	struct hc_metrics *hc_entry;
	char ip4buf[INET_ADDRSTRLEN];
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	if (jailed_without_vnet(curthread->td_ucred) != 0)
		return (EPERM);

	/* Optimize Buffer length query by sbin/sysctl */
	if (req->oldptr == NULL) {
		len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *
		    linesize;
		return (SYSCTL_OUT(req, NULL, len));
	}

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0) {
		return (error);
	}

	/* Use a buffer sized for one full bucket */
	sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit *
	    linesize, req);

	sbuf_printf(&sb,
	    "\nIP address        MTU SSTHRESH      RTT   RTTVAR "
	    "    CWND SENDPIPE RECVPIPE "
#ifdef	TCP_HC_COUNTERS
	    "HITS UPD  "
#endif
	    "EXP\n");
	sbuf_drain(&sb);

#define msec(u) (((u) + 500) / 1000)
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i]);
		TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
		    rmx_q) {
			sbuf_printf(&sb,
			    "%-15s %5u %8u %6lums %6lums %8u %8u %8u "
#ifdef	TCP_HC_COUNTERS
			    "%4lu %4lu "
#endif
			    "%4i\n",
			    hc_entry->ip4.s_addr ?
			        inet_ntoa_r(hc_entry->ip4, ip4buf) :
#ifdef INET6
			        ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
			        "IPv6?",
#endif
			    hc_entry->rmx_mtu,
			    hc_entry->rmx_ssthresh,
			    msec((u_long)hc_entry->rmx_rtt *
				(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec((u_long)hc_entry->rmx_rttvar *
				(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->rmx_cwnd,
			    hc_entry->rmx_sendpipe,
			    hc_entry->rmx_recvpipe,
#ifdef	TCP_HC_COUNTERS
			    hc_entry->rmx_hits,
			    hc_entry->rmx_updates,
#endif
			    hc_entry->rmx_expire);
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i]);
		sbuf_drain(&sb);
	}
#undef msec
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}
/*
 * Sysctl function: prints a histogram of the hostcache hashbucket
 * utilization.
 */
static int
sysctl_tcp_hc_histo(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 50;
	struct sbuf sb;
	int i, error;
	int *histo;
	u_int hch_length;

	if (jailed_without_vnet(curthread->td_ucred) != 0)
		return (EPERM);

	histo = (int *)malloc(sizeof(int) * (V_tcp_hostcache.bucket_limit + 1),
	    M_TEMP, M_NOWAIT | M_ZERO);
	if (histo == NULL)
		return (ENOMEM);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		hch_length = V_tcp_hostcache.hashbase[i].hch_length;
		KASSERT(hch_length <= V_tcp_hostcache.bucket_limit,
		    ("tcp_hostcache: bucket limit exceeded at %u: %u",
		    i, hch_length));
		histo[hch_length]++;
	}

	/* Use a buffer for 16 lines */
	sbuf_new_for_sysctl(&sb, NULL, 16 * linesize, req);

	sbuf_printf(&sb, "\nLength\tCount\n");
	for (i = 0; i <= V_tcp_hostcache.bucket_limit; i++) {
		sbuf_printf(&sb, "%u\t%u\n", i, histo[i]);
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	free(histo, M_TEMP);
	return (error);
}
/*
 * Caller has to make sure the curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_metrics *hc_entry, *hc_next;
	int i;

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i]);
		TAILQ_FOREACH_SAFE(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
			KASSERT(V_tcp_hostcache.hashbase[i].hch_length > 0 &&
			    V_tcp_hostcache.hashbase[i].hch_length <=
			    V_tcp_hostcache.bucket_limit, ("tcp_hostcache: "
			    "bucket length out of range at %u: %u",
			    i, V_tcp_hostcache.hashbase[i].hch_length));
			if (all || hc_entry->rmx_expire <= 0) {
				TAILQ_REMOVE(
				    &V_tcp_hostcache.hashbase[i].hch_bucket,
				    hc_entry, rmx_q);
				uma_zfree(V_tcp_hostcache.zone, hc_entry);
				V_tcp_hostcache.hashbase[i].hch_length--;
				atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
			} else
				hc_entry->rmx_expire -= V_tcp_hostcache.prune;
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i]);
	}
}
/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	CURVNET_SET((struct vnet *) arg);
	int all = 0;

	if (V_tcp_hostcache.purgeall) {
		if (V_tcp_hostcache.purgeall == 2)
			V_tcp_hostcache.hashsalt = arc4random();
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}

	tcp_hc_purge_internal(all);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, arg);
	CURVNET_RESTORE();
}
/*
 * Expire and purge all entries in hostcache immediately.
 */
static int
sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS)
{
	int error, val = 0;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	if (val == 2)
		V_tcp_hostcache.hashsalt = arc4random();
	tcp_hc_purge_internal(1);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);

	return (0);
}