/*-
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, max MTU,
 * congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  All bucket rows have their own lock and
 * thus multiple lookups and modifications can be done at the same time as
 * long as they are in different bucket rows.  If a request for insertion of
 * a new record can't be satisfied, it simply returns an empty structure.
 * Nobody and nothing outside of tcp_hostcache.c will ever point directly to
 * any entry in the tcp_hostcache.  All communication is done in an
 * object-oriented way and only functions of tcp_hostcache will manipulate
 * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
 * concurrent access situations.  Since tcp_hostcache is only caching
 * information, there are no fatal consequences if we either can't satisfy
 * any particular request or have to drop/overwrite an existing entry because
 * of bucket limit or memory constraints.
 *
 * Many thanks to jlemon for the basic structure of tcp_syncache, which is
 * being followed here.
 */
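
/*
 * Illustrative sketch of the external interface described above; this is
 * not code from this file, and the surrounding names (inp, maxmtu) are
 * hypothetical stand-ins for whatever a consumer, such as the TCP input
 * path, actually uses.  All locking is internal to the hostcache:
 *
 *	struct hc_metrics_lite metrics;
 *
 *	tcp_hc_get(&inp->inp_inc, &metrics);	// zeroed if nothing cached
 *	if (metrics.rmx_mtu != 0)
 *		maxmtu = metrics.rmx_mtu;	// seed PMTU from the cache
 *	...
 *	metrics.rmx_rtt = rtt;			// measured values
 *	tcp_hc_update(&inp->inp_inc, &metrics);	// fold measurements back in
 */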
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hostcache.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <vm/uma.h>

/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE		512
#define TCP_HOSTCACHE_BUCKETLIMIT	30
#define TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */
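
/*
 * The overall cache limit defaults to the product of the hash size and the
 * per-bucket limit; tcp_hc_init() clamps any larger tunable value back down
 * to that product.  All of these defaults can be overridden through the
 * net.inet.tcp.hostcache.* tunables and sysctls declared below.
 */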
static VNET_DEFINE(struct tcp_hostcache, tcp_hostcache);
#define	V_tcp_hostcache		VNET(tcp_hostcache)

static VNET_DEFINE(struct callout, tcp_hc_callout);
#define	V_tcp_hc_callout	VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache, CTLFLAG_RW, 0,
    "TCP Host cache");
VNET_DEFINE(int, tcp_use_hostcache) = 1;
#define	V_tcp_use_hostcache	VNET(tcp_use_hostcache)
SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_use_hostcache), 0,
    "Enable the TCP hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entries on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
    sysctl_tcp_hc_list, "A", "List of all hostcache entries");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0,
    sysctl_tcp_hc_purgenow, "I", "Immediately purge all entries");

static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

#define HOSTCACHE_HASH(ip) \
	(((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
	 V_tcp_hostcache.hashmask)
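
/*
 * Note on the IPv4 hash above: two right-shifted copies of the address are
 * folded in with XOR; since the hash table size is enforced to be a power
 * of two, masking with hashmask (hashsize - 1) selects a bucket row
 * directly.
 */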

/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6) \
	(((ip6)->s6_addr32[0] ^ \
	  (ip6)->s6_addr32[1] ^ \
	  (ip6)->s6_addr32[2] ^ \
	  (ip6)->s6_addr32[3]) & \
	 V_tcp_hostcache.hashmask)

#define THC_LOCK(lp)		mtx_lock(lp)
#define THC_UNLOCK(lp)		mtx_unlock(lp)

void
tcp_hc_init(void)
{
	u_int cache_limit;
	int i;

	/*
	 * Initialize hostcache structures.
	 */
	V_tcp_hostcache.cache_count = 0;
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;

	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
	    M_HOSTCACHE, M_WAITOK | M_ZERO);

	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
		    NULL, MTX_DEF);
	}

	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);

	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, 1);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
}

#ifdef VIMAGE
void
tcp_hc_destroy(void)
{
	int i;

	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);
	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}
#endif

/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row when done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return NULL;

	KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock when done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * Iterate through entries in bucket row looking for a match.
	 */
	TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (inc->inc_flags & INC_ISIPV6) {
			/* XXX: check ip6_zoneid */
			if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
			    sizeof(inc->inc6_faddr)) == 0)
				return hc_entry;
		} else {
			if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
			    sizeof(inc->inc_faddr)) == 0)
				return hc_entry;
		}
	}

	/*
	 * We were unsuccessful and didn't find anything.
	 */
	THC_UNLOCK(&hc_head->hch_mtx);
	return NULL;
}

/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row when done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return NULL;

	KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock when done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * If the bucket limit is reached, reuse the least-used element.
	 */
	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
	    V_tcp_hostcache.cache_count >= V_tcp_hostcache.cache_limit) {
		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
		/*
		 * At first we were dropping the last element, just to
		 * reacquire it in the next two lines again, which isn't very
		 * efficient.  Instead just reuse the least used element.
		 * We may drop something that is still "in-use" but we can be
		 * "lossy".
		 * Just give up if this bucket row is empty and we don't have
		 * anything to replace.
		 */
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
		V_tcp_hostcache.hashbase[hash].hch_length--;
		V_tcp_hostcache.cache_count--;
		TCPSTAT_INC(tcps_hc_bucketoverflow);
#if 0
		uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
	} else {
		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
	}

	/*
	 * Initialize basic information of hostcache entry.
	 */
	bzero(hc_entry, sizeof(*hc_entry));
	if (inc->inc_flags & INC_ISIPV6) {
		hc_entry->ip6 = inc->inc6_faddr;
		hc_entry->ip6_zoneid = inc->inc6_zoneid;
	} else
		hc_entry->ip4 = inc->inc_faddr;
	hc_entry->rmx_head = hc_head;
	hc_entry->rmx_expire = V_tcp_hostcache.expire;

	/*
	 * Put it upfront.
	 */
	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
	V_tcp_hostcache.hashbase[hash].hch_length++;
	V_tcp_hostcache.cache_count++;
	TCPSTAT_INC(tcps_hc_added);

	return hc_entry;
}

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  A metric that has no cached value, or
 * the whole structure when no entry was found, is filled with zeros.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache) {
		/* Zero the result so callers never see stale values. */
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
	hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
	hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
	hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
	hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
	hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
	hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

	/* Unlock bucket row. */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or the value is not
 * set.
 */
uint32_t
tcp_hc_getmtu(struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	uint32_t mtu;

	if (!V_tcp_use_hostcache)
		return 0;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL)
		return 0;

	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	mtu = hc_entry->rmx_mtu;
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
	return mtu;
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, uint32_t mtu)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object, try to insert a new one.
	 */
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_entry->rmx_mtu = mtu;

	/*
	 * Put it upfront so we find it faster next time.
	 */
	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

	/* Unlock bucket row. */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
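
	/*
	 * For each metric, take the sample as-is if nothing is cached yet;
	 * otherwise blend it 50/50 with the cached value.  The intermediate
	 * sums are widened to 64 bits where overflow is possible.
	 */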
	if (hcml->rmx_rtt != 0) {
		if (hc_entry->rmx_rtt == 0)
			hc_entry->rmx_rtt = hcml->rmx_rtt;
		else
			hc_entry->rmx_rtt = ((uint64_t)hc_entry->rmx_rtt +
			    (uint64_t)hcml->rmx_rtt) / 2;
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->rmx_rttvar != 0) {
		if (hc_entry->rmx_rttvar == 0)
			hc_entry->rmx_rttvar = hcml->rmx_rttvar;
		else
			hc_entry->rmx_rttvar = ((uint64_t)hc_entry->rmx_rttvar +
			    (uint64_t)hcml->rmx_rttvar) / 2;
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->rmx_ssthresh != 0) {
		if (hc_entry->rmx_ssthresh == 0)
			hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
		else
			hc_entry->rmx_ssthresh =
			    (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->rmx_cwnd != 0) {
		if (hc_entry->rmx_cwnd == 0)
			hc_entry->rmx_cwnd = hcml->rmx_cwnd;
		else
			hc_entry->rmx_cwnd = ((uint64_t)hc_entry->rmx_cwnd +
			    (uint64_t)hcml->rmx_cwnd) / 2;
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->rmx_sendpipe != 0) {
		if (hc_entry->rmx_sendpipe == 0)
			hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
		else
			hc_entry->rmx_sendpipe =
			    ((uint64_t)hc_entry->rmx_sendpipe +
			    (uint64_t)hcml->rmx_sendpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->rmx_recvpipe != 0) {
		if (hc_entry->rmx_recvpipe == 0)
			hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
		else
			hc_entry->rmx_recvpipe =
			    ((uint64_t)hc_entry->rmx_recvpipe +
			    (uint64_t)hcml->rmx_recvpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}

	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 128;
	struct sbuf sb;
	int i, error;
	struct hc_metrics *hc_entry;
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif
	char ip4buf[INET_ADDRSTRLEN];	/* private buffer for inet_ntoa_r() */

	sbuf_new(&sb, NULL, linesize * (V_tcp_hostcache.cache_count + 1),
	    SBUF_INCLUDENUL);

	sbuf_printf(&sb,
	    "\nIP address        MTU SSTHRESH      RTT   RTTVAR "
	    "    CWND SENDPIPE RECVPIPE HITS  UPD  EXP\n");
#define msec(u) (((u) + 500) / 1000)
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
		    rmx_q) {
			sbuf_printf(&sb,
			    "%-15s %5u %8u %6lums %6lums %8u %8u %8u %4lu "
			    "%4lu %4i\n",
			    hc_entry->ip4.s_addr ?
				inet_ntoa_r(hc_entry->ip4, ip4buf) :
#ifdef INET6
				ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
				"IPv6?",
#endif
			    hc_entry->rmx_mtu,
			    hc_entry->rmx_ssthresh,
			    msec((u_long)hc_entry->rmx_rtt *
				(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec((u_long)hc_entry->rmx_rttvar *
				(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->rmx_cwnd,
			    hc_entry->rmx_sendpipe,
			    hc_entry->rmx_recvpipe,
			    hc_entry->rmx_hits,
			    hc_entry->rmx_updates,
			    hc_entry->rmx_expire);
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
#undef msec
	error = sbuf_finish(&sb);
	if (error == 0)
		error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb));
	sbuf_delete(&sb);
	return (error);
}

/*
 * Caller has to make sure the curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_metrics *hc_entry, *hc_next;
	int i;

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH_SAFE(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
			if (all || hc_entry->rmx_expire <= 0) {
				TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
				    hc_entry, rmx_q);
				uma_zfree(V_tcp_hostcache.zone, hc_entry);
				V_tcp_hostcache.hashbase[i].hch_length--;
				V_tcp_hostcache.cache_count--;
			} else
				hc_entry->rmx_expire -= V_tcp_hostcache.prune;
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
}

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	CURVNET_SET((struct vnet *) arg);
	int all = 0;

	if (V_tcp_hostcache.purgeall) {
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}
	tcp_hc_purge_internal(all);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, arg);
	CURVNET_RESTORE();
}
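
/*
 * For reference, the sysctl below can be triggered from userland to flush
 * the cache on demand, e.g.:
 *
 *	sysctl net.inet.tcp.hostcache.purgenow=1
 */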

/*
 * Expire and purge all entries in hostcache immediately.
 */
static int
sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	tcp_hc_purge_internal(1);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);

	return (0);
}