/*-
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, max MTU,
 * congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  All bucket rows have their own lock
 * and thus multiple lookups and modifications can be done at the same time
 * as long as they are in different bucket rows.  If a request for insertion
 * of a new record can't be satisfied, it simply returns an empty structure.
 * Nobody and nothing outside of tcp_hostcache.c will ever point directly to
 * any entry in the tcp_hostcache.  All communication is done in an
 * object-oriented way and only functions of tcp_hostcache will manipulate
 * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
 * concurrent access situations.  Since tcp_hostcache is only caching
 * information, there are no fatal consequences if we either can't satisfy
 * any particular request or have to drop/overwrite an existing entry
 * because of bucket limit memory constraints.
 *
 * Many thanks to jlemon for the basic structure of tcp_syncache which is
 * being used here.
 */
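
/*
 * Illustrative sketch (not part of the original sources): a typical TCP
 * consumer of the API implemented below reads cached metrics when a
 * connection is set up and writes measured values back when it is torn
 * down.  The surrounding variables (tp, mss, min_protoh) and the exact
 * call sites are assumptions made for illustration only:
 *
 *	struct hc_metrics_lite metrics;
 *
 *	tcp_hc_get(&tp->t_inpcb->inp_inc, &metrics);
 *	if (metrics.rmx_mtu != 0)
 *		mss = metrics.rmx_mtu - min_protoh;
 *
 * and, when the connection is closed, feeding the measured values back:
 *
 *	metrics.rmx_rtt = tp->t_srtt;
 *	tcp_hc_update(&tp->t_inpcb->inp_inc, &metrics);
 */
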
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if_var.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hostcache.h>
#include <netinet6/tcp6_var.h>
/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE		512
#define TCP_HOSTCACHE_BUCKETLIMIT	30
#define TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */
static VNET_DEFINE(struct tcp_hostcache, tcp_hostcache);
#define	V_tcp_hostcache		VNET(tcp_hostcache)

static VNET_DEFINE(struct callout, tcp_hc_callout);
#define	V_tcp_hc_callout	VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);
static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache, CTLFLAG_RW, 0,
    "TCP Host cache");
SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");
SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entries on next purge run");
SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
    sysctl_tcp_hc_list, "A", "List of all hostcache entries");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0,
    sysctl_tcp_hc_purgenow, "I", "Immediately purge all entries");
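
/*
 * Example of how these knobs are typically exercised from userland
 * (illustration only; the OID names come from the declarations above):
 *
 *	sysctl net.inet.tcp.hostcache.list		dump all cached entries
 *	sysctl net.inet.tcp.hostcache.expire		show the expiry time
 *	sysctl net.inet.tcp.hostcache.purgenow=1	expire everything now
 */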
static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

#define HOSTCACHE_HASH(ip) \
	(((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
	 V_tcp_hostcache.hashmask)
/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6) \
	(((ip6)->s6_addr32[0] ^ \
	  (ip6)->s6_addr32[1] ^ \
	  (ip6)->s6_addr32[2] ^ \
	  (ip6)->s6_addr32[3]) & \
	 V_tcp_hostcache.hashmask)
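
/*
 * Illustration (not part of the original sources): with the default
 * hashsize of 512 the hashmask is 0x1ff, so the bucket index is simply
 * the low 9 bits of the XOR-folded address:
 *
 *	h   = addr ^ (addr >> 7) ^ (addr >> 17);
 *	idx = h & V_tcp_hostcache.hashmask;	(equivalent to h % hashsize)
 *
 * The masking is only equivalent to a modulo because the hash size is
 * forced to a power of two at initialization time.
 */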
#define THC_LOCK(lp)		mtx_lock(lp)
#define THC_UNLOCK(lp)		mtx_unlock(lp)
/*
 * Initialize hostcache structures.
 */
	V_tcp_hostcache.cache_count = 0;
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;
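	/*
	 * Worked example with the compile-time defaults (all three values
	 * are boot-time tunables): hashsize 512 * bucket_limit 30 gives a
	 * cache_limit of 15360 entries; a tunable value larger than that
	 * product is clamped back down by the check above.
	 */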
	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
	    M_HOSTCACHE, M_WAITOK | M_ZERO);
	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
		    NULL, MTX_DEF);
	}
	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, 1);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));
	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);
	/*
	 * Iterate through entries in bucket row looking for a match.
	 */
	TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (inc->inc_flags & INC_ISIPV6) {
			/* XXX: check ip6_zoneid */
			if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
			    sizeof(inc->inc6_faddr)) == 0)
				return hc_entry;
		} else {
			if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
			    sizeof(inc->inc_faddr)) == 0)
				return hc_entry;
		}
	}

	/*
	 * We were unsuccessful and didn't find anything.
	 */
	THC_UNLOCK(&hc_head->hch_mtx);
	return NULL;
}
/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));
	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);
	/*
	 * If the bucket limit is reached, reuse the least-used element.
	 */
	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
	    V_tcp_hostcache.cache_count >= V_tcp_hostcache.cache_limit) {
		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
		/*
		 * At first we were dropping the last element, just to
		 * reacquire it in the next two lines again, which isn't very
		 * efficient.  Instead just reuse the least used element.
		 * We may drop something that is still "in-use" but we can be
		 * "lossy".
		 * Just give up if this bucket row is empty and we don't have
		 * anything to replace.
		 */
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
		V_tcp_hostcache.hashbase[hash].hch_length--;
		V_tcp_hostcache.cache_count--;
		TCPSTAT_INC(tcps_hc_bucketoverflow);
#if 0
		uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
	} else {
		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
	}
	/*
	 * Initialize basic information of hostcache entry.
	 */
	bzero(hc_entry, sizeof(*hc_entry));
	if (inc->inc_flags & INC_ISIPV6) {
		hc_entry->ip6 = inc->inc6_faddr;
		hc_entry->ip6_zoneid = inc->inc6_zoneid;
	} else
		hc_entry->ip4 = inc->inc_faddr;
	hc_entry->rmx_head = hc_head;
	hc_entry->rmx_expire = V_tcp_hostcache.expire;
	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
	V_tcp_hostcache.hashbase[hash].hch_length++;
	V_tcp_hostcache.cache_count++;
	TCPSTAT_INC(tcps_hc_added);

	return hc_entry;
}
/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  Fills in zeroes when no entry was found
 * or a value is not set.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);
	/*
	 * If we don't have an existing object.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
	hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
	hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
	hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
	hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
	hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
	hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or the value is not
 * set.
 */
u_long
tcp_hc_getmtu(struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	u_long mtu;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		return 0;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	mtu = hc_entry->rmx_mtu;
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
	return mtu;
}
/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
{
	struct hc_metrics *hc_entry;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object, try to insert a new one.
	 */
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_entry->rmx_mtu = mtu;

	/*
	 * Put it upfront so we find it faster next time.
	 */
	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
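
/*
 * Illustration (assumption, not taken from the sources): the path MTU
 * discovery machinery is the natural consumer of the two functions above.
 * A hypothetical caller that has just learned a smaller MTU from an ICMP
 * "fragmentation needed" message might record it with
 *
 *	tcp_hc_updatemtu(&inp->inp_inc, mtu);
 *
 * while tcp_hc_getmtu() is the read side, letting a later connection to
 * the same host start from the already discovered MTU instead of
 * rediscovering it.
 */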
/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_metrics *hc_entry;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
	if (hcml->rmx_rtt != 0) {
		if (hc_entry->rmx_rtt == 0)
			hc_entry->rmx_rtt = hcml->rmx_rtt;
		else
			hc_entry->rmx_rtt =
			    (hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->rmx_rttvar != 0) {
		if (hc_entry->rmx_rttvar == 0)
			hc_entry->rmx_rttvar = hcml->rmx_rttvar;
		else
			hc_entry->rmx_rttvar =
			    (hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->rmx_ssthresh != 0) {
		if (hc_entry->rmx_ssthresh == 0)
			hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
		else
			hc_entry->rmx_ssthresh =
			    (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->rmx_cwnd != 0) {
		if (hc_entry->rmx_cwnd == 0)
			hc_entry->rmx_cwnd = hcml->rmx_cwnd;
		else
			hc_entry->rmx_cwnd =
			    (hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->rmx_sendpipe != 0) {
		if (hc_entry->rmx_sendpipe == 0)
			hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
		else
			hc_entry->rmx_sendpipe =
			    (hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->rmx_recvpipe != 0) {
		if (hc_entry->rmx_recvpipe == 0)
			hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
		else
			hc_entry->rmx_recvpipe =
			    (hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}
	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
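
/*
 * Note on the update rule above (numbers are made up for illustration):
 * every new sample is averaged with the stored value, new = (old + sample)
 * / 2, i.e. an exponentially weighted moving average with weight 1/2.
 * Starting from a cached RTT of 100 and feeding repeated samples of 200,
 * the stored value moves 100 -> 150 -> 175 -> 187 ..., so stale history
 * decays quickly while a single outlier shifts the cached value by at most
 * half of its error.
 */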
/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 128;
	struct sbuf sb;
	int i, error;
	struct hc_metrics *hc_entry;
	char ip6buf[INET6_ADDRSTRLEN];

	sbuf_new(&sb, NULL, linesize * (V_tcp_hostcache.cache_count + 1),
	    SBUF_INCLUDENUL);

	sbuf_printf(&sb,
	    "\nIP address        MTU  SSTHRESH      RTT   RTTVAR "
	    "    CWND SENDPIPE RECVPIPE HITS  UPD  EXP\n");
#define msec(u) (((u) + 500) / 1000)
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
		    rmx_q) {
			sbuf_printf(&sb,
			    "%-15s %5lu %8lu %6lums %6lums %8lu %8lu %8lu %4lu "
			    "%4lu %4i\n",
			    hc_entry->ip4.s_addr ? inet_ntoa(hc_entry->ip4) :
				ip6_sprintf(ip6buf, &hc_entry->ip6),
			    hc_entry->rmx_mtu,
			    hc_entry->rmx_ssthresh,
			    msec(hc_entry->rmx_rtt *
				(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec(hc_entry->rmx_rttvar *
				(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->rmx_cwnd,
			    hc_entry->rmx_sendpipe,
			    hc_entry->rmx_recvpipe,
			    hc_entry->rmx_hits,
			    hc_entry->rmx_updates,
			    hc_entry->rmx_expire);
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
	error = sbuf_finish(&sb);
	if (error == 0)
		error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb));
	sbuf_delete(&sb);
	return (error);
}
/*
 * Caller has to make sure the curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_metrics *hc_entry, *hc_next;
	int i;

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH_SAFE(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
			if (all || hc_entry->rmx_expire <= 0) {
				TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
				    hc_entry, rmx_q);
				uma_zfree(V_tcp_hostcache.zone, hc_entry);
				V_tcp_hostcache.hashbase[i].hch_length--;
				V_tcp_hostcache.cache_count--;
			} else
				hc_entry->rmx_expire -= V_tcp_hostcache.prune;
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
}
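
/*
 * Worked example of the expiry bookkeeping above, using the compile-time
 * defaults (both values are runtime tunables): a fresh entry starts with
 * rmx_expire = 3600 seconds, and every purge run, 300 seconds apart,
 * subtracts the prune interval again, so an entry that is never hit or
 * updated is freed after 3600 / 300 = 12 runs.  Any lookup or update
 * resets rmx_expire back to the full expire time.
 */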
/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	CURVNET_SET((struct vnet *) arg);
	int all = 0;
	if (V_tcp_hostcache.purgeall) {
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}
	tcp_hc_purge_internal(all);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
	CURVNET_RESTORE();
}
/*
 * Expire and purge all entries in hostcache immediately.
 */
static int
sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS)
{
	int error, val = 0;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	tcp_hc_purge_internal(1);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
	return (0);
}