/*-
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, bandwidth, max
 * MTU, congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet
 * forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  All bucket rows have their own lock
 * and thus multiple lookups and modifications can be done at the same time
 * as long as they are in different bucket rows.  If a request for insertion
 * of a new record can't be satisfied, it simply returns an empty structure.
 * Nobody and nothing outside of tcp_hostcache.c will ever point directly to
 * any entry in the tcp_hostcache.  All communication is done in an
 * object-oriented way and only functions of tcp_hostcache will manipulate
 * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
 * concurrent access situations.  Since tcp_hostcache is only caching
 * information, there are no fatal consequences if we either can't satisfy
 * any particular request or have to drop/overwrite an existing entry
 * because of bucket limit memory constraints.
 */
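
/*
 * Illustrative sketch (not compiled): the per-bucket locking pattern that
 * every function in this file follows.  The foreign address is hashed to a
 * bucket row, only that row's mutex is taken, and all other rows remain
 * available to concurrent lookups and updates:
 *
 *	struct hc_head *row = &tcp_hostcache.hashbase[hash];
 *
 *	THC_LOCK(&row->hch_mtx);
 *	... search, modify or evict entries on row->hch_bucket only ...
 *	THC_UNLOCK(&row->hch_mtx);
 *
 * Contention is thus limited to connections whose foreign addresses hash
 * into the same bucket row.
 */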

/*
 * Many thanks to jlemon for the basic structure of tcp_syncache, which is
 * being followed here.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <vm/uma.h>

TAILQ_HEAD(hc_qhead, hc_metrics);

struct hc_head {
        struct hc_qhead hch_bucket;
        u_int           hch_length;
        struct mtx      hch_mtx;
};

struct hc_metrics {
        /* housekeeping */
        TAILQ_ENTRY(hc_metrics) rmx_q;
        struct  hc_head *rmx_head; /* head of bucket tail queue */
        struct  in_addr ip4;    /* IP address */
        struct  in6_addr ip6;   /* IP6 address */
        /* endpoint specific values for TCP */
        u_long  rmx_mtu;        /* MTU for this path */
        u_long  rmx_ssthresh;   /* outbound gateway buffer limit */
        u_long  rmx_rtt;        /* estimated round trip time */
        u_long  rmx_rttvar;     /* estimated rtt variance */
        u_long  rmx_bandwidth;  /* estimated bandwidth */
        u_long  rmx_cwnd;       /* congestion window */
        u_long  rmx_sendpipe;   /* outbound delay-bandwidth product */
        u_long  rmx_recvpipe;   /* inbound delay-bandwidth product */
        /* TCP hostcache internal data */
        int     rmx_expire;     /* lifetime for object */
        u_long  rmx_hits;       /* number of hits */
        u_long  rmx_updates;    /* number of updates */
};

/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE          512
#define TCP_HOSTCACHE_BUCKETLIMIT       30
#define TCP_HOSTCACHE_EXPIRE            (60*60) /* one hour */
#define TCP_HOSTCACHE_PRUNE             (5*60)  /* every 5 minutes */

struct tcp_hostcache {
        struct  hc_head *hashbase;
        uma_zone_t zone;
        u_int   hashsize;
        u_int   hashmask;
        u_int   bucket_limit;
        u_int   cache_count;
        u_int   cache_limit;
        int     expire;
        int     prune;
        int     purgeall;
};
static struct tcp_hostcache tcp_hostcache;

static struct callout tcp_hc_callout;

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge(void *);

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache, CTLFLAG_RW, 0,
    "TCP Host cache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
    &tcp_hostcache.cache_limit, 0, "Overall entry limit for hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &tcp_hostcache.hashsize, 0, "Size of TCP hostcache hashtable");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
    &tcp_hostcache.bucket_limit, 0, "Per-bucket hash limit for hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_RD,
    &tcp_hostcache.cache_count, 0, "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_RW,
    &tcp_hostcache.expire, 0, "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_RW,
    &tcp_hostcache.prune, 0, "Time between purge runs");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_RW,
    &tcp_hostcache.purgeall, 0, "Expire all entries on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
    sysctl_tcp_hc_list, "A", "List of all hostcache entries");

static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

#define HOSTCACHE_HASH(ip) \
        (((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) &  \
          tcp_hostcache.hashmask)

/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6)                            \
        (((ip6)->s6_addr32[0] ^                         \
          (ip6)->s6_addr32[1] ^                         \
          (ip6)->s6_addr32[2] ^                         \
          (ip6)->s6_addr32[3]) &                        \
         tcp_hostcache.hashmask)
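
/*
 * Worked example (illustrative): with the default hash size of 512 the
 * hashmask is 0x1ff, so HOSTCACHE_HASH reduces to
 *
 *	(s_addr ^ (s_addr >> 7) ^ (s_addr >> 17)) & 0x1ff
 *
 * yielding a bucket index in [0, 511].  The two folded shifts mix higher
 * bits of the address into the low nine bits, so hosts that differ only
 * in their upper octets do not all collide in the same bucket row.
 */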

#define THC_LOCK(lp)            mtx_lock(lp)
#define THC_UNLOCK(lp)          mtx_unlock(lp)

void
tcp_hc_init(void)
{
        int i;

        /*
         * Initialize hostcache structures.
         */
        tcp_hostcache.cache_count = 0;
        tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
        tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
        tcp_hostcache.cache_limit =
            tcp_hostcache.hashsize * tcp_hostcache.bucket_limit;
        tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
        tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;

        TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
            &tcp_hostcache.hashsize);
        TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
            &tcp_hostcache.cache_limit);
        TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
            &tcp_hostcache.bucket_limit);
        if (!powerof2(tcp_hostcache.hashsize)) {
                printf("WARNING: hostcache hash size is not a power of 2.\n");
                tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
        }
        tcp_hostcache.hashmask = tcp_hostcache.hashsize - 1;

        /*
         * Allocate the hash table.
         */
        tcp_hostcache.hashbase = (struct hc_head *)
            malloc(tcp_hostcache.hashsize * sizeof(struct hc_head),
                   M_HOSTCACHE, M_WAITOK | M_ZERO);

        /*
         * Initialize the hash buckets.
         */
        for (i = 0; i < tcp_hostcache.hashsize; i++) {
                TAILQ_INIT(&tcp_hostcache.hashbase[i].hch_bucket);
                tcp_hostcache.hashbase[i].hch_length = 0;
                mtx_init(&tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
                          NULL, MTX_DEF);
        }

        /*
         * Allocate the hostcache entries.
         */
        tcp_hostcache.zone = uma_zcreate("hostcache", sizeof(struct hc_metrics),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        uma_zone_set_max(tcp_hostcache.zone, tcp_hostcache.cache_limit);

        /*
         * Set up periodic cache cleanup.
         */
        callout_init(&tcp_hc_callout, CALLOUT_MPSAFE);
        callout_reset(&tcp_hc_callout, tcp_hostcache.prune * hz,
            tcp_hc_purge, NULL);
}
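
/*
 * Tuning note (the values below are illustrative, not recommendations):
 * the three TUNABLE_INT_FETCH calls above correspond to CTLFLAG_RDTUN
 * sysctls, so the table geometry can only be changed at boot time, e.g.
 * from loader.conf:
 *
 *	net.inet.tcp.hostcache.hashsize="1024"
 *	net.inet.tcp.hostcache.bucketlimit="60"
 *	net.inet.tcp.hostcache.cachelimit="61440"
 *
 * hashsize must be a power of 2; otherwise the default of 512 is used
 * and a warning is printed to the console.
 */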

/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
        int hash;
        struct hc_head *hc_head;
        struct hc_metrics *hc_entry;

        KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));

        /*
         * Hash the foreign ip address.
         */
        if (inc->inc_flags & INC_ISIPV6)
                hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
        else
                hash = HOSTCACHE_HASH(&inc->inc_faddr);

        hc_head = &tcp_hostcache.hashbase[hash];

        /*
         * Acquire lock for this bucket row; we release the lock if we don't
         * find an entry, otherwise the caller has to unlock after he is
         * done.
         */
        THC_LOCK(&hc_head->hch_mtx);

        /*
         * Iterate through entries in bucket row looking for a match.
         */
        TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
                if (inc->inc_flags & INC_ISIPV6) {
                        if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
                            sizeof(inc->inc6_faddr)) == 0)
                                return hc_entry;
                } else {
                        if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
                            sizeof(inc->inc_faddr)) == 0)
                                return hc_entry;
                }
        }

        /*
         * We were unsuccessful and didn't find anything.
         */
        THC_UNLOCK(&hc_head->hch_mtx);
        return NULL;
}
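
/*
 * Illustrative sketch of the calling convention: on success the bucket
 * row is returned still locked, so every caller must pair the lookup
 * with an unlock of the entry's row, just as the external functions
 * below do:
 *
 *	struct hc_metrics *hc_entry;
 *
 *	hc_entry = tcp_hc_lookup(inc);
 *	if (hc_entry != NULL) {
 *		... read/modify hc_entry ...
 *		THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
 *	}
 */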

/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
        int hash;
        struct hc_head *hc_head;
        struct hc_metrics *hc_entry;

        KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));

        /*
         * Hash the foreign ip address.
         */
        if (inc->inc_flags & INC_ISIPV6)
                hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
        else
                hash = HOSTCACHE_HASH(&inc->inc_faddr);

        hc_head = &tcp_hostcache.hashbase[hash];

        /*
         * Acquire lock for this bucket row; we release the lock if we
         * cannot allocate or reuse an entry, otherwise the caller has to
         * unlock after he is done.
         */
        THC_LOCK(&hc_head->hch_mtx);

        /*
         * If the bucket limit is reached, reuse the least-used element.
         */
        if (hc_head->hch_length >= tcp_hostcache.bucket_limit ||
            tcp_hostcache.cache_count >= tcp_hostcache.cache_limit) {
                hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
                /*
                 * At first we were dropping the last element, just to
                 * reacquire it in the next two lines again, which isn't very
                 * efficient.  Instead just reuse the least used element.
                 * We may drop something that is still "in-use" but we can be
                 * "lossy".
                 * Just give up if this bucket row is empty and we don't have
                 * anything to replace.
                 */
                if (hc_entry == NULL) {
                        THC_UNLOCK(&hc_head->hch_mtx);
                        return NULL;
                }
                TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
                tcp_hostcache.hashbase[hash].hch_length--;
                tcp_hostcache.cache_count--;
                tcpstat.tcps_hc_bucketoverflow++;
#if 0
                uma_zfree(tcp_hostcache.zone, hc_entry);
#endif
        } else {
                /*
                 * Allocate a new entry, or balk if not possible.
                 */
                hc_entry = uma_zalloc(tcp_hostcache.zone, M_NOWAIT);
                if (hc_entry == NULL) {
                        THC_UNLOCK(&hc_head->hch_mtx);
                        return NULL;
                }
        }

        /*
         * Initialize basic information of hostcache entry.
         */
        bzero(hc_entry, sizeof(*hc_entry));
        if (inc->inc_flags & INC_ISIPV6)
                bcopy(&inc->inc6_faddr, &hc_entry->ip6, sizeof(hc_entry->ip6));
        else
                hc_entry->ip4 = inc->inc_faddr;
        hc_entry->rmx_head = hc_head;
        hc_entry->rmx_expire = tcp_hostcache.expire;

        /*
         * Put it upfront.
         */
        TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
        tcp_hostcache.hashbase[hash].hch_length++;
        tcp_hostcache.cache_count++;
        tcpstat.tcps_hc_added++;

        return hc_entry;
}

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  The structure is zeroed when no entry
 * was found, and individual fields remain zero when no value is set.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
        struct hc_metrics *hc_entry;

        /*
         * Find the right bucket.
         */
        hc_entry = tcp_hc_lookup(inc);

        /*
         * If we don't have an existing object, return empty metrics.
         */
        if (hc_entry == NULL) {
                bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
                return;
        }
        hc_entry->rmx_hits++;
        hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */

        hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
        hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
        hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
        hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
        hc_metrics_lite->rmx_bandwidth = hc_entry->rmx_bandwidth;
        hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
        hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
        hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

        /*
         * Unlock bucket row.
         */
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
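
/*
 * Hypothetical caller sketch (the real consumers live in the TCP input
 * path): a new connection seeds its estimators from the cache and treats
 * any zero field as "no data":
 *
 *	struct hc_metrics_lite metrics;
 *
 *	tcp_hc_get(&inp->inp_inc, &metrics);
 *	if (metrics.rmx_rtt != 0)
 *		... initialize the RTT estimator from metrics.rmx_rtt ...
 *	if (metrics.rmx_mtu != 0)
 *		... clamp the MSS to the cached path MTU ...
 */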

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or the value is
 * not set.
 */
u_long
tcp_hc_getmtu(struct in_conninfo *inc)
{
        struct hc_metrics *hc_entry;
        u_long mtu;

        hc_entry = tcp_hc_lookup(inc);
        if (hc_entry == NULL) {
                return 0;
        }
        hc_entry->rmx_hits++;
        hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */

        mtu = hc_entry->rmx_mtu;
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
        return mtu;
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
{
        struct hc_metrics *hc_entry;

        /*
         * Find the right bucket.
         */
        hc_entry = tcp_hc_lookup(inc);

        /*
         * If we don't have an existing object, try to insert a new one.
         */
        if (hc_entry == NULL) {
                hc_entry = tcp_hc_insert(inc);
                if (hc_entry == NULL)
                        return;
        }
        hc_entry->rmx_updates++;
        hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */

        hc_entry->rmx_mtu = mtu;

        /*
         * Put it upfront so we find it faster next time.
         */
        TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
        TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

        /*
         * Unlock bucket row.
         */
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
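
/*
 * Hypothetical usage sketch: the path-MTU discovery code can react to an
 * ICMP needfrag message by recording the reduced MTU for the peer, so
 * that later connections to the same host start below the limit:
 *
 *	tcp_hc_updatemtu(&inp->inp_inc, (u_long)new_mtu);
 *
 * A subsequent tcp_hc_getmtu() on the same foreign address then returns
 * new_mtu until the entry expires or is overwritten.
 */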

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
        struct hc_metrics *hc_entry;

        hc_entry = tcp_hc_lookup(inc);
        if (hc_entry == NULL) {
                hc_entry = tcp_hc_insert(inc);
                if (hc_entry == NULL)
                        return;
        }
        hc_entry->rmx_updates++;
        hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */

        if (hcml->rmx_rtt != 0) {
                if (hc_entry->rmx_rtt == 0)
                        hc_entry->rmx_rtt = hcml->rmx_rtt;
                else
                        hc_entry->rmx_rtt =
                            (hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
                tcpstat.tcps_cachedrtt++;
        }
        if (hcml->rmx_rttvar != 0) {
                if (hc_entry->rmx_rttvar == 0)
                        hc_entry->rmx_rttvar = hcml->rmx_rttvar;
                else
                        hc_entry->rmx_rttvar =
                            (hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
                tcpstat.tcps_cachedrttvar++;
        }
        if (hcml->rmx_ssthresh != 0) {
                if (hc_entry->rmx_ssthresh == 0)
                        hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
                else
                        hc_entry->rmx_ssthresh =
                            (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
                tcpstat.tcps_cachedssthresh++;
        }
        if (hcml->rmx_bandwidth != 0) {
                if (hc_entry->rmx_bandwidth == 0)
                        hc_entry->rmx_bandwidth = hcml->rmx_bandwidth;
                else
                        hc_entry->rmx_bandwidth =
                            (hc_entry->rmx_bandwidth + hcml->rmx_bandwidth) / 2;
                /* tcpstat.tcps_cachedbandwidth++; */
        }
        if (hcml->rmx_cwnd != 0) {
                if (hc_entry->rmx_cwnd == 0)
                        hc_entry->rmx_cwnd = hcml->rmx_cwnd;
                else
                        hc_entry->rmx_cwnd =
                            (hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
                /* tcpstat.tcps_cachedcwnd++; */
        }
        if (hcml->rmx_sendpipe != 0) {
                if (hc_entry->rmx_sendpipe == 0)
                        hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
                else
                        hc_entry->rmx_sendpipe =
                            (hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) / 2;
                /* tcpstat.tcps_cachedsendpipe++; */
        }
        if (hcml->rmx_recvpipe != 0) {
                if (hc_entry->rmx_recvpipe == 0)
                        hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
                else
                        hc_entry->rmx_recvpipe =
                            (hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) / 2;
                /* tcpstat.tcps_cachedrecvpipe++; */
        }

        TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
        TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
        THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
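
/*
 * Worked example of the smoothing above (illustrative numbers): every
 * update replaces a cached value with the mean of the old and the new
 * sample.  Starting from a cached RTT of 100 and three consecutive
 * samples of 40:
 *
 *	(100 + 40) / 2 = 70,  (70 + 40) / 2 = 55,  (55 + 40) / 2 = 47
 *
 * (integer division), so the cache converges on changed network
 * conditions within a few sessions without jumping on a single outlier.
 */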

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
        int bufsize;
        int linesize = 128;
        char *p, *buf;
        int len, i, error;
        struct hc_metrics *hc_entry;
#ifdef INET6
        char ip6buf[INET6_ADDRSTRLEN];
#endif

        bufsize = linesize * (tcp_hostcache.cache_count + 1);

        p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);

        len = snprintf(p, linesize,
                "\nIP address        MTU  SSTHRESH     RTT   RTTVAR BANDWIDTH "
                "    CWND SENDPIPE RECVPIPE HITS  UPD  EXP\n");
        p += len;

#define msec(u) (((u) + 500) / 1000)
        for (i = 0; i < tcp_hostcache.hashsize; i++) {
                THC_LOCK(&tcp_hostcache.hashbase[i].hch_mtx);
                TAILQ_FOREACH(hc_entry, &tcp_hostcache.hashbase[i].hch_bucket,
                              rmx_q) {
                        len = snprintf(p, linesize,
                            "%-15s %5lu %8lu %6lums %6lums %9lu %8lu %8lu %8lu "
                            "%4lu %4lu %4i\n",
                            hc_entry->ip4.s_addr ? inet_ntoa(hc_entry->ip4) :
#ifdef INET6
                                ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
                                "IPv6?",
#endif
                            hc_entry->rmx_mtu,
                            hc_entry->rmx_ssthresh,
                            msec(hc_entry->rmx_rtt *
                                (RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
                            msec(hc_entry->rmx_rttvar *
                                (RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
                            hc_entry->rmx_bandwidth * 8,
                            hc_entry->rmx_cwnd,
                            hc_entry->rmx_sendpipe,
                            hc_entry->rmx_recvpipe,
                            hc_entry->rmx_hits,
                            hc_entry->rmx_updates,
                            hc_entry->rmx_expire);
                        p += len;
                }
                THC_UNLOCK(&tcp_hostcache.hashbase[i].hch_mtx);
        }
#undef msec
        error = SYSCTL_OUT(req, buf, p - buf);
        free(buf, M_TEMP);
        return (error);
}
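
/*
 * Example output (illustrative, values invented): the handler above backs
 * "sysctl net.inet.tcp.hostcache.list" and emits one line per entry:
 *
 *	IP address        MTU  SSTHRESH     RTT   RTTVAR BANDWIDTH ...
 *	192.0.2.15       1500     57344    18ms      4ms         0 ...
 *
 * RTT and RTT variance are converted from their internal scaled units to
 * milliseconds by the msec() macro, and bandwidth is reported in bits
 * per second (hence the "* 8").
 */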

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
        struct hc_metrics *hc_entry, *hc_next;
        int all = (intptr_t)arg;
        int i;

        if (tcp_hostcache.purgeall) {
                all = 1;
                tcp_hostcache.purgeall = 0;
        }

        for (i = 0; i < tcp_hostcache.hashsize; i++) {
                THC_LOCK(&tcp_hostcache.hashbase[i].hch_mtx);
                TAILQ_FOREACH_SAFE(hc_entry,
                    &tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
                        if (all || hc_entry->rmx_expire <= 0) {
                                TAILQ_REMOVE(&tcp_hostcache.hashbase[i].hch_bucket,
                                              hc_entry, rmx_q);
                                uma_zfree(tcp_hostcache.zone, hc_entry);
                                tcp_hostcache.hashbase[i].hch_length--;
                                tcp_hostcache.cache_count--;
                        } else
                                hc_entry->rmx_expire -= tcp_hostcache.prune;
                }
                THC_UNLOCK(&tcp_hostcache.hashbase[i].hch_mtx);
        }
        callout_reset(&tcp_hc_callout, tcp_hostcache.prune * hz,
            tcp_hc_purge, NULL);
}
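
/*
 * Timing note derived from the defaults above (illustrative arithmetic):
 * entries start with rmx_expire = 3600 seconds, and each purge run, 300
 * seconds apart, subtracts the prune interval.  An entry that is never
 * hit or updated is therefore freed after roughly 3600 / 300 = 12 runs,
 * i.e. about one hour, while every hit or update resets the countdown.
 */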
683 }