/*-
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, bandwidth, max
 * MTU, congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  All bucket rows have their own lock and
 * thus multiple lookups and modifies can be done at the same time as long as
 * they are in different bucket rows.  If a request for insertion of a new
 * record can't be satisfied, it simply returns an empty structure.  Nobody
 * and nothing outside of tcp_hostcache.c will ever point directly to any
 * entry in the tcp_hostcache.  All communication is done in an
 * object-oriented way and only functions of tcp_hostcache will manipulate
 * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
 * concurrent access situations.  Since tcp_hostcache is only caching
 * information, there are no fatal consequences if we either can't satisfy
 * any particular request or have to drop/overwrite an existing entry because
 * of bucket limit memory constraints.
 */
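
/*
 * Illustrative usage sketch (kept under #if 0, never compiled): roughly how
 * the TCP input/output paths are expected to consume this interface.  The
 * helper names in the body are hypothetical; the real callers live in
 * tcp_input.c and tcp_subr.c.
 */
#if 0
static void
example_hc_consumer(struct in_conninfo *inc)
{
	struct hc_metrics_lite metrics;
	u_long mtu;

	/*
	 * Connection setup: fetch the cached metrics for the foreign
	 * address.  The structure is zeroed when nothing is known, so
	 * each field must be checked against 0 before it is used.
	 */
	tcp_hc_get(inc, &metrics);
	if (metrics.rmx_rtt != 0)
		seed_srtt(metrics.rmx_rtt);	/* hypothetical helper */

	/* Path MTU discovery: a return value of 0 means "not cached". */
	mtu = tcp_hc_getmtu(inc);
	if (mtu != 0)
		clamp_mss(mtu);			/* hypothetical helper */

	/*
	 * Connection teardown: feed the measured values back.  A new
	 * entry is created on a best-effort basis if none exists.
	 */
	tcp_hc_update(inc, &metrics);
}
#endif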

/*
 * Many thanks to jlemon for basic structure of tcp_syncache which is being
 * followed here.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hostcache.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <vm/uma.h>

/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE		512
#define TCP_HOSTCACHE_BUCKETLIMIT	30
#define TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */

static VNET_DEFINE(struct tcp_hostcache, tcp_hostcache);
#define V_tcp_hostcache		VNET(tcp_hostcache)

static VNET_DEFINE(struct callout, tcp_hc_callout);
#define V_tcp_hc_callout	VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache, CTLFLAG_RW, 0,
    "TCP Host cache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entries on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
    sysctl_tcp_hc_list, "A", "List of all hostcache entries");


static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

#define HOSTCACHE_HASH(ip) \
	(((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) &	\
	  V_tcp_hostcache.hashmask)

/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6)				\
	(((ip6)->s6_addr32[0] ^				\
	  (ip6)->s6_addr32[1] ^				\
	  (ip6)->s6_addr32[2] ^				\
	  (ip6)->s6_addr32[3]) &			\
	 V_tcp_hostcache.hashmask)
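
/*
 * Sketch (not compiled): how the hash macros map a foreign address to its
 * bucket row.  Because hashsize is forced to a power of 2 in tcp_hc_init(),
 * masking with hashmask is equivalent to a modulo by hashsize.  The variable
 * names below are hypothetical.
 */
#if 0
	struct in_addr faddr;
	struct hc_head *row;

	faddr.s_addr = htonl(0xc0a80001);	/* 192.168.0.1, example */
	row = &V_tcp_hostcache.hashbase[HOSTCACHE_HASH(&faddr)];
#endif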

#define THC_LOCK(lp)		mtx_lock(lp)
#define THC_UNLOCK(lp)		mtx_unlock(lp)

void
tcp_hc_init(void)
{
	u_int cache_limit;
	int i;

	/*
	 * Initialize hostcache structures.
	 */
	V_tcp_hostcache.cache_count = 0;
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;

	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
		   M_HOSTCACHE, M_WAITOK | M_ZERO);

	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
			  NULL, MTX_DEF);
	}

	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);

	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, CALLOUT_MPSAFE);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
}

#ifdef VIMAGE
void
tcp_hc_destroy(void)
{
	int i;

	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}
#endif

/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * Iterate through entries in bucket row looking for a match.
	 */
	TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (inc->inc_flags & INC_ISIPV6) {
			/* XXX: check ip6_zoneid */
			if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
			    sizeof(inc->inc6_faddr)) == 0)
				return hc_entry;
		} else {
			if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
			    sizeof(inc->inc_faddr)) == 0)
				return hc_entry;
		}
	}

	/*
	 * We were unsuccessful and didn't find anything.
	 */
	THC_UNLOCK(&hc_head->hch_mtx);
	return NULL;
}

/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we
	 * can't allocate or reuse an entry, otherwise the caller has to
	 * unlock after he is done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * If the bucket limit is reached, reuse the least-used element.
	 */
	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
	    V_tcp_hostcache.cache_count >= V_tcp_hostcache.cache_limit) {
		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
		/*
		 * At first we were dropping the last element, just to
		 * reacquire it in the next two lines again, which isn't very
		 * efficient.  Instead just reuse the least used element.
		 * We may drop something that is still "in-use" but we can be
		 * "lossy".
		 * Just give up if this bucket row is empty and we don't have
		 * anything to replace.
		 */
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
		V_tcp_hostcache.hashbase[hash].hch_length--;
		V_tcp_hostcache.cache_count--;
		TCPSTAT_INC(tcps_hc_bucketoverflow);
#if 0
		uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
	} else {
		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
	}

	/*
	 * Initialize basic information of hostcache entry.
	 */
	bzero(hc_entry, sizeof(*hc_entry));
	if (inc->inc_flags & INC_ISIPV6) {
		hc_entry->ip6 = inc->inc6_faddr;
		hc_entry->ip6_zoneid = inc->inc6_zoneid;
	} else
		hc_entry->ip4 = inc->inc_faddr;
	hc_entry->rmx_head = hc_head;
	hc_entry->rmx_expire = V_tcp_hostcache.expire;

	/*
	 * Put it upfront.
	 */
	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
	V_tcp_hostcache.hashbase[hash].hch_length++;
	V_tcp_hostcache.cache_count++;
	TCPSTAT_INC(tcps_hc_added);

	return hc_entry;
}

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  The structure is zeroed when no entry
 * is found; individual metrics are 0 when they have not been set.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object, return empty metrics.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
	hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
	hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
	hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
	hc_metrics_lite->rmx_bandwidth = hc_entry->rmx_bandwidth;
	hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
	hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
	hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or the value is
 * not set.
 */
u_long
tcp_hc_getmtu(struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	u_long mtu;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL)
		return 0;
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	mtu = hc_entry->rmx_mtu;
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
	return mtu;
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
{
	struct hc_metrics *hc_entry;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object, try to insert a new one.
	 */
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_entry->rmx_mtu = mtu;

	/*
	 * Put it upfront so we find it faster next time.
	 */
	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_metrics *hc_entry;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

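	/*
	 * Merge each supplied sample into the cached value: an unset
	 * cached value (0) is replaced outright, otherwise the new value
	 * is the mean of the cached value and the sample.  For example,
	 * a cached RTT of 100 ms and a sample of 60 ms yield 80 ms.
	 */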
	if (hcml->rmx_rtt != 0) {
		if (hc_entry->rmx_rtt == 0)
			hc_entry->rmx_rtt = hcml->rmx_rtt;
		else
			hc_entry->rmx_rtt =
			    (hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->rmx_rttvar != 0) {
		if (hc_entry->rmx_rttvar == 0)
			hc_entry->rmx_rttvar = hcml->rmx_rttvar;
		else
			hc_entry->rmx_rttvar =
			    (hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->rmx_ssthresh != 0) {
		if (hc_entry->rmx_ssthresh == 0)
			hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
		else
			hc_entry->rmx_ssthresh =
			    (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->rmx_bandwidth != 0) {
		if (hc_entry->rmx_bandwidth == 0)
			hc_entry->rmx_bandwidth = hcml->rmx_bandwidth;
		else
			hc_entry->rmx_bandwidth =
			    (hc_entry->rmx_bandwidth + hcml->rmx_bandwidth) / 2;
		/* TCPSTAT_INC(tcps_cachedbandwidth); */
	}
	if (hcml->rmx_cwnd != 0) {
		if (hc_entry->rmx_cwnd == 0)
			hc_entry->rmx_cwnd = hcml->rmx_cwnd;
		else
			hc_entry->rmx_cwnd =
			    (hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->rmx_sendpipe != 0) {
		if (hc_entry->rmx_sendpipe == 0)
			hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
		else
			hc_entry->rmx_sendpipe =
			    (hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->rmx_recvpipe != 0) {
		if (hc_entry->rmx_recvpipe == 0)
			hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
		else
			hc_entry->rmx_recvpipe =
			    (hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}

	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	int linesize = 128;
	struct sbuf sb;
	int i, error;
	struct hc_metrics *hc_entry;
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	sbuf_new(&sb, NULL, linesize * (V_tcp_hostcache.cache_count + 1),
	    SBUF_FIXEDLEN);

	sbuf_printf(&sb,
		"\nIP address        MTU SSTHRESH      RTT   RTTVAR BANDWIDTH "
		"    CWND SENDPIPE RECVPIPE HITS  UPD  EXP\n");

#define msec(u) (((u) + 500) / 1000)
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
			      rmx_q) {
			sbuf_printf(&sb,
			    "%-15s %5lu %8lu %6lums %6lums %9lu %8lu %8lu %8lu "
			    "%4lu %4lu %4i\n",
			    hc_entry->ip4.s_addr ? inet_ntoa(hc_entry->ip4) :
#ifdef INET6
				ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
				"IPv6?",
#endif
			    hc_entry->rmx_mtu,
			    hc_entry->rmx_ssthresh,
			    msec(hc_entry->rmx_rtt *
				(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec(hc_entry->rmx_rttvar *
				(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->rmx_bandwidth * 8,
			    hc_entry->rmx_cwnd,
			    hc_entry->rmx_sendpipe,
			    hc_entry->rmx_recvpipe,
			    hc_entry->rmx_hits,
			    hc_entry->rmx_updates,
			    hc_entry->rmx_expire);
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
#undef msec
	sbuf_finish(&sb);
	error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb));
	sbuf_delete(&sb);
	return (error);
}

/*
 * Caller has to make sure the curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_metrics *hc_entry, *hc_next;
	int i;

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH_SAFE(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
			if (all || hc_entry->rmx_expire <= 0) {
				TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
					      hc_entry, rmx_q);
				uma_zfree(V_tcp_hostcache.zone, hc_entry);
				V_tcp_hostcache.hashbase[i].hch_length--;
				V_tcp_hostcache.cache_count--;
			} else
				hc_entry->rmx_expire -= V_tcp_hostcache.prune;
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
}

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	int all = 0;

	CURVNET_SET((struct vnet *) arg);

	if (V_tcp_hostcache.purgeall) {
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}

	tcp_hc_purge_internal(all);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, arg);
	CURVNET_RESTORE();
}