/*-
 * Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

/*
 * Maximum number of PCPU containers.  If you know what you're doing you could
 * explicitly define MBALLOC_NCPU to be exactly the number of CPUs on your
 * system during compilation, and thus prevent kernel structure bloat.
 *
 * SMP and non-SMP kernels clearly have a different number of possible CPUs,
 * but because we cannot assume a dense array of CPUs, we always allocate
 * and traverse PCPU containers up to NCPU and merely check for the ones
 * that are actually in use.
 */
#ifdef MBALLOC_NCPU
#define	NCPU	MBALLOC_NCPU
#else
#define	NCPU	MAXCPU
#endif

/*
 * The mbuf allocator is heavily based on Alfred Perlstein's
 * (alfred@FreeBSD.org) "memcache" allocator, which is itself based
 * on concepts from several per-CPU memory allocators.  The differences
 * between this allocator and memcache are, among other things:
 *
 * (i) We don't free back to the map from the free() routine - we leave the
 *     option of implementing lazy freeing (from a kproc) in the future.
 *
 * (ii) We allocate from separate sub-maps of kmem_map, thus limiting the
 *      maximum number of allocatable objects of a given type.  Further,
 *      we handle blocking on a cv in the case that the map is starved and
 *      we have to rely solely on cached (circulating) objects.
 *
 * The mbuf allocator keeps all objects that it allocates in mb_buckets.
 * The buckets keep a page worth of objects (an object can be an mbuf or an
 * mbuf cluster) and facilitate moving larger sets of contiguous objects
 * from the per-CPU lists to the main list for the given object.  The buckets
 * also have an added advantage in that after several moves from a per-CPU
 * list to the main list and back to the per-CPU list, contiguous objects
 * are kept together, thus trying to put the TLB cache to good use.
 *
 * The buckets are kept on singly-linked lists called "containers."  A
 * container is protected by a mutex lock in order to ensure consistency.
 * The mutex lock itself is allocated separately and attached to the
 * container at boot time, thus allowing for certain containers to share
 * the same mutex lock.  Per-CPU containers for mbufs and mbuf clusters all
 * share the same per-CPU lock whereas the "general system" containers
 * (i.e., the "main lists") for these objects share one global lock.
 */

struct mb_bucket {
	SLIST_ENTRY(mb_bucket) mb_blist;
	int		mb_owner;
	int		mb_numfree;
	void		*mb_free[0];
};

struct mb_container {
	SLIST_HEAD(mc_buckethd, mb_bucket) mc_bhead;
	struct mtx	*mc_lock;
	u_int		mc_numowner;
	u_int		mc_starved;
	u_long		*mc_objcount;
	u_long		*mc_numpgs;
	u_long		*mc_types;
};

struct mb_gen_list {
	struct mb_container mb_cont;
	struct cv	mgl_mstarved;
};

struct mb_pcpu_list {
	struct mb_container mb_cont;
};

/*
 * Boot-time configurable object counts that will determine the maximum
 * number of permitted objects in the mbuf and mcluster cases.  In the
 * ext counter (nmbcnt) case, it's just an indicator serving to scale
 * kmem_map size properly - in other words, we may be allowed to allocate
 * more than nmbcnt counters, whereas we will never be allowed to allocate
 * more than nmbufs mbufs or nmbclusters mclusters.
 * As for nsfbufs, it is used to indicate how many sendfile(2) buffers will be
 * allocatable by the sfbuf allocator (found in uipc_syscalls.c).
 */
#define	NMBCLUSTERS	(1024 + maxusers * 64)
#define	NMBUFS		(nmbclusters * 2)
#define	NSFBUFS		(512 + maxusers * 16)
#define	NMBCNTS		(nmbclusters + nsfbufs)

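/*
 * Worked example (illustrative only, not part of the original source): with
 * maxusers = 32, the defaults above come out to
 * nmbclusters = 1024 + 32 * 64 = 3072, nmbufs = 3072 * 2 = 6144,
 * nsfbufs = 512 + 32 * 16 = 1024 and nmbcnt = 3072 + 1024 = 4096.
 * tunable_mbinit() below only ever raises nmbufs and nmbcnt, so the
 * relations nmbufs >= 2 * nmbclusters and nmbcnt >= nmbclusters + nsfbufs
 * always hold.
 */
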
/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{

	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);
	nmbcnt = NMBCNTS;
	TUNABLE_INT_FETCH("kern.ipc.nmbcnt", &nmbcnt);

	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
	if (nmbcnt < nmbclusters + nsfbufs)
		nmbcnt = nmbclusters + nsfbufs;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);

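/*
 * Example (illustrative): because the values above are fetched with
 * TUNABLE_INT_FETCH(), they are normally overridden from the loader
 * environment at boot, e.g. in /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="16384"
 *	kern.ipc.nmbufs="32768"
 *
 * The read-only sysctls of the same names declared further below merely
 * report whatever values were settled on here.
 */
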
/*
 * The freelist structures and mutex locks.  The number statically declared
 * here depends on the number of CPUs.
 *
 * We set up in such a way that all the objects (mbufs, clusters)
 * share the same mutex lock.  It has been established that we do not benefit
 * from different locks for different objects, so we use the same lock,
 * regardless of object type.
 */
struct mb_lstmngr {
	struct mb_gen_list	*ml_genlist;
	struct mb_pcpu_list	*ml_cntlst[NCPU];
	struct mb_bucket	**ml_btable;
	vm_map_t		ml_map;
	vm_offset_t		ml_mapbase;
	vm_offset_t		ml_maptop;
	int			ml_mapfull;
	u_int			ml_objsize;
	u_int			*ml_wmhigh;
};
static struct mb_lstmngr mb_list_mbuf, mb_list_clust;
static struct mtx mbuf_gen, mbuf_pcpu[NCPU];

/*
 * Local macros for internal allocator structure manipulations.
 */
#ifdef SMP
#define	MB_GET_PCPU_LIST(mb_lst)	(mb_lst)->ml_cntlst[PCPU_GET(cpuid)]
#else
#define	MB_GET_PCPU_LIST(mb_lst)	(mb_lst)->ml_cntlst[0]
#endif

#define	MB_GET_GEN_LIST(mb_lst)		(mb_lst)->ml_genlist

#define	MB_LOCK_CONT(mb_cnt)		mtx_lock((mb_cnt)->mb_cont.mc_lock)

#define	MB_UNLOCK_CONT(mb_cnt)		mtx_unlock((mb_cnt)->mb_cont.mc_lock)

#define	MB_GET_PCPU_LIST_NUM(mb_lst, num) \
	(mb_lst)->ml_cntlst[(num)]

#define	MB_BUCKET_INDX(mb_obj, mb_lst) \
	(int)(((caddr_t)(mb_obj) - (caddr_t)(mb_lst)->ml_mapbase) / PAGE_SIZE)

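/*
 * Worked example (illustrative): MB_BUCKET_INDX() simply turns an object's
 * address into the index of the page (and therefore of the bucket) it lives
 * in.  With 4KB pages, an mbuf at ml_mapbase + 3 * PAGE_SIZE + 256 yields
 * (3 * 4096 + 256) / 4096 = 3, i.e. the fourth entry of ml_btable.
 */
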
#define	MB_GET_OBJECT(mb_objp, mb_bckt, mb_lst)				\
{									\
	struct mc_buckethd *_mchd = &((mb_lst)->mb_cont.mc_bhead);	\
									\
	(mb_bckt)->mb_numfree--;					\
	(mb_objp) = (mb_bckt)->mb_free[((mb_bckt)->mb_numfree)];	\
	(*((mb_lst)->mb_cont.mc_objcount))--;				\
	if ((mb_bckt)->mb_numfree == 0) {				\
		SLIST_REMOVE_HEAD(_mchd, mb_blist);			\
		SLIST_NEXT((mb_bckt), mb_blist) = NULL;			\
		(mb_bckt)->mb_owner |= MB_BUCKET_FREE;			\
	}								\
}

#define	MB_PUT_OBJECT(mb_objp, mb_bckt, mb_lst)				\
	(mb_bckt)->mb_free[((mb_bckt)->mb_numfree)] = (mb_objp);	\
	(mb_bckt)->mb_numfree++;					\
	(*((mb_lst)->mb_cont.mc_objcount))++;

#define	MB_MBTYPES_INC(mb_cnt, mb_type, mb_num)				\
	if ((mb_type) != MT_NOTMBUF)					\
		(*((mb_cnt)->mb_cont.mc_types + (mb_type))) += (mb_num)

#define	MB_MBTYPES_DEC(mb_cnt, mb_type, mb_num)				\
	if ((mb_type) != MT_NOTMBUF)					\
		(*((mb_cnt)->mb_cont.mc_types + (mb_type))) -= (mb_num)

/*
 * Ownership of buckets/containers is represented by integers.  The PCPU
 * lists range from 0 to NCPU-1.  We need a free numerical id for the general
 * list (we use NCPU).  We also need a non-conflicting free bit to indicate
 * that the bucket is free and removed from a container, while not losing
 * the bucket's originating container id.  We use the highest bit
 * for the free marker.
 */
#define	MB_GENLIST_OWNER	(NCPU)
#define	MB_BUCKET_FREE		(1 << (sizeof(int) * 8 - 1))

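/*
 * Worked example (illustrative, not from the original source): on a machine
 * where int is 32 bits, MB_BUCKET_FREE is 1 << 31, i.e. the bit pattern
 * 0x80000000.  A bucket owned by the PCPU list of CPU 2 has mb_owner == 2;
 * once it runs out of free objects and is taken off that list, mb_owner
 * becomes 0x80000002.  Masking with ~MB_BUCKET_FREE, as mb_free() does,
 * still recovers the originating container id 2.
 */
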
/* Statistics structures for allocator (per-CPU and general). */
static struct mbpstat mb_statpcpu[NCPU + 1];
struct mbstat mbstat;

/* Sleep time for wait code (in ticks). */
static int mbuf_wait = 64;

static u_int mbuf_limit = 512;	/* Upper limit on # of mbufs per CPU. */
static u_int clust_limit = 128;	/* Upper limit on # of clusters per CPU. */

/*
 * Objects exported by sysctl(8).
 */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbclusters, CTLFLAG_RD, &nmbclusters, 0,
    "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
    "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbcnt, CTLFLAG_RD, &nmbcnt, 0,
    "Number used to scale kmem_map to ensure sufficient space for counters");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RD, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW, &mbuf_wait, 0,
    "Sleep time of mbuf subsystem wait allocations during exhaustion");
SYSCTL_UINT(_kern_ipc, OID_AUTO, mbuf_limit, CTLFLAG_RW, &mbuf_limit, 0,
    "Upper limit of number of mbufs allowed on each PCPU list");
SYSCTL_UINT(_kern_ipc, OID_AUTO, clust_limit, CTLFLAG_RW, &clust_limit, 0,
    "Upper limit of number of mbuf clusters allowed on each PCPU list");
SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mb_statpcpu, CTLFLAG_RD, mb_statpcpu,
    sizeof(mb_statpcpu), "S,", "Mbuf allocator per CPU statistics");

/*
 * Prototypes of local allocator routines.
 */
static void		*mb_alloc_wait(struct mb_lstmngr *, short);
static struct mb_bucket	*mb_pop_cont(struct mb_lstmngr *, int,
			    struct mb_pcpu_list *);
static void		 mb_reclaim(void);
static void		 mbuf_init(void *);

/*
 * Initial allocation numbers.  Each parameter represents the number of buckets
 * of each object that will be placed initially in each PCPU container for
 * the respective object type.
 */
#define	NMB_MBUF_INIT	4
#define	NMB_CLUST_INIT	16

/*
 * Initialize the mbuf subsystem.
 *
 * We sub-divide the kmem_map into several submaps; this way, we don't have
 * to worry about artificially limiting the number of mbuf or mbuf cluster
 * allocations, due to fear of one type of allocation "stealing" address
 * space initially reserved for another.
 *
 * Set up both the general containers and all the PCPU containers.  Populate
 * the PCPU containers with initial numbers.
 */
MALLOC_DEFINE(M_MBUF, "mbufmgr", "mbuf subsystem management structures");
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL)

static void
mbuf_init(void *dummy)
{
	struct mb_pcpu_list *pcpu_cnt;
	vm_size_t mb_map_size;
	int i, j;

	/*
	 * Set up all the submaps, for each type of object that we deal
	 * with in this allocator.
	 */
	mb_map_size = (vm_size_t)(nmbufs * MSIZE);
	mb_map_size = rounddown(mb_map_size, PAGE_SIZE);
	mb_list_mbuf.ml_btable = malloc((unsigned long)mb_map_size / PAGE_SIZE *
	    sizeof(struct mb_bucket *), M_MBUF, M_NOWAIT);
	if (mb_list_mbuf.ml_btable == NULL)
		goto bad;
	mb_list_mbuf.ml_map = kmem_suballoc(kmem_map, &(mb_list_mbuf.ml_mapbase),
	    &(mb_list_mbuf.ml_maptop), mb_map_size);
	mb_list_mbuf.ml_mapfull = 0;
	mb_list_mbuf.ml_objsize = MSIZE;
	mb_list_mbuf.ml_wmhigh = &mbuf_limit;

	mb_map_size = (vm_size_t)(nmbclusters * MCLBYTES);
	mb_map_size = rounddown(mb_map_size, PAGE_SIZE);
	mb_list_clust.ml_btable = malloc((unsigned long)mb_map_size / PAGE_SIZE
	    * sizeof(struct mb_bucket *), M_MBUF, M_NOWAIT);
	if (mb_list_clust.ml_btable == NULL)
		goto bad;
	mb_list_clust.ml_map = kmem_suballoc(kmem_map,
	    &(mb_list_clust.ml_mapbase), &(mb_list_clust.ml_maptop),
	    mb_map_size);
	mb_list_clust.ml_mapfull = 0;
	mb_list_clust.ml_objsize = MCLBYTES;
	mb_list_clust.ml_wmhigh = &clust_limit;

	/* XXX XXX XXX: mbuf_map->system_map = clust_map->system_map = 1. */

	/*
	 * Allocate required general (global) containers for each object type.
	 */
	mb_list_mbuf.ml_genlist = malloc(sizeof(struct mb_gen_list), M_MBUF,
	    M_NOWAIT);
	mb_list_clust.ml_genlist = malloc(sizeof(struct mb_gen_list), M_MBUF,
	    M_NOWAIT);
	if ((mb_list_mbuf.ml_genlist == NULL) ||
	    (mb_list_clust.ml_genlist == NULL))
		goto bad;

	/*
	 * Initialize condition variables and general container mutex locks.
	 */
	mtx_init(&mbuf_gen, "mbuf subsystem general lists lock", 0);
	cv_init(&(mb_list_mbuf.ml_genlist->mgl_mstarved), "mbuf pool starved");
	cv_init(&(mb_list_clust.ml_genlist->mgl_mstarved),
	    "mcluster pool starved");
	mb_list_mbuf.ml_genlist->mb_cont.mc_lock =
	    mb_list_clust.ml_genlist->mb_cont.mc_lock = &mbuf_gen;

	/*
	 * Set up the general containers for each object.
	 */
	mb_list_mbuf.ml_genlist->mb_cont.mc_numowner =
	    mb_list_clust.ml_genlist->mb_cont.mc_numowner = MB_GENLIST_OWNER;
	mb_list_mbuf.ml_genlist->mb_cont.mc_starved =
	    mb_list_clust.ml_genlist->mb_cont.mc_starved = 0;
	mb_list_mbuf.ml_genlist->mb_cont.mc_objcount =
	    &(mb_statpcpu[MB_GENLIST_OWNER].mb_mbfree);
	mb_list_clust.ml_genlist->mb_cont.mc_objcount =
	    &(mb_statpcpu[MB_GENLIST_OWNER].mb_clfree);
	mb_list_mbuf.ml_genlist->mb_cont.mc_numpgs =
	    &(mb_statpcpu[MB_GENLIST_OWNER].mb_mbpgs);
	mb_list_clust.ml_genlist->mb_cont.mc_numpgs =
	    &(mb_statpcpu[MB_GENLIST_OWNER].mb_clpgs);
	mb_list_mbuf.ml_genlist->mb_cont.mc_types =
	    &(mb_statpcpu[MB_GENLIST_OWNER].mb_mbtypes[0]);
	mb_list_clust.ml_genlist->mb_cont.mc_types = NULL;
	SLIST_INIT(&(mb_list_mbuf.ml_genlist->mb_cont.mc_bhead));
	SLIST_INIT(&(mb_list_clust.ml_genlist->mb_cont.mc_bhead));

	/*
	 * Initialize general mbuf statistics.
	 */
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	/*
	 * Allocate and initialize PCPU containers.
	 */
	for (i = 0; i < NCPU; i++) {
		mb_list_mbuf.ml_cntlst[i] = malloc(sizeof(struct mb_pcpu_list),
		    M_MBUF, M_NOWAIT);
		mb_list_clust.ml_cntlst[i] = malloc(sizeof(struct mb_pcpu_list),
		    M_MBUF, M_NOWAIT);
		if ((mb_list_mbuf.ml_cntlst[i] == NULL) ||
		    (mb_list_clust.ml_cntlst[i] == NULL))
			goto bad;

		mtx_init(&mbuf_pcpu[i], "mbuf PCPU list lock", 0);
		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_lock =
		    mb_list_clust.ml_cntlst[i]->mb_cont.mc_lock = &mbuf_pcpu[i];

		mb_statpcpu[i].mb_active = 1;
		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_numowner =
		    mb_list_clust.ml_cntlst[i]->mb_cont.mc_numowner = i;
		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_starved =
		    mb_list_clust.ml_cntlst[i]->mb_cont.mc_starved = 0;
		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_objcount =
		    &(mb_statpcpu[i].mb_mbfree);
		mb_list_clust.ml_cntlst[i]->mb_cont.mc_objcount =
		    &(mb_statpcpu[i].mb_clfree);
		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_numpgs =
		    &(mb_statpcpu[i].mb_mbpgs);
		mb_list_clust.ml_cntlst[i]->mb_cont.mc_numpgs =
		    &(mb_statpcpu[i].mb_clpgs);
		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_types =
		    &(mb_statpcpu[i].mb_mbtypes[0]);
		mb_list_clust.ml_cntlst[i]->mb_cont.mc_types = NULL;

		SLIST_INIT(&(mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_bhead));
		SLIST_INIT(&(mb_list_clust.ml_cntlst[i]->mb_cont.mc_bhead));

		/*
		 * Perform initial allocations.
		 */
		pcpu_cnt = MB_GET_PCPU_LIST_NUM(&mb_list_mbuf, i);
		MB_LOCK_CONT(pcpu_cnt);
		for (j = 0; j < NMB_MBUF_INIT; j++) {
			if (mb_pop_cont(&mb_list_mbuf, M_DONTWAIT, pcpu_cnt)
			    == NULL)
				goto bad;
		}
		MB_UNLOCK_CONT(pcpu_cnt);

		pcpu_cnt = MB_GET_PCPU_LIST_NUM(&mb_list_clust, i);
		MB_LOCK_CONT(pcpu_cnt);
		for (j = 0; j < NMB_CLUST_INIT; j++) {
			if (mb_pop_cont(&mb_list_clust, M_DONTWAIT, pcpu_cnt)
			    == NULL)
				goto bad;
		}
		MB_UNLOCK_CONT(pcpu_cnt);
	}

	return;
bad:
	panic("mbuf_init(): failed to initialize mbuf subsystem!");
}

/*
 * Populate a given mbuf PCPU container with a bucket full of fresh new
 * buffers.  Return a pointer to the new bucket (already in the container if
 * successful), or return NULL on failure.
 *
 * PCPU container lock must be held when this is called.
 * The lock is dropped here so that we can cleanly call the underlying VM
 * code.  If we fail, we return with no locks held.  If we succeed (i.e.,
 * return non-NULL), we return with the PCPU lock held, ready for allocation
 * from the returned bucket.
 */
static struct mb_bucket *
mb_pop_cont(struct mb_lstmngr *mb_list, int how, struct mb_pcpu_list *cnt_lst)
{
	struct mb_bucket *bucket;
	caddr_t p;
	int i;

	MB_UNLOCK_CONT(cnt_lst);
	/*
	 * If our object's (finite) map is starved (i.e., no more address
	 * space), bail out now.
	 */
	if (mb_list->ml_mapfull)
		return (NULL);

	bucket = malloc(sizeof(struct mb_bucket) +
	    PAGE_SIZE / mb_list->ml_objsize * sizeof(void *), M_MBUF,
	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
	if (bucket == NULL)
		return (NULL);

	p = (caddr_t)kmem_malloc(mb_list->ml_map, PAGE_SIZE,
	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
	if (p == NULL) {
		free(bucket, M_MBUF);
		if (how == M_TRYWAIT)
			mb_list->ml_mapfull = 1;
		return (NULL);
	}

	bucket->mb_numfree = 0;
	mb_list->ml_btable[MB_BUCKET_INDX(p, mb_list)] = bucket;
	for (i = 0; i < (PAGE_SIZE / mb_list->ml_objsize); i++) {
		bucket->mb_free[i] = p;
		bucket->mb_numfree++;
		p += mb_list->ml_objsize;
	}

	MB_LOCK_CONT(cnt_lst);
	bucket->mb_owner = cnt_lst->mb_cont.mc_numowner;
	SLIST_INSERT_HEAD(&(cnt_lst->mb_cont.mc_bhead), bucket, mb_blist);
	(*(cnt_lst->mb_cont.mc_numpgs))++;
	*(cnt_lst->mb_cont.mc_objcount) += bucket->mb_numfree;

	return (bucket);
}

/*
 * Allocate an mbuf-subsystem type object.
 * The general case is very easy.  Complications only arise if our PCPU
 * container is empty.  Things get worse if the PCPU container is empty,
 * the general container is empty, and we've run out of address space
 * in our map; then we try to block if we're willing to (M_TRYWAIT).
 */
mb_alloc(struct mb_lstmngr *mb_list, int how, short type)
{
	static int last_report;
	struct mb_pcpu_list *cnt_lst;
	struct mb_bucket *bucket;
	void *m;

	cnt_lst = MB_GET_PCPU_LIST(mb_list);
	MB_LOCK_CONT(cnt_lst);

	if ((bucket = SLIST_FIRST(&(cnt_lst->mb_cont.mc_bhead))) != NULL) {
		/*
		 * This is the easy allocation case.  We just grab an object
		 * from a bucket in the PCPU container.  At worst, we
		 * have just emptied the bucket and so we remove it
		 * from the container.
		 */
		MB_GET_OBJECT(m, bucket, cnt_lst);
		MB_MBTYPES_INC(cnt_lst, type, 1);
		MB_UNLOCK_CONT(cnt_lst);
	} else {
		struct mb_gen_list *gen_list;

		/*
		 * This is the less common, more difficult case.  We must
		 * first verify if the general list has anything for us
		 * and if that also fails, we must allocate a page from
		 * the map and create a new bucket to place in our PCPU
		 * container (already locked).  If the map is starved then
		 * we're really in for trouble, as we have to wait on
		 * the general container's condition variable.
		 */
		gen_list = MB_GET_GEN_LIST(mb_list);
		MB_LOCK_CONT(gen_list);

		if ((bucket = SLIST_FIRST(&(gen_list->mb_cont.mc_bhead)))
		    != NULL) {
			/*
			 * Give ownership of the bucket to our CPU's
			 * container, but only actually put the bucket
			 * in the container if it doesn't become free
			 * upon removing an mbuf from it.
			 */
			SLIST_REMOVE_HEAD(&(gen_list->mb_cont.mc_bhead),
			    mb_blist);
			bucket->mb_owner = cnt_lst->mb_cont.mc_numowner;
			(*(gen_list->mb_cont.mc_numpgs))--;
			(*(cnt_lst->mb_cont.mc_numpgs))++;
			*(gen_list->mb_cont.mc_objcount) -= bucket->mb_numfree;
			bucket->mb_numfree--;
			m = bucket->mb_free[(bucket->mb_numfree)];
			if (bucket->mb_numfree == 0) {
				SLIST_NEXT(bucket, mb_blist) = NULL;
				bucket->mb_owner |= MB_BUCKET_FREE;
			} else {
				SLIST_INSERT_HEAD(&(cnt_lst->mb_cont.mc_bhead),
				    bucket, mb_blist);
				*(cnt_lst->mb_cont.mc_objcount) +=
				    bucket->mb_numfree;
			}
			MB_UNLOCK_CONT(gen_list);
			MB_MBTYPES_INC(cnt_lst, type, 1);
			MB_UNLOCK_CONT(cnt_lst);
		} else {
			/*
			 * We'll have to allocate a new page.
			 */
			MB_UNLOCK_CONT(gen_list);
			bucket = mb_pop_cont(mb_list, how, cnt_lst);
			if (bucket != NULL) {
				MB_GET_OBJECT(m, bucket, cnt_lst);
				MB_MBTYPES_INC(cnt_lst, type, 1);
				MB_UNLOCK_CONT(cnt_lst);
			} else {
				if (how == M_TRYWAIT) {
					/*
					 * Absolute worst-case scenario.
					 * We block if we're willing to, but
					 * only after trying to steal from
					 * the other PCPU containers.
					 */
					m = mb_alloc_wait(mb_list, type);
				} else {
					/* XXX: No consistency. */
					m = NULL;

					if (ticks < last_report ||
					    (ticks - last_report) >= hz) {
						last_report = ticks;
						printf(
				"mb_alloc for mbuf type %d failed.\n", type);
					}
				}
			}
		}
	}

	return (m);
}

/*
 * This is the worst-case scenario called only if we're allocating with
 * M_TRYWAIT.  We first drain all the protocols, then try to find an mbuf
 * by looking in every PCPU container.  If we're still unsuccessful, we
 * try the general container one last time and possibly block on our
 * starved condition variable.
 */
static void *
mb_alloc_wait(struct mb_lstmngr *mb_list, short type)
{
	struct mb_pcpu_list *cnt_lst;
	struct mb_gen_list *gen_list;
	struct mb_bucket *bucket;
	void *m;
	int i, cv_ret;

	/*
	 * Try to reclaim mbuf-related objects (mbufs, clusters).
	 */
	mb_reclaim();

	/*
	 * Cycle all the PCPU containers.  Increment starved counts if found
	 * empty.
	 */
	for (i = 0; i < NCPU; i++) {
		cnt_lst = MB_GET_PCPU_LIST_NUM(mb_list, i);
		MB_LOCK_CONT(cnt_lst);

		/*
		 * If container is non-empty, get a single object from it.
		 * If empty, increment starved count.
		 */
		if ((bucket = SLIST_FIRST(&(cnt_lst->mb_cont.mc_bhead))) !=
		    NULL) {
			MB_GET_OBJECT(m, bucket, cnt_lst);
			MB_MBTYPES_INC(cnt_lst, type, 1);
			MB_UNLOCK_CONT(cnt_lst);
			mbstat.m_wait++;	/* XXX: No consistency. */
			return (m);
		} else
			cnt_lst->mb_cont.mc_starved++;

		MB_UNLOCK_CONT(cnt_lst);
	}

	/*
	 * We're still here, so that means it's time to get the general
	 * container lock, check it one more time (now that mb_reclaim()
	 * has been called) and if we still get nothing, block on the cv.
	 */
	gen_list = MB_GET_GEN_LIST(mb_list);
	MB_LOCK_CONT(gen_list);
	if ((bucket = SLIST_FIRST(&(gen_list->mb_cont.mc_bhead))) != NULL) {
		MB_GET_OBJECT(m, bucket, gen_list);
		MB_MBTYPES_INC(gen_list, type, 1);
		MB_UNLOCK_CONT(gen_list);
		mbstat.m_wait++;	/* XXX: No consistency. */
		return (m);
	}

	gen_list->mb_cont.mc_starved++;
	cv_ret = cv_timedwait(&(gen_list->mgl_mstarved),
	    gen_list->mb_cont.mc_lock, mbuf_wait);
	gen_list->mb_cont.mc_starved--;

	if ((cv_ret == 0) &&
	    ((bucket = SLIST_FIRST(&(gen_list->mb_cont.mc_bhead))) != NULL)) {
		MB_GET_OBJECT(m, bucket, gen_list);
		MB_MBTYPES_INC(gen_list, type, 1);
		mbstat.m_wait++;	/* XXX: No consistency. */
	} else {
		mbstat.m_drops++;	/* XXX: No consistency. */
		m = NULL;
	}

	MB_UNLOCK_CONT(gen_list);

	return (m);
}

/*
 * Free an object to its rightful container.
 * In the general case, this operation is really very easy.
 * Complications arise primarily if:
 * (a) We've hit the high limit on number of free objects allowed in
 *     our PCPU container.
 * (b) We're in a critical situation where our container has been
 *     marked 'starved' and we need to issue wakeups on the starved
 *     condition variable.
 * (c) Minor (odd) cases: our bucket has migrated while we were
 *     waiting for the lock; our bucket is in the general container;
 *     our bucket is empty.
 */
mb_free(struct mb_lstmngr *mb_list, void *m, short type)
{
	struct mb_pcpu_list *cnt_lst;
	struct mb_gen_list *gen_list;
	struct mb_bucket *bucket;
	u_int owner;

	bucket = mb_list->ml_btable[MB_BUCKET_INDX(m, mb_list)];

	/*
	 * Make sure that, if the bucket has migrated by the time we lock its
	 * present container, we drop that lock and grab the new one.
	 */
retry_lock:
	owner = bucket->mb_owner & ~MB_BUCKET_FREE;
	switch (owner) {
	case MB_GENLIST_OWNER:
		gen_list = MB_GET_GEN_LIST(mb_list);
		MB_LOCK_CONT(gen_list);
		if (owner != (bucket->mb_owner & ~MB_BUCKET_FREE)) {
			MB_UNLOCK_CONT(gen_list);
			goto retry_lock;
		}

		/*
		 * If we're intended for the general container, this is
		 * really easy: no migrating required.  The only `bogon'
		 * is that we're now contending with all the threads
		 * dealing with the general list, but this is expected.
		 */
		MB_PUT_OBJECT(m, bucket, gen_list);
		MB_MBTYPES_DEC(gen_list, type, 1);
		if (gen_list->mb_cont.mc_starved > 0)
			cv_signal(&(gen_list->mgl_mstarved));
		MB_UNLOCK_CONT(gen_list);
		break;

	default:
		cnt_lst = MB_GET_PCPU_LIST_NUM(mb_list, owner);
		MB_LOCK_CONT(cnt_lst);
		if (owner != (bucket->mb_owner & ~MB_BUCKET_FREE)) {
			MB_UNLOCK_CONT(cnt_lst);
			goto retry_lock;
		}

		MB_PUT_OBJECT(m, bucket, cnt_lst);
		MB_MBTYPES_DEC(cnt_lst, type, 1);

		if (cnt_lst->mb_cont.mc_starved > 0) {
			/*
			 * This is a tough case.  It means that we've
			 * been flagged at least once to indicate that
			 * we're empty, and that the system is in a critical
			 * situation, so we ought to migrate at least one
			 * bucket over to the general container.
			 * There may or may not be a thread blocking on
			 * the starved condition variable, but chances
			 * are that one will come along soon, so it's
			 * better to migrate now than never.
			 */
			gen_list = MB_GET_GEN_LIST(mb_list);
			MB_LOCK_CONT(gen_list);
			KASSERT((bucket->mb_owner & MB_BUCKET_FREE) != 0,
			    ("mb_free: corrupt bucket %p\n", bucket));
			SLIST_INSERT_HEAD(&(gen_list->mb_cont.mc_bhead),
			    bucket, mb_blist);
			bucket->mb_owner = MB_GENLIST_OWNER;
			(*(cnt_lst->mb_cont.mc_objcount))--;
			(*(gen_list->mb_cont.mc_objcount))++;
			(*(cnt_lst->mb_cont.mc_numpgs))--;
			(*(gen_list->mb_cont.mc_numpgs))++;

			/*
			 * Determine whether or not to keep transferring
			 * buckets to the general list or whether we've
			 * transferred enough already.
			 * We realize that, although we may flag another
			 * bucket to be migrated to the general container,
			 * in the meantime the thread that was blocked on
			 * the cv may already have woken up and be long
			 * gone.  But in that case, the worst consequence
			 * is that we will end up migrating one bucket too
			 * many, which is really not a big deal, especially
			 * if we're close to a critical situation.
			 */
			if (gen_list->mb_cont.mc_starved > 0) {
				cnt_lst->mb_cont.mc_starved--;
				cv_signal(&(gen_list->mgl_mstarved));
			} else
				cnt_lst->mb_cont.mc_starved = 0;

			MB_UNLOCK_CONT(gen_list);
			MB_UNLOCK_CONT(cnt_lst);
			break;
		}

		if (*(cnt_lst->mb_cont.mc_objcount) > *(mb_list->ml_wmhigh)) {
			/*
			 * We've hit the high limit of allowed numbers of mbufs
			 * on this PCPU list.  We must now migrate a bucket
			 * over to the general container.
			 */
			gen_list = MB_GET_GEN_LIST(mb_list);
			MB_LOCK_CONT(gen_list);
			if ((bucket->mb_owner & MB_BUCKET_FREE) == 0) {
				bucket =
				    SLIST_FIRST(&(cnt_lst->mb_cont.mc_bhead));
				SLIST_REMOVE_HEAD(&(cnt_lst->mb_cont.mc_bhead),
				    mb_blist);
			}
			SLIST_INSERT_HEAD(&(gen_list->mb_cont.mc_bhead),
			    bucket, mb_blist);
			bucket->mb_owner = MB_GENLIST_OWNER;
			*(cnt_lst->mb_cont.mc_objcount) -= bucket->mb_numfree;
			*(gen_list->mb_cont.mc_objcount) += bucket->mb_numfree;
			(*(cnt_lst->mb_cont.mc_numpgs))--;
			(*(gen_list->mb_cont.mc_numpgs))++;

			/*
			 * While we're at it, transfer some of the mbtypes
			 * "count load" onto the general list's mbtypes
			 * array, seeing as how we're moving the bucket
			 * there now, meaning that the freeing of objects
			 * there will now decrement the _general list's_
			 * mbtypes counters, and no longer our PCPU list's
			 * mbtypes counters.  We do this for the type presently
			 * being freed in an effort to keep the mbtypes
			 * counters approximately balanced across all lists.
			 */
			MB_MBTYPES_DEC(cnt_lst, type, (PAGE_SIZE /
			    mb_list->ml_objsize) - bucket->mb_numfree);
			MB_MBTYPES_INC(gen_list, type, (PAGE_SIZE /
			    mb_list->ml_objsize) - bucket->mb_numfree);

			MB_UNLOCK_CONT(gen_list);
			MB_UNLOCK_CONT(cnt_lst);
			break;
		}

		if (bucket->mb_owner & MB_BUCKET_FREE) {
			SLIST_INSERT_HEAD(&(cnt_lst->mb_cont.mc_bhead),
			    bucket, mb_blist);
			bucket->mb_owner = cnt_lst->mb_cont.mc_numowner;
		}

		MB_UNLOCK_CONT(cnt_lst);
		break;
	}
}

/*
 * Drain protocols in hopes of freeing up some resources.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of a lock order
 * violation if we're holding any mutex that is acquired in reverse order
 * relative to one of the locks in the drain routines.
 */
static void
mb_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

/*
 * XXX: Argh, we almost always trip here with witness turned on nowadays
 * XXX: because we often come in with Giant held.  For now, there's no way
 * XXX: to avoid this.
 */
#if 0
	KASSERT(witness_list(curthread) == 0,
	    ("mb_reclaim() called with locks held"));
#endif

	mbstat.m_drain++;	/* XXX: No consistency. */

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}

/*
 * Local mbuf & cluster alloc macros and routines.
 * Local macro and function names begin with an underscore ("_").
 */
static void _mclfree(struct mbuf *);

#define	_m_get(m, how, type) do {					\
	(m) = (struct mbuf *)mb_alloc(&mb_list_mbuf, (how), (type));	\
	if ((m) != NULL) {						\
		(m)->m_type = (type);					\
		(m)->m_next = NULL;					\
		(m)->m_nextpkt = NULL;					\
		(m)->m_data = (m)->m_dat;				\
		(m)->m_flags = 0;					\
	}								\
} while (0)

#define	_m_gethdr(m, how, type) do {					\
	(m) = (struct mbuf *)mb_alloc(&mb_list_mbuf, (how), (type));	\
	if ((m) != NULL) {						\
		(m)->m_type = (type);					\
		(m)->m_next = NULL;					\
		(m)->m_nextpkt = NULL;					\
		(m)->m_data = (m)->m_pktdat;				\
		(m)->m_flags = M_PKTHDR;				\
		(m)->m_pkthdr.rcvif = NULL;				\
		(m)->m_pkthdr.csum_flags = 0;				\
		(m)->m_pkthdr.aux = NULL;				\
	}								\
} while (0)

/* XXX: Check for M_PKTHDR && m_pkthdr.aux is bogus... please fix (see KAME). */
#define	_m_free(m, n) do {						\
	(n) = (m)->m_next;						\
	if ((m)->m_flags & M_EXT)					\
		MEXTFREE((m));						\
	if (((m)->m_flags & M_PKTHDR) != 0 && (m)->m_pkthdr.aux) {	\
		m_freem((m)->m_pkthdr.aux);				\
		(m)->m_pkthdr.aux = NULL;				\
	}								\
	mb_free(&mb_list_mbuf, (m), (m)->m_type);			\
} while (0)

#define	_mext_init_ref(m) do {						\
	(m)->m_ext.ref_cnt = malloc(sizeof(u_int), M_MBUF, M_NOWAIT);	\
	if ((m)->m_ext.ref_cnt != NULL) {				\
		*((m)->m_ext.ref_cnt) = 0;				\
		MEXT_ADD_REF((m));					\
	}								\
} while (0)

#define	_mext_dealloc_ref(m) \
	free((m)->m_ext.ref_cnt, M_MBUF)

void
_mext_free(struct mbuf *mb)
{

	if (mb->m_ext.ext_type == EXT_CLUSTER)
		mb_free(&mb_list_clust, (caddr_t)mb->m_ext.ext_buf, MT_NOTMBUF);
	else
		(*(mb->m_ext.ext_free))(mb->m_ext.ext_buf, mb->m_ext.ext_args);
	_mext_dealloc_ref(mb);
}

/*
 * We only include this here to avoid making m_clget() excessively large
 * due to too much inlined code.
 */
static void
_mclfree(struct mbuf *mb)
{

	mb_free(&mb_list_clust, (caddr_t)mb->m_ext.ext_buf, MT_NOTMBUF);
	mb->m_ext.ext_buf = NULL;
}

/*
 * Exported space allocation and de-allocation routines.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *mb;

	_m_get(mb, how, type);
	return (mb);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *mb;

	_m_gethdr(mb, how, type);
	return (mb);
}

struct mbuf *
m_get_clrd(int how, int type)
{
	struct mbuf *mb;

	_m_get(mb, how, type);
	if (mb != NULL)
		bzero(mtod(mb, caddr_t), MLEN);
	return (mb);
}

struct mbuf *
m_gethdr_clrd(int how, int type)
{
	struct mbuf *mb;

	_m_gethdr(mb, how, type);
	if (mb != NULL)
		bzero(mtod(mb, caddr_t), MHLEN);
	return (mb);
}

struct mbuf *
m_free(struct mbuf *mb)
{
	struct mbuf *nb;

	_m_free(mb, nb);
	return (nb);
}

void
m_clget(struct mbuf *mb, int how)
{

	mb->m_ext.ext_buf = (caddr_t)mb_alloc(&mb_list_clust, how, MT_NOTMBUF);
	if (mb->m_ext.ext_buf != NULL) {
		_mext_init_ref(mb);
		if (mb->m_ext.ref_cnt == NULL)
			_mclfree(mb);
		else {
			mb->m_data = mb->m_ext.ext_buf;
			mb->m_flags |= M_EXT;
			mb->m_ext.ext_free = NULL;
			mb->m_ext.ext_args = NULL;
			mb->m_ext.ext_size = MCLBYTES;
			mb->m_ext.ext_type = EXT_CLUSTER;
		}
	}
}

void
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(caddr_t, void *), void *args, short flags, int type)
{

	_mext_init_ref(mb);
	if (mb->m_ext.ref_cnt != NULL) {
		mb->m_flags |= (M_EXT | flags);
		mb->m_ext.ext_buf = buf;
		mb->m_data = mb->m_ext.ext_buf;
		mb->m_ext.ext_size = size;
		mb->m_ext.ext_free = freef;
		mb->m_ext.ext_args = args;
		mb->m_ext.ext_type = type;
	}
}

/*
 * Change the type of the mbuf `mb'; this is a relatively expensive operation
 * and should be avoided.
 */
void
m_chtype(struct mbuf *mb, short new_type)
{
	struct mb_gen_list *gen_list;

	gen_list = MB_GET_GEN_LIST(&mb_list_mbuf);
	MB_LOCK_CONT(gen_list);
	MB_MBTYPES_DEC(gen_list, mb->m_type, 1);
	MB_MBTYPES_INC(gen_list, new_type, 1);
	MB_UNLOCK_CONT(gen_list);
	mb->m_type = new_type;
}

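/*
 * Illustrative usage sketch, not part of the allocator itself: a typical
 * consumer of the exported routines above grabs a packet-header mbuf,
 * attaches a cluster to it and, on any failure, releases everything again.
 * The example function name is hypothetical; the block is kept under
 * "#if 0" so that it is never compiled.
 */
#if 0
static struct mbuf *
example_alloc_packet(void)
{
	struct mbuf *m;

	m = m_gethdr(M_DONTWAIT, MT_DATA);	/* may return NULL */
	if (m == NULL)
		return (NULL);
	m_clget(m, M_DONTWAIT);			/* try to attach a cluster */
	if ((m->m_flags & M_EXT) == 0) {
		(void)m_free(m);		/* no cluster; give the mbuf back */
		return (NULL);
	}
	return (m);		/* MCLBYTES of storage now at m->m_data */
}
#endif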