/*
 * Copyright (c) 2001
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_param.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
/*
 * Maximum number of PCPU containers.  If you know what you're doing you could
 * explicitly define MBALLOC_NCPU to be exactly the number of CPUs on your
 * system during compilation, and thus prevent kernel structure bloat.
 */
#ifdef	MBALLOC_NCPU
#define	NCPU	MBALLOC_NCPU
#else
#define	NCPU	MAXCPU
#endif

/*
 * SMP and non-SMP kernels clearly have a different number of possible CPUs.
 */
#ifdef	SMP
#define	NCPU_PRESENT	mp_ncpus
#else
#define	NCPU_PRESENT	1
#endif
/*
 * The mbuf allocator is heavily based on Alfred Perlstein's
 * (alfred@FreeBSD.org) "memcache" allocator, which is itself based
 * on concepts from several per-CPU memory allocators.  The differences
 * between this allocator and memcache are, among other things:
 *
 * (i) We don't free back to the map from the free() routine - we leave the
 *     option of implementing lazy freeing (from a kproc) in the future.
 *
 * (ii) We allocate from separate sub-maps of kmem_map, thus limiting the
 *      maximum number of allocatable objects of a given type.  Further,
 *      we handle blocking on a cv in the case that the map is starved and
 *      we have to rely solely on cached (circulating) objects.
 *
 * The mbuf allocator keeps all objects that it allocates in mb_buckets.
 * The buckets keep a page worth of objects (an object can be an mbuf or an
 * mbuf cluster) and facilitate moving larger sets of contiguous objects
 * from the per-CPU lists to the main list for the given object.  The buckets
 * also have an added advantage in that after several moves from a per-CPU
 * list to the main list and back to the per-CPU list, contiguous objects
 * are kept together, thus trying to put the TLB cache to good use.
 *
 * The buckets are kept on singly-linked lists called "containers."  A
 * container is protected by a mutex lock in order to ensure consistency.
 * The mutex lock itself is allocated separately and attached to the
 * container at boot time, thus allowing for certain containers to share
 * the same mutex lock.  Per-CPU containers for mbufs and mbuf clusters all
 * share the same per-CPU lock whereas the "general system" containers
 * (i.e. the "main lists") for these objects share one global lock.
 */
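/*
 * Rough picture of the resulting hierarchy (illustrative only; the structure
 * definitions below are authoritative):
 *
 *	mb_lstmngr (one per object type: mbufs, clusters)
 *	    ml_genlist  --> general container  --> SLIST of mb_buckets
 *	    ml_cntlst[] --> per-CPU containers --> SLISTs of mb_buckets
 *	    ml_btable[] --> maps a page index in the submap back to the
 *			    mb_bucket that owns that page's objects
 */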
struct mb_bucket {
	SLIST_ENTRY(mb_bucket) mb_blist;
	int	mb_owner;
	int	mb_numfree;
	void	*mb_free[0];
};
struct mb_container {
	SLIST_HEAD(mc_buckethd, mb_bucket) mc_bhead;
	struct	mtx *mc_lock;
	int	mc_numowner;
	u_int	mc_starved;
	u_long	*mc_objcount;
	u_long	*mc_numpgs;
};
struct mb_gen_list {
	struct	mb_container mb_cont;
	struct	cv mgl_mstarved;
};
struct mb_pcpu_list {
	struct	mb_container mb_cont;
};
/*
 * Boot-time configurable object counts that will determine the maximum
 * number of permitted objects in the mbuf and mcluster cases.  In the
 * ext counter (nmbcnt) case, it's just an indicator serving to scale
 * kmem_map size properly - in other words, we may be allowed to allocate
 * more than nmbcnt counters, whereas we will never be allowed to allocate
 * more than nmbufs mbufs or nmbclusters mclusters.
 * As for nsfbufs, it is used to indicate how many sendfile(2) buffers will be
 * allocatable by the sfbuf allocator (found in uipc_syscalls.c).
 */
#ifndef	NMBCLUSTERS
#define	NMBCLUSTERS	(1024 + MAXUSERS * 64)
#endif
#ifndef	NMBUFS
#define	NMBUFS		(NMBCLUSTERS * 2)
#endif
#ifndef	NSFBUFS
#define	NSFBUFS		(512 + MAXUSERS * 16)
#endif
#ifndef	NMBCNTS
#define	NMBCNTS		(NMBCLUSTERS + NSFBUFS)
#endif
int	nmbufs = NMBUFS;
int	nmbclusters = NMBCLUSTERS;
int	nmbcnt = NMBCNTS;
int	nsfbufs = NSFBUFS;
TUNABLE_INT("kern.ipc.nmbufs", &nmbufs);
TUNABLE_INT("kern.ipc.nmbclusters", &nmbclusters);
TUNABLE_INT("kern.ipc.nmbcnt", &nmbcnt);
TUNABLE_INT("kern.ipc.nsfbufs", &nsfbufs);
/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{

	/*
	 * This has to be done before VM init.
	 */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
	if (nmbcnt < nmbclusters + nsfbufs)
		nmbcnt = nmbclusters + nsfbufs;

	return;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
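/*
 * Example (illustrative): since these are boot-time tunables, they can be
 * set from the loader, e.g. in /boot/loader.conf:
 *
 *	kern.ipc.nmbclusters="8192"
 *	kern.ipc.nmbufs="16384"
 *
 * tunable_mbinit() then adjusts nmbufs and nmbcnt upwards if the values
 * given violate the constraints checked above.
 */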
/*
 * The freelist structures and mutex locks.  The number statically declared
 * here depends on the number of CPUs.
 *
 * We set up in such a way that all the objects (mbufs, clusters)
 * share the same mutex lock.  It has been established that we do not benefit
 * from different locks for different objects, so we use the same lock,
 * regardless of object type.
 */
struct mb_lstmngr {
	struct	mb_gen_list *ml_genlist;
	struct	mb_pcpu_list *ml_cntlst[NCPU];
	struct	mb_bucket **ml_btable;
	vm_map_t	ml_map;
	vm_offset_t	ml_mapbase;
	vm_offset_t	ml_maptop;
	int	ml_mapfull;
	u_int	ml_objsize;
	u_int	*ml_wmhigh;
};
struct	mb_lstmngr	mb_list_mbuf, mb_list_clust;
struct	mtx	mbuf_gen, mbuf_pcpu[NCPU];
/*
 * Local macros for internal allocator structure manipulations.
 */
#define	MB_GET_PCPU_LIST(mb_lst)	(mb_lst)->ml_cntlst[PCPU_GET(cpuid)]

#define	MB_GET_PCPU_LIST_NUM(mb_lst, num)	(mb_lst)->ml_cntlst[(num)]

#define	MB_GET_GEN_LIST(mb_lst)		(mb_lst)->ml_genlist

#define	MB_LOCK_CONT(mb_cnt)	mtx_lock((mb_cnt)->mb_cont.mc_lock)

#define	MB_UNLOCK_CONT(mb_cnt)	mtx_unlock((mb_cnt)->mb_cont.mc_lock)

#define	MB_BUCKET_INDX(mb_obj, mb_lst) \
	(int)(((caddr_t)(mb_obj) - (caddr_t)(mb_lst)->ml_mapbase) / PAGE_SIZE)
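/*
 * Worked example for MB_BUCKET_INDX (assuming PAGE_SIZE is 4096): an object
 * located at ml_mapbase + 9000 lies on the submap's third page, so the macro
 * yields 9000 / 4096 = 2, and ml_btable[2] points to the bucket that owns
 * that object.
 */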
#define	MB_GET_OBJECT(mb_objp, mb_bckt, mb_lst)				\
{									\
	struct mc_buckethd *_mchd = &((mb_lst)->mb_cont.mc_bhead);	\
									\
	(mb_bckt)->mb_numfree--;					\
	(mb_objp) = (mb_bckt)->mb_free[((mb_bckt)->mb_numfree)];	\
	(*((mb_lst)->mb_cont.mc_objcount))--;				\
	if ((mb_bckt)->mb_numfree == 0) {				\
		SLIST_REMOVE_HEAD(_mchd, mb_blist);			\
		SLIST_NEXT((mb_bckt), mb_blist) = NULL;			\
		(mb_bckt)->mb_owner |= MB_BUCKET_FREE;			\
	}								\
}
#define	MB_PUT_OBJECT(mb_objp, mb_bckt, mb_lst)				\
	(mb_bckt)->mb_free[((mb_bckt)->mb_numfree)] = (mb_objp);	\
	(mb_bckt)->mb_numfree++;					\
	(*((mb_lst)->mb_cont.mc_objcount))++;
/*
 * Ownership of buckets/containers is represented by integers.  The PCPU
 * lists range from 0 to NCPU-1.  We need a free numerical id for the general
 * list (we use NCPU).  We also need a non-conflicting free bit to indicate
 * that the bucket is free and removed from a container, while not losing
 * the bucket's originating container id.  We use the highest bit
 * for the free marker.
 */
#define	MB_GENLIST_OWNER	(NCPU)
#define	MB_BUCKET_FREE		(1 << (sizeof(int) * 8 - 1))
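/*
 * Example (illustrative): a bucket owned by per-CPU container 2 that has
 * just gone empty carries the owner value (2 | MB_BUCKET_FREE); masking
 * with ~MB_BUCKET_FREE, as done in mb_free(), recovers the originating
 * container id, 2.
 */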
/*
 * sysctl(8) exported objects
 */
struct	mbstat	mbstat;			/* General stats + info */
struct	mbpstat	mb_statpcpu[NCPU + 1];	/* PCPU + Gen. container alloc stats */
int	mbuf_wait = 64;			/* Sleep time for wait code (ticks) */
u_int	mbuf_limit = 512;		/* Upper limit on # of mbufs per CPU */
u_int	clust_limit = 128;		/* Upper limit on # of clusters per CPU */
SYSCTL_DECL(_kern_ipc);
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbclusters, CTLFLAG_RD, &nmbclusters, 0,
    "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
    "Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbcnt, CTLFLAG_RD, &nmbcnt, 0,
    "Number used to scale kmem_map to ensure sufficient space for counters");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RD, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW, &mbuf_wait, 0,
    "Sleep time of mbuf subsystem wait allocations during exhaustion");
SYSCTL_UINT(_kern_ipc, OID_AUTO, mbuf_limit, CTLFLAG_RW, &mbuf_limit, 0,
    "Upper limit of number of mbufs allowed on each PCPU list");
SYSCTL_UINT(_kern_ipc, OID_AUTO, clust_limit, CTLFLAG_RW, &clust_limit, 0,
    "Upper limit of number of mbuf clusters allowed on each PCPU list");
SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mb_statpcpu, CTLFLAG_RD, mb_statpcpu,
    sizeof(mb_statpcpu), "S,", "Mbuf allocator per CPU statistics");
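/*
 * Example (illustrative): the CTLFLAG_RW limits above can be tuned at
 * runtime with sysctl(8), e.g.:
 *
 *	sysctl kern.ipc.mbuf_limit=1024
 *	sysctl kern.ipc.clust_limit=256
 */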
/*
 * Prototypes of local allocator routines.
 */
static __inline void	*mb_alloc(struct mb_lstmngr *, int);
void			*mb_alloc_wait(struct mb_lstmngr *);
static __inline void	 mb_free(struct mb_lstmngr *, void *);
static void		 mb_init(void *);
struct	mb_bucket	*mb_pop_cont(struct mb_lstmngr *, int,
			    struct mb_pcpu_list *);
void			 mb_reclaim(void);
/*
 * Initial allocation numbers.  Each parameter represents the number of buckets
 * of each object that will be placed initially in each PCPU container for
 * each object type (see the worked example below).
 */
#define	NMB_MBUF_INIT	4
#define	NMB_CLUST_INIT	16
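/*
 * Worked example (assuming PAGE_SIZE 4096, MSIZE 256, MCLBYTES 2048): one
 * bucket holds a page worth of objects, i.e. 4096 / 256 = 16 mbufs or
 * 4096 / 2048 = 2 clusters, so each PCPU container starts out with
 * 4 * 16 = 64 mbufs and 16 * 2 = 32 clusters.
 */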
/*
 * Initialize the mbuf subsystem.
 *
 * We sub-divide the kmem_map into several submaps; this way, we don't have
 * to worry about artificially limiting the number of mbuf or mbuf cluster
 * allocations, due to fear of one type of allocation "stealing" address
 * space initially reserved for another.
 *
 * Set up both the general containers and all the PCPU containers.  Populate
 * the PCPU containers with initial numbers.
 */
MALLOC_DEFINE(M_MBUF, "mbufmgr", "mbuf subsystem management structures");
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mb_init, NULL);
static void
mb_init(void *dummy)
{
	struct mb_pcpu_list *pcpu_cnt;
	vm_size_t mb_map_size;
	int i, j;
	/*
	 * Set up all the submaps, for each type of object that we deal
	 * with in this allocator.
	 */
	mb_map_size = (vm_size_t)(nmbufs * MSIZE);
	mb_map_size = rounddown(mb_map_size, PAGE_SIZE);
	mb_list_mbuf.ml_btable = malloc((unsigned long)mb_map_size / PAGE_SIZE *
	    sizeof(struct mb_bucket *), M_MBUF, M_NOWAIT);
	if (mb_list_mbuf.ml_btable == NULL)
		goto bad;
	mb_list_mbuf.ml_map = kmem_suballoc(kmem_map,
	    &(mb_list_mbuf.ml_mapbase), &(mb_list_mbuf.ml_maptop), mb_map_size);
	mb_list_mbuf.ml_mapfull = 0;
	mb_list_mbuf.ml_objsize = MSIZE;
	mb_list_mbuf.ml_wmhigh = &mbuf_limit;
	mb_map_size = (vm_size_t)(nmbclusters * MCLBYTES);
	mb_map_size = rounddown(mb_map_size, PAGE_SIZE);
	mb_list_clust.ml_btable = malloc((unsigned long)mb_map_size / PAGE_SIZE
	    * sizeof(struct mb_bucket *), M_MBUF, M_NOWAIT);
	if (mb_list_clust.ml_btable == NULL)
		goto bad;
	mb_list_clust.ml_map = kmem_suballoc(kmem_map,
	    &(mb_list_clust.ml_mapbase), &(mb_list_clust.ml_maptop),
	    mb_map_size);
	mb_list_clust.ml_mapfull = 0;
	mb_list_clust.ml_objsize = MCLBYTES;
	mb_list_clust.ml_wmhigh = &clust_limit;
	/* XXX XXX XXX: mbuf_map->system_map = clust_map->system_map = 1 */

	/*
	 * Allocate required general (global) containers for each object type.
	 */
	mb_list_mbuf.ml_genlist = malloc(sizeof(struct mb_gen_list), M_MBUF,
	    M_NOWAIT);
	mb_list_clust.ml_genlist = malloc(sizeof(struct mb_gen_list), M_MBUF,
	    M_NOWAIT);
	if ((mb_list_mbuf.ml_genlist == NULL) ||
	    (mb_list_clust.ml_genlist == NULL))
		goto bad;
	/*
	 * Initialize condition variables and general container mutex locks.
	 */
	mtx_init(&mbuf_gen, "mbuf subsystem general lists lock", 0);
	cv_init(&(mb_list_mbuf.ml_genlist->mgl_mstarved), "mbuf pool starved");
	cv_init(&(mb_list_clust.ml_genlist->mgl_mstarved),
	    "mcluster pool starved");
	mb_list_mbuf.ml_genlist->mb_cont.mc_lock =
	    mb_list_clust.ml_genlist->mb_cont.mc_lock = &mbuf_gen;
	/*
	 * Set up the general containers for each object.
	 */
	mb_list_mbuf.ml_genlist->mb_cont.mc_numowner =
	    mb_list_clust.ml_genlist->mb_cont.mc_numowner = MB_GENLIST_OWNER;
	mb_list_mbuf.ml_genlist->mb_cont.mc_starved =
	    mb_list_clust.ml_genlist->mb_cont.mc_starved = 0;
	mb_list_mbuf.ml_genlist->mb_cont.mc_objcount =
	    &(mb_statpcpu[MB_GENLIST_OWNER].mb_mbfree);
	mb_list_clust.ml_genlist->mb_cont.mc_objcount =
	    &(mb_statpcpu[MB_GENLIST_OWNER].mb_clfree);
	mb_list_mbuf.ml_genlist->mb_cont.mc_numpgs =
	    &(mb_statpcpu[MB_GENLIST_OWNER].mb_mbpgs);
	mb_list_clust.ml_genlist->mb_cont.mc_numpgs =
	    &(mb_statpcpu[MB_GENLIST_OWNER].mb_clpgs);
	SLIST_INIT(&(mb_list_mbuf.ml_genlist->mb_cont.mc_bhead));
	SLIST_INIT(&(mb_list_clust.ml_genlist->mb_cont.mc_bhead));
	/*
	 * Initialize general mbuf statistics.
	 */
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	/*
	 * Allocate and initialize PCPU containers.
	 */
	for (i = 0; i < NCPU_PRESENT; i++) {
		mb_list_mbuf.ml_cntlst[i] = malloc(sizeof(struct mb_pcpu_list),
		    M_MBUF, M_NOWAIT);
		mb_list_clust.ml_cntlst[i] = malloc(sizeof(struct mb_pcpu_list),
		    M_MBUF, M_NOWAIT);
		if ((mb_list_mbuf.ml_cntlst[i] == NULL) ||
		    (mb_list_clust.ml_cntlst[i] == NULL))
			goto bad;

		mtx_init(&mbuf_pcpu[i], "mbuf PCPU list lock", 0);
		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_lock =
		    mb_list_clust.ml_cntlst[i]->mb_cont.mc_lock = &mbuf_pcpu[i];

		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_numowner =
		    mb_list_clust.ml_cntlst[i]->mb_cont.mc_numowner = i;
		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_starved =
		    mb_list_clust.ml_cntlst[i]->mb_cont.mc_starved = 0;
		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_objcount =
		    &(mb_statpcpu[i].mb_mbfree);
		mb_list_clust.ml_cntlst[i]->mb_cont.mc_objcount =
		    &(mb_statpcpu[i].mb_clfree);
		mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_numpgs =
		    &(mb_statpcpu[i].mb_mbpgs);
		mb_list_clust.ml_cntlst[i]->mb_cont.mc_numpgs =
		    &(mb_statpcpu[i].mb_clpgs);

		SLIST_INIT(&(mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_bhead));
		SLIST_INIT(&(mb_list_clust.ml_cntlst[i]->mb_cont.mc_bhead));
		/*
		 * Perform initial allocations.
		 */
		pcpu_cnt = MB_GET_PCPU_LIST_NUM(&mb_list_mbuf, i);
		MB_LOCK_CONT(pcpu_cnt);
		for (j = 0; j < NMB_MBUF_INIT; j++) {
			if (mb_pop_cont(&mb_list_mbuf, M_DONTWAIT, pcpu_cnt)
			    == NULL)
				goto bad;
		}
		MB_UNLOCK_CONT(pcpu_cnt);

		pcpu_cnt = MB_GET_PCPU_LIST_NUM(&mb_list_clust, i);
		MB_LOCK_CONT(pcpu_cnt);
		for (j = 0; j < NMB_CLUST_INIT; j++) {
			if (mb_pop_cont(&mb_list_clust, M_DONTWAIT, pcpu_cnt)
			    == NULL)
				goto bad;
		}
		MB_UNLOCK_CONT(pcpu_cnt);
	}

	return;
bad:
	panic("mb_init(): failed to initialize mbuf subsystem!");
}
/*
 * Populate a given mbuf PCPU container with a bucket full of fresh new
 * buffers.  Return a pointer to the new bucket (already in the container if
 * successful), or return NULL on failure.
 *
 * The PCPU container lock must be held when this is called.
 * The lock is dropped here so that we can cleanly call the underlying VM
 * code.  If we fail, we return with no locks held.  If we succeed (i.e.
 * return non-NULL), we return with the PCPU lock held, ready for allocation
 * from the returned bucket.
 */
struct mb_bucket *
mb_pop_cont(struct mb_lstmngr *mb_list, int how, struct mb_pcpu_list *cnt_lst)
{
	struct mb_bucket *bucket;
	caddr_t p;
	int i;

	MB_UNLOCK_CONT(cnt_lst);
	/*
	 * If the object's (finite) map is starved (i.e. no more address
	 * space), bail out now.
	 */
	if (mb_list->ml_mapfull)
		return (NULL);

	bucket = malloc(sizeof(struct mb_bucket) +
	    PAGE_SIZE / mb_list->ml_objsize * sizeof(void *), M_MBUF,
	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
	if (bucket == NULL)
		return (NULL);

	p = (caddr_t)kmem_malloc(mb_list->ml_map, PAGE_SIZE,
	    how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
	if (p == NULL) {
		free(bucket, M_MBUF);
		return (NULL);
	}

	bucket->mb_numfree = 0;
	mb_list->ml_btable[MB_BUCKET_INDX(p, mb_list)] = bucket;
	for (i = 0; i < (PAGE_SIZE / mb_list->ml_objsize); i++) {
		bucket->mb_free[i] = p;
		bucket->mb_numfree++;
		p += mb_list->ml_objsize;
	}

	MB_LOCK_CONT(cnt_lst);
	bucket->mb_owner = cnt_lst->mb_cont.mc_numowner;
	SLIST_INSERT_HEAD(&(cnt_lst->mb_cont.mc_bhead), bucket, mb_blist);
	(*(cnt_lst->mb_cont.mc_numpgs))++;
	*(cnt_lst->mb_cont.mc_objcount) += bucket->mb_numfree;

	return (bucket);
}
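/*
 * Sizing note (assuming PAGE_SIZE 4096): for the cluster list, where
 * ml_objsize is MCLBYTES (2048), the malloc() above reserves room for a
 * 2-entry mb_free[] array and the loop carves the fresh page into 2
 * clusters; for the mbuf list (MSIZE 256) it is a 16-entry array instead.
 */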
/*
 * Allocate an mbuf-subsystem type object.
 * The general case is very easy.  Complications only arise if our PCPU
 * container is empty.  Things get worse if the PCPU container is empty,
 * the general container is empty, and we've run out of address space
 * in our map; then we try to block if we're willing to (M_TRYWAIT).
 */
static __inline
void *
mb_alloc(struct mb_lstmngr *mb_list, int how)
{
	struct mb_pcpu_list *cnt_lst;
	struct mb_bucket *bucket;
	void *m;

	m = NULL;
	cnt_lst = MB_GET_PCPU_LIST(mb_list);
	MB_LOCK_CONT(cnt_lst);

	if ((bucket = SLIST_FIRST(&(cnt_lst->mb_cont.mc_bhead))) != NULL) {
		/*
		 * This is the easy allocation case.  We just grab an object
		 * from a bucket in the PCPU container.  At worst, we
		 * have just emptied the bucket and so we remove it
		 * from the container.
		 */
		MB_GET_OBJECT(m, bucket, cnt_lst);
		MB_UNLOCK_CONT(cnt_lst);
	} else {
		struct mb_gen_list *gen_list;

		/*
		 * This is the less-common, more difficult case.  We must
		 * first verify if the general list has anything for us
		 * and if that also fails, we must allocate a page from
		 * the map and create a new bucket to place in our PCPU
		 * container (already locked).  If the map is starved then
		 * we're really in for trouble, as we have to wait on
		 * the general container's condition variable.
		 */
		gen_list = MB_GET_GEN_LIST(mb_list);
		MB_LOCK_CONT(gen_list);

		if ((bucket = SLIST_FIRST(&(gen_list->mb_cont.mc_bhead)))
		    != NULL) {
			/*
			 * Give ownership of the bucket to our CPU's
			 * container, but only actually put the bucket
			 * in the container if it doesn't become free
			 * upon removing an mbuf from it.
			 */
			SLIST_REMOVE_HEAD(&(gen_list->mb_cont.mc_bhead),
			    mb_blist);
			bucket->mb_owner = cnt_lst->mb_cont.mc_numowner;
			(*(gen_list->mb_cont.mc_numpgs))--;
			(*(cnt_lst->mb_cont.mc_numpgs))++;
			*(gen_list->mb_cont.mc_objcount) -= bucket->mb_numfree;
			bucket->mb_numfree--;
			m = bucket->mb_free[(bucket->mb_numfree)];
			if (bucket->mb_numfree == 0) {
				SLIST_NEXT(bucket, mb_blist) = NULL;
				bucket->mb_owner |= MB_BUCKET_FREE;
			} else {
				SLIST_INSERT_HEAD(&(cnt_lst->mb_cont.mc_bhead),
				    bucket, mb_blist);
				*(cnt_lst->mb_cont.mc_objcount) +=
				    bucket->mb_numfree;
			}
			MB_UNLOCK_CONT(gen_list);
			MB_UNLOCK_CONT(cnt_lst);
		} else {
			/*
			 * We'll have to allocate a new page.
			 */
			MB_UNLOCK_CONT(gen_list);
			bucket = mb_pop_cont(mb_list, how, cnt_lst);
			if (bucket != NULL) {
				bucket->mb_numfree--;
				m = bucket->mb_free[(bucket->mb_numfree)];
				(*(cnt_lst->mb_cont.mc_objcount))--;
				MB_UNLOCK_CONT(cnt_lst);
			} else {
				if (how == M_TRYWAIT) {
					/*
					 * Absolute worst-case scenario.
					 * We block if we're willing to, but
					 * only after trying to steal from
					 * other lists.
					 */
					mb_list->ml_mapfull = 1;
					m = mb_alloc_wait(mb_list);
				} else
					/* XXX: No consistency. */
					mbstat.m_drops++;
			}
		}
	}

	return (m);
}
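/*
 * Usage note (illustrative): callers that cannot sleep (e.g. code running in
 * interrupt context) pass M_DONTWAIT and get NULL back immediately when
 * everything is starved; M_TRYWAIT callers may sleep up to mbuf_wait ticks
 * inside mb_alloc_wait() before giving up.
 */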
/*
 * This is the worst-case scenario called only if we're allocating with
 * M_TRYWAIT.  We first drain all the protocols, then try to find an mbuf
 * by looking in every PCPU container.  If we're still unsuccessful, we
 * try the general container one last time and possibly block on our
 * starved condition variable.
 */
void *
mb_alloc_wait(struct mb_lstmngr *mb_list)
{
	struct mb_pcpu_list *cnt_lst;
	struct mb_gen_list *gen_list;
	struct mb_bucket *bucket;
	void *m;
	int i, cv_ret;
	/*
	 * Try to reclaim mbuf-related objects (mbufs, clusters).
	 */
	mb_reclaim();

	/*
	 * Cycle all the PCPU containers.  Increment starved counts if found
	 * empty.
	 */
	for (i = 0; i < NCPU_PRESENT; i++) {
		cnt_lst = MB_GET_PCPU_LIST_NUM(mb_list, i);
		MB_LOCK_CONT(cnt_lst);

		/*
		 * If the container is non-empty, get a single object from it.
		 * If empty, increment the starved count.
		 */
		if ((bucket = SLIST_FIRST(&(cnt_lst->mb_cont.mc_bhead))) !=
		    NULL) {
			MB_GET_OBJECT(m, bucket, cnt_lst);
			MB_UNLOCK_CONT(cnt_lst);
			mbstat.m_wait++;	/* XXX: No consistency. */
			return (m);
		} else
			cnt_lst->mb_cont.mc_starved++;

		MB_UNLOCK_CONT(cnt_lst);
	}
	/*
	 * We're still here, so that means it's time to get the general
	 * container lock, check it one more time (now that mb_reclaim()
	 * has been called) and if we still get nothing, block on the cv.
	 */
	gen_list = MB_GET_GEN_LIST(mb_list);
	MB_LOCK_CONT(gen_list);
	if ((bucket = SLIST_FIRST(&(gen_list->mb_cont.mc_bhead))) != NULL) {
		MB_GET_OBJECT(m, bucket, gen_list);
		MB_UNLOCK_CONT(gen_list);
		mbstat.m_wait++;	/* XXX: No consistency. */
		return (m);
	}

	gen_list->mb_cont.mc_starved++;
	cv_ret = cv_timedwait(&(gen_list->mgl_mstarved),
	    gen_list->mb_cont.mc_lock, mbuf_wait);
	gen_list->mb_cont.mc_starved--;

	if ((cv_ret == 0) &&
	    ((bucket = SLIST_FIRST(&(gen_list->mb_cont.mc_bhead))) != NULL)) {
		MB_GET_OBJECT(m, bucket, gen_list);
		mbstat.m_wait++;	/* XXX: No consistency. */
	} else {
		mbstat.m_drops++;	/* XXX: No consistency. */
		m = NULL;
	}

	MB_UNLOCK_CONT(gen_list);

	return (m);
}
/*
 * Free an object to its rightful container.
 * In the very general case, this operation is really very easy.
 * Complications arise primarily if:
 *	(a) We've hit the high limit on number of free objects allowed in
 *	    our PCPU container.
 *	(b) We're in a critical situation where our container has been
 *	    marked 'starved' and we need to issue wakeups on the starved
 *	    condition variable.
 *	(c) Minor (odd) cases: our bucket has migrated while we were
 *	    waiting for the lock; our bucket is in the general container;
 *	    our bucket is empty.
 */
static __inline
void
mb_free(struct mb_lstmngr *mb_list, void *m)
{
	struct mb_pcpu_list *cnt_lst;
	struct mb_gen_list *gen_list;
	struct mb_bucket *bucket;
	u_int owner;

	bucket = mb_list->ml_btable[MB_BUCKET_INDX(m, mb_list)];

	/*
	 * Make sure that if, after we lock the bucket's present container,
	 * the bucket has migrated, we drop the lock and get the new one.
	 */
retry_lock:
	owner = bucket->mb_owner & ~MB_BUCKET_FREE;
	switch (owner) {
	case MB_GENLIST_OWNER:
		gen_list = MB_GET_GEN_LIST(mb_list);
		MB_LOCK_CONT(gen_list);
		if (owner != (bucket->mb_owner & ~MB_BUCKET_FREE)) {
			MB_UNLOCK_CONT(gen_list);
			goto retry_lock;
		}

		/*
		 * If we're intended for the general container, this is
		 * real easy: no migrating required.  The only `bogon'
		 * is that we're now contending with all the threads
		 * dealing with the general list, but this is expected.
		 */
		MB_PUT_OBJECT(m, bucket, gen_list);
		if (gen_list->mb_cont.mc_starved > 0)
			cv_signal(&(gen_list->mgl_mstarved));
		MB_UNLOCK_CONT(gen_list);
		break;
	default:
		cnt_lst = MB_GET_PCPU_LIST_NUM(mb_list, owner);
		MB_LOCK_CONT(cnt_lst);
		if (owner != (bucket->mb_owner & ~MB_BUCKET_FREE)) {
			MB_UNLOCK_CONT(cnt_lst);
			goto retry_lock;
		}

		MB_PUT_OBJECT(m, bucket, cnt_lst);
		if (cnt_lst->mb_cont.mc_starved > 0) {
			/*
			 * This is a tough case.  It means that we've
			 * been flagged at least once to indicate that
			 * we're empty, and that the system is in a critical
			 * situation, so we ought to migrate at least one
			 * bucket over to the general container.
			 * There may or may not be a thread blocking on
			 * the starved condition variable, but chances
			 * are that one will eventually come up soon, so
			 * it's better to migrate now than never.
			 */
			gen_list = MB_GET_GEN_LIST(mb_list);
			MB_LOCK_CONT(gen_list);
			KASSERT((bucket->mb_owner & MB_BUCKET_FREE) != 0,
			    ("mb_free: corrupt bucket %p\n", bucket));
			SLIST_INSERT_HEAD(&(gen_list->mb_cont.mc_bhead),
			    bucket, mb_blist);
			bucket->mb_owner = MB_GENLIST_OWNER;
			(*(cnt_lst->mb_cont.mc_objcount))--;
			(*(gen_list->mb_cont.mc_objcount))++;
			(*(cnt_lst->mb_cont.mc_numpgs))--;
			(*(gen_list->mb_cont.mc_numpgs))++;

			/*
			 * Determine whether or not to keep transferring
			 * buckets to the general list or whether we've
			 * transferred enough already.
			 * We realize that although we may flag another
			 * bucket to be migrated to the general container,
			 * in the meantime the thread that was blocked on
			 * the cv may already be woken up and long gone.
			 * But in that case, the worst consequence is that
			 * we will end up migrating one bucket too many,
			 * which is really not a big deal, especially if
			 * we're close to a critical situation.
			 */
			if (gen_list->mb_cont.mc_starved > 0) {
				cnt_lst->mb_cont.mc_starved--;
				cv_signal(&(gen_list->mgl_mstarved));
			} else
				cnt_lst->mb_cont.mc_starved = 0;

			MB_UNLOCK_CONT(gen_list);
			MB_UNLOCK_CONT(cnt_lst);
			break;
		}
		if (*(cnt_lst->mb_cont.mc_objcount) > *(mb_list->ml_wmhigh)) {
			/*
			 * We've hit the high limit of allowed numbers of mbufs
			 * on this PCPU list.  We must now migrate a bucket
			 * over to the general container.
			 */
			gen_list = MB_GET_GEN_LIST(mb_list);
			MB_LOCK_CONT(gen_list);
			if ((bucket->mb_owner & MB_BUCKET_FREE) == 0) {
				bucket =
				    SLIST_FIRST(&(cnt_lst->mb_cont.mc_bhead));
				SLIST_REMOVE_HEAD(&(cnt_lst->mb_cont.mc_bhead),
				    mb_blist);
			}
			SLIST_INSERT_HEAD(&(gen_list->mb_cont.mc_bhead),
			    bucket, mb_blist);
			bucket->mb_owner = MB_GENLIST_OWNER;
			*(cnt_lst->mb_cont.mc_objcount) -= bucket->mb_numfree;
			*(gen_list->mb_cont.mc_objcount) += bucket->mb_numfree;
			(*(cnt_lst->mb_cont.mc_numpgs))--;
			(*(gen_list->mb_cont.mc_numpgs))++;

			MB_UNLOCK_CONT(gen_list);
			MB_UNLOCK_CONT(cnt_lst);
			break;
		}
		if (bucket->mb_owner & MB_BUCKET_FREE) {
			SLIST_INSERT_HEAD(&(cnt_lst->mb_cont.mc_bhead),
			    bucket, mb_blist);
			bucket->mb_owner = cnt_lst->mb_cont.mc_numowner;
		}

		MB_UNLOCK_CONT(cnt_lst);
		break;
	}

	return;
}
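/*
 * Example (illustrative): with mbuf_limit at its default of 512, the free
 * that pushes a PCPU free-object count past 512 causes an entire bucket to
 * migrate to the general list, where any other CPU may then grab it.
 */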
/*
 * Drain protocols in hopes to free up some resources.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks, which raises the possibility of lock order
 * violation if we're holding a mutex that is acquired in reverse order
 * relative to one of the locks in the drain routines.
 */
void
mb_reclaim(void)
{
	struct domain *dp;
	struct protosw *pr;

/*
 * XXX: Argh, we almost always trip here with witness turned on nowadays
 * XXX: because we often come in with Giant held.  For now, there's no way
 * XXX: to avoid this.
 */
#ifdef WITNESS
	KASSERT(witness_list(curproc) == 0,
	    ("mb_reclaim() called with locks held"));
#endif

	mbstat.m_drain++;	/* XXX: No consistency. */

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
}
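/*
 * For example, a protocol's pr_drain hook such as tcp_drain() releases
 * mbufs held in TCP reassembly queues when invoked here.
 */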
/*
 * Local mbuf & cluster alloc macros and routines.
 * Local macro and function names begin with an underscore ("_").
 */
void	_mext_free(struct mbuf *);
void	_mclfree(struct mbuf *);
#define	_m_get(m, how, type) do {					\
	(m) = (struct mbuf *)mb_alloc(&mb_list_mbuf, (how));		\
	if ((m) != NULL) {						\
		(m)->m_type = (type);					\
		(m)->m_next = NULL;					\
		(m)->m_nextpkt = NULL;					\
		(m)->m_data = (m)->m_dat;				\
		(m)->m_flags = 0;					\
	}								\
} while (0)
#define	_m_gethdr(m, how, type) do {					\
	(m) = (struct mbuf *)mb_alloc(&mb_list_mbuf, (how));		\
	if ((m) != NULL) {						\
		(m)->m_type = (type);					\
		(m)->m_next = NULL;					\
		(m)->m_nextpkt = NULL;					\
		(m)->m_data = (m)->m_pktdat;				\
		(m)->m_flags = M_PKTHDR;				\
		(m)->m_pkthdr.rcvif = NULL;				\
		(m)->m_pkthdr.csum_flags = 0;				\
		(m)->m_pkthdr.aux = NULL;				\
	}								\
} while (0)
/* XXX: Check for M_PKTHDR && m_pkthdr.aux is bogus... please fix (see KAME) */
#define	_m_free(m, n) do {						\
	(n) = (m)->m_next;						\
	if ((m)->m_flags & M_EXT)					\
		MEXTFREE((m));						\
	if (((m)->m_flags & M_PKTHDR) != 0 && (m)->m_pkthdr.aux) {	\
		m_freem((m)->m_pkthdr.aux);				\
		(m)->m_pkthdr.aux = NULL;				\
	}								\
	mb_free(&mb_list_mbuf, (m));					\
} while (0)
#define	_mext_init_ref(m) do {						\
	(m)->m_ext.ref_cnt = malloc(sizeof(u_int), M_MBUF, M_NOWAIT);	\
	if ((m)->m_ext.ref_cnt != NULL) {				\
		*((m)->m_ext.ref_cnt) = 0;				\
		MEXT_ADD_REF((m));					\
	}								\
} while (0)

#define	_mext_dealloc_ref(m)						\
	free((m)->m_ext.ref_cnt, M_MBUF)
void
_mext_free(struct mbuf *mb)
{

	if (mb->m_ext.ext_type == EXT_CLUSTER)
		mb_free(&mb_list_clust, (caddr_t)mb->m_ext.ext_buf);
	else
		(*(mb->m_ext.ext_free))(mb->m_ext.ext_buf, mb->m_ext.ext_args);

	_mext_dealloc_ref(mb);
	return;
}
/*
 * We only include this here to avoid making m_clget() excessively large
 * due to too much inlined code.
 */
void
_mclfree(struct mbuf *mb)
{

	mb_free(&mb_list_clust, (caddr_t)mb->m_ext.ext_buf);
	mb->m_ext.ext_buf = NULL;
	return;
}
/*
 * Exported space allocation and de-allocation routines.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *mb;

	_m_get(mb, how, type);
	return (mb);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *mb;

	_m_gethdr(mb, how, type);
	return (mb);
}
struct mbuf *
m_get_clrd(int how, int type)
{
	struct mbuf *mb;

	_m_get(mb, how, type);
	if (mb != NULL)
		bzero(mtod(mb, caddr_t), MLEN);
	return (mb);
}

struct mbuf *
m_gethdr_clrd(int how, int type)
{
	struct mbuf *mb;

	_m_gethdr(mb, how, type);
	if (mb != NULL)
		bzero(mtod(mb, caddr_t), MHLEN);
	return (mb);
}

struct mbuf *
m_free(struct mbuf *mb)
{
	struct mbuf *nb;

	_m_free(mb, nb);
	return (nb);
}
void
m_clget(struct mbuf *mb, int how)
{

	mb->m_ext.ext_buf = (caddr_t)mb_alloc(&mb_list_clust, how);
	if (mb->m_ext.ext_buf != NULL) {
		_mext_init_ref(mb);
		if (mb->m_ext.ref_cnt == NULL)
			_mclfree(mb);
		else {
			mb->m_data = mb->m_ext.ext_buf;
			mb->m_flags |= M_EXT;
			mb->m_ext.ext_free = NULL;
			mb->m_ext.ext_args = NULL;
			mb->m_ext.ext_size = MCLBYTES;
			mb->m_ext.ext_type = EXT_CLUSTER;
		}
	}
	return;
}
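/*
 * Usage example (illustrative): the typical consumer grabs a packet header
 * mbuf, attaches a cluster, and checks M_EXT to see whether m_clget()
 * succeeded:
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_DONTWAIT, MT_DATA);
 *	if (m != NULL) {
 *		m_clget(m, M_DONTWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			(void)m_free(m);
 *			m = NULL;
 *		}
 *	}
 */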
void
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(caddr_t, void *), void *args, short flags, int type)
{

	_mext_init_ref(mb);
	if (mb->m_ext.ref_cnt != NULL) {
		mb->m_flags |= (M_EXT | flags);
		mb->m_ext.ext_buf = buf;
		mb->m_data = mb->m_ext.ext_buf;
		mb->m_ext.ext_size = size;
		mb->m_ext.ext_free = freef;
		mb->m_ext.ext_args = args;
		mb->m_ext.ext_type = type;
	}
	return;
}
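/*
 * Usage example (illustrative; my_buf, MY_BUFSIZE and my_free() are
 * hypothetical driver-supplied storage and a matching free routine, and
 * EXT_NET_DRV is assumed to be one of the non-cluster ext_type constants
 * from mbuf.h):
 *
 *	m_extadd(m, my_buf, MY_BUFSIZE, my_free, NULL, 0, EXT_NET_DRV);
 *	if ((m->m_flags & M_EXT) == 0)
 *		... allocating the reference counter failed; recover ...
 *
 * When the last reference is dropped, _mext_free() invokes
 * my_free(my_buf, NULL), since ext_type is not EXT_CLUSTER.
 */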