/*-
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *   |                         |
 *   |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *   v   |                      |                       |
 *  [(Cluster Cache)]    [  Secondary  ]      [ (Mbuf Cache)     ]
 *  [ Cluster Zone   ]   [    Zone     ]      [ Mbuf Master Zone ]
 *        |                      \________          |
 *  [ Cluster Keg   ]                     \        [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                      \_______[ Mbuf Slabs ]
 *        \____________(VM)_________________/
 *
 * Whenever an object is allocated with uma_zalloc() out of
 * one of the Zones its _ctor_ function is executed.  Likewise,
 * for any deallocation through uma_zfree() the _dtor_ function
 * is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with
 * _zfini_ functions and freed back to the global memory pool.
 */
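
/*
 * For illustration, a typical consumer goes through the mbuf API in
 * mbuf.h rather than calling UMA directly; a minimal sketch:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);	(Packet Zone)
 *	if (m == NULL)
 *		m = m_gethdr(M_NOWAIT, MT_DATA);	(Mbuf Master Zone)
 *	...
 *	m_freem(m);	(dtors run; buffers return to the per-CPU caches)
 */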

int nmbufs;			/* limits number of mbufs */
int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */
struct mbstat mbstat;

/*
 * tunable_mbinit() has to be run before any mbuf allocations are done.
 */
static void
tunable_mbinit(void *dummy)
{
	quad_t realmem, maxmbufmem;

	/*
	 * The default limit for all mbuf related memory is 1/2 of all
	 * available kernel memory (physical or kmem).
	 * At most it can be 3/4 of available kernel memory.
	 */
	realmem = qmin((quad_t)physmem * PAGE_SIZE,
	    vm_map_max(kernel_map) - vm_map_min(kernel_map));
	maxmbufmem = realmem / 2;
	TUNABLE_QUAD_FETCH("kern.maxmbufmem", &maxmbufmem);
	if (maxmbufmem > realmem / 4 * 3)
		maxmbufmem = realmem / 4 * 3;

	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	if (nmbclusters == 0)
		nmbclusters = maxmbufmem / MCLBYTES / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
	if (nmbjumbop == 0)
		nmbjumbop = maxmbufmem / MJUMPAGESIZE / 4;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo9", &nmbjumbo9);
	if (nmbjumbo9 == 0)
		nmbjumbo9 = maxmbufmem / MJUM9BYTES / 6;

	TUNABLE_INT_FETCH("kern.ipc.nmbjumbo16", &nmbjumbo16);
	if (nmbjumbo16 == 0)
		nmbjumbo16 = maxmbufmem / MJUM16BYTES / 6;

	/*
	 * We need at least as many mbufs as we have clusters of
	 * the various types added together.
	 */
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);
	if (nmbufs < nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16)
		nmbufs = lmax(maxmbufmem / MSIZE / 5,
		    nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16);
}
SYSINIT(tunable_mbinit, SI_SUB_KMEM, SI_ORDER_MIDDLE, tunable_mbinit, NULL);
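
/*
 * For illustration, the tunables fetched above may be seeded from
 * loader.conf(5) before boot; the values below are made-up examples:
 *
 *	kern.maxmbufmem="1073741824"
 *	kern.ipc.nmbclusters="262144"
 *	kern.ipc.nmbjumbo9="0"		(0 = derive from kern.maxmbufmem)
 */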

static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbclusters > nmbclusters &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbclusters = newnmbclusters;
			uma_zone_set_max(zone_clust, nmbclusters);
			nmbclusters = uma_zone_get_max(zone_clust);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbop > nmbjumbop &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbop = newnmbjumbop;
			uma_zone_set_max(zone_jumbop, nmbjumbop);
			nmbjumbop = uma_zone_get_max(zone_jumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo9 > nmbjumbo9 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo9 = newnmbjumbo9;
			uma_zone_set_max(zone_jumbo9, nmbjumbo9);
			nmbjumbo9 = uma_zone_get_max(zone_jumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			uma_zone_set_max(zone_jumbo16, nmbjumbo16);
			nmbjumbo16 = uma_zone_get_max(zone_jumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			uma_zone_set_max(zone_mbuf, nmbufs);
			nmbufs = uma_zone_get_max(zone_mbuf);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT|CTLFLAG_RW,
    &nmbufs, 0, sysctl_nmbufs, "IU",
    "Maximum number of mbufs allowed");

SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_ext_refcnt;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_clust(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);

static void	mb_reclaim(void *);
static void    *mbuf_jumbo_alloc(uma_zone_t, int, uint8_t *, int);

/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
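
/*
 * The check works because, for a power of two, XOR with (value - 1)
 * sets every bit up to and including the highest set bit; e.g. for
 * MSIZE == 256: (255 ^ 256) == 511, and (511 + 1) >> 1 == 256.  Any
 * other value fails the comparison.
 */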

/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zone too. Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");

	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	/*
	 * [Re]set counters and local statistics knobs.
	 * XXX Some of these should go and be replaced, but UMA stat
	 * gathering needs to be revised.
	 */
	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait,
	    (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
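
/*
 * For illustration, the allocation API builds the mb_args that arrives
 * here; m_get() in mbuf.h does roughly the following:
 *
 *	struct mb_args args;
 *
 *	args.flags = 0;
 *	args.type = type;
 *	return ((struct mbuf *)(uma_zalloc_arg(zone_mbuf, &args, how)));
 */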
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		m->m_pkthdr.flowid = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	} else
		m->m_data = m->m_dat;
	return (0);
}

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES",
	    __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET, ("%s: ext_type != EXT_PACKET",
	    __func__));
	KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * empty we allocate just the cluster without setting
 * the mbuf to it.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	u_int *refcnt;
	int type;
	uma_zone_t zone;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		zone = zone_jumbo16;
		break;
	default:
		panic("unknown cluster size");
		break;
	}

	m = (struct mbuf *)arg;
	refcnt = uma_find_refcnt(zone, mem);
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}
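
/*
 * For illustration, both paths into the constructor above exist in the
 * mbuf API: m_clget(m, how) attaches a 2K cluster to an existing mbuf
 * ('arg' != NULL), while m_cljget(NULL, how, size) hands back a bare
 * cluster of the requested size ('arg' == NULL).
 */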

/*
 * The Mbuf Cluster zone destructor.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
	uma_zone_t zone;

	zone = m_getzone(size);
	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
	    ("%s: refcnt incorrect %u", __func__,
	    *(uma_find_refcnt(zone, mem))) );

	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif
	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = 0;
	m->m_flags = (flags | M_EXT);
	m->m_type = type;

	if (flags & M_PKTHDR) {
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		m->m_pkthdr.flowid = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	}
	/* m_ext is already initialized. */

	return (0);
}

int
m_pkthdr_init(struct mbuf *m, int how)
{
#ifdef MAC
	int error;
#endif
	m->m_data = m->m_pktdat;
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.rcvif = NULL;
	m->m_pkthdr.header = NULL;
	m->m_pkthdr.len = 0;
	m->m_pkthdr.flowid = 0;
	m->m_pkthdr.csum_flags = 0;
	m->m_pkthdr.csum_data = 0;
	m->m_pkthdr.tso_segsz = 0;
	m->m_pkthdr.ether_vtag = 0;
#ifdef MAC
	/* If the label init fails, fail the alloc */
	error = mac_mbuf_init(m, how);
	if (error)
		return (error);
#endif

	return (0);
}

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(void *junk)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "mb_reclaim()");

	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}
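
/*
 * For illustration, a protocol participates in the drain sweep above by
 * filling in pr_drain in its protosw entry; a sketch with a hypothetical
 * protocol "foo":
 *
 *	static struct protosw foosw = {
 *		...
 *		.pr_drain =	foo_drain,
 *	};
 */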