/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/aio.h> /* for aio_swake proto */
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
/*
 * Function pointer set by the AIO routines so that the socket buffer code
 * can call back into the AIO module if it is loaded.
 */
void	(*aio_swake)(struct socket *, struct sockbuf *);
/*
 * Primitive routines for operating on socket buffers
 */

u_long	sb_max = SB_MAX;
u_long	sb_max_adj =
    (quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */

static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */

static struct mbuf	*sbcut_internal(struct sockbuf *sb, int len);
static void	sbflush_internal(struct sockbuf *sb);
/*
 * Our own version of m_clrprotoflags(), that can preserve M_NOTREADY.
 */
static void
sbm_clrprotoflags(struct mbuf *m, int flags)
{
	int mask;

	mask = ~M_PROTOFLAGS;
	if (flags & PRUS_NOTREADY)
		mask |= M_NOTREADY;
	while (m) {
		m->m_flags &= mask;
		m = m->m_next;
	}
}
/*
 * Mark ready "count" units of I/O starting with "m".  Most mbufs
 * count as a single unit of I/O except for EXT_PGS-backed mbufs which
 * can be backed by multiple pages.
 */
int
sbready(struct sockbuf *sb, struct mbuf *m0, int count)
{
	struct mbuf *m;
	u_int blocker;

	SOCKBUF_LOCK_ASSERT(sb);
	KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));
	KASSERT(count > 0, ("%s: invalid count %d", __func__, count));

	m = m0;
	blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;

	while (count > 0) {
		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));
		if ((m->m_flags & M_EXT) != 0 &&
		    m->m_ext.ext_type == EXT_PGS) {
			if (count < m->m_ext.ext_pgs->nrdy) {
				m->m_ext.ext_pgs->nrdy -= count;
				count = 0;
				break;
			}
			count -= m->m_ext.ext_pgs->nrdy;
			m->m_ext.ext_pgs->nrdy = 0;
		} else
			count--;

		m->m_flags &= ~(M_NOTREADY | blocker);
		if (blocker)
			sb->sb_acc += m->m_len;
		m = m->m_next;
	}

	/*
	 * If the first mbuf is still not fully ready because only
	 * some of its backing pages were readied, no further progress
	 * can be made.
	 */
	if (m0 == m) {
		MPASS(m->m_flags & M_NOTREADY);
		return (EINPROGRESS);
	}

	if (!blocker)
		return (EINPROGRESS);

	/* This one was blocking all the queue. */
	for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
		KASSERT(m->m_flags & M_BLOCKED,
		    ("%s: m %p !M_BLOCKED", __func__, m));
		m->m_flags &= ~M_BLOCKED;
		sb->sb_acc += m->m_len;
	}

	sb->sb_fnrdy = m;

	return (0);
}
/*
 * Adjust sockbuf state reflecting allocation of m.
 */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_ccc += m->m_len;

	if (sb->sb_fnrdy == NULL) {
		if (m->m_flags & M_NOTREADY)
			sb->sb_fnrdy = m;
		else
			sb->sb_acc += m->m_len;
	} else
		m->m_flags |= M_BLOCKED;

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl += m->m_len;

	sb->sb_mbcnt += MSIZE;
	sb->sb_mcnt += 1;

	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt += m->m_ext.ext_size;
		sb->sb_ccnt += 1;
	}
}
/*
 * Adjust sockbuf state reflecting freeing of m.
 */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{

#if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
	SOCKBUF_LOCK_ASSERT(sb);
#endif

	sb->sb_ccc -= m->m_len;

	if (!(m->m_flags & M_NOTAVAIL))
		sb->sb_acc -= m->m_len;

	if (m == sb->sb_fnrdy) {
		struct mbuf *n;

		KASSERT(m->m_flags & M_NOTREADY,
		    ("%s: m %p !M_NOTREADY", __func__, m));

		n = m->m_next;
		while (n != NULL && !(n->m_flags & M_NOTREADY)) {
			n->m_flags &= ~M_BLOCKED;
			sb->sb_acc += n->m_len;
			n = n->m_next;
		}
		sb->sb_fnrdy = n;
	}

	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
		sb->sb_ctl -= m->m_len;

	sb->sb_mbcnt -= MSIZE;
	sb->sb_mcnt -= 1;
	if (m->m_flags & M_EXT) {
		sb->sb_mbcnt -= m->m_ext.ext_size;
		sb->sb_ccnt -= 1;
	}

	if (sb->sb_sndptr == m) {
		sb->sb_sndptr = NULL;
		sb->sb_sndptroff = 0;
	}
	if (sb->sb_sndptroff != 0)
		sb->sb_sndptroff -= m->m_len;
}
/*
 * Socantsendmore indicates that no more data will be sent on the socket; it
 * would normally be applied to a socket when the user informs the system
 * that no more data is to be sent, by the protocol code (in the case of
 * PRU_SHUTDOWN).  Socantrcvmore indicates that no more data will be
 * received, and will normally be applied to the socket by a protocol when it
 * detects that the peer will send no more data.  Data queued for reading in
 * the socket may yet be read.
 */
void
socantsendmore_locked(struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(&so->so_snd);

	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sowwakeup_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantsendmore(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_snd);
	socantsendmore_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantrcvmore_locked(struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	sorwakeup_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

void
socantrcvmore(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_rcv);
	socantrcvmore_locked(so);
	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}
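/*
 * Illustrative sketch (not compiled): a protocol typically applies
 * socantrcvmore() when it learns the peer will send no more data, e.g.
 * TCP input processing a received FIN.  The function name below is
 * hypothetical; only socantrcvmore() itself is from this file.
 */
#if 0
static void
example_peer_closed(struct socket *so)
{

	/* Peer shut down its send side; readers may still drain so_rcv. */
	socantrcvmore(so);
}
#endif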
/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sb->sb_flags |= SB_WAIT;
	return (msleep_sbt(&sb->sb_acc, &sb->sb_mtx,
	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
	    sb->sb_timeo, 0, 0));
}
int
sblock(struct sockbuf *sb, int flags)
{

	KASSERT((flags & SBL_VALID) == flags,
	    ("sblock: flags invalid (0x%x)", flags));

	if (flags & SBL_WAIT) {
		if ((sb->sb_flags & SB_NOINTR) ||
		    (flags & SBL_NOINTR)) {
			sx_xlock(&sb->sb_sx);
			return (0);
		}
		return (sx_xlock_sig(&sb->sb_sx));
	} else {
		if (sx_try_xlock(&sb->sb_sx) == 0)
			return (EWOULDBLOCK);
		return (0);
	}
}

void
sbunlock(struct sockbuf *sb)
{

	sx_xunlock(&sb->sb_sx);
}
/*
 * Wake up processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
	int ret;

	SOCKBUF_LOCK_ASSERT(sb);

	selwakeuppri(sb->sb_sel, PSOCK);
	if (!SEL_WAITING(sb->sb_sel))
		sb->sb_flags &= ~SB_SEL;
	if (sb->sb_flags & SB_WAIT) {
		sb->sb_flags &= ~SB_WAIT;
		wakeup(&sb->sb_acc);
	}
	KNOTE_LOCKED(&sb->sb_sel->si_note, 0);
	if (sb->sb_upcall != NULL) {
		ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
		if (ret == SU_ISCONNECTED) {
			KASSERT(sb == &so->so_rcv,
			    ("SO_SND upcall returned SU_ISCONNECTED"));
			soupcall_clear(so, SO_RCV);
		}
	} else
		ret = SU_OK;
	if (sb->sb_flags & SB_AIO)
		sowakeup_aio(so, sb);
	SOCKBUF_UNLOCK(sb);
	if (ret == SU_ISCONNECTED)
		soisconnected(so);
	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
		pgsigio(&so->so_sigio, SIGIO, 0);
	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
}
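/*
 * Illustrative sketch (not compiled): protocols normally call the
 * sorwakeup()/sowwakeup() wrappers, or their _locked variants when they
 * already hold the buffer lock, immediately after changing a buffer.
 * Note that the _locked variants consume the lock via sowakeup().
 */
#if 0
static void
example_deliver(struct socket *so, struct mbuf *m)
{

	SOCKBUF_LOCK(&so->so_rcv);
	sbappend_locked(&so->so_rcv, m, 0);
	sorwakeup_locked(so);		/* releases the lock */
}
#endif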
/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and one for
 * receiving data.  Each buffer contains a queue of mbufs, information about
 * the number of mbufs and amount of data in the queue, and other fields
 * allowing select() statements and notification on data availability to be
 * implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.  Each
 * record is a list of mbufs chained together with the m_next field.  Records
 * are chained together with the m_nextpkt field.  The upper level routine
 * soreceive() expects the following conventions to be observed when placing
 * information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's name,
 *    then a record containing that name must be present before any
 *    associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really just
 *    additional data associated with the message), and there are ``rights''
 *    to be received, then a record containing this data should be present
 *    (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by a data
 *    record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space should
 * be released by calling sbrelease() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
	struct thread *td = curthread;

	SOCKBUF_LOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
		goto bad;
	if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
		goto bad2;
	if (so->so_rcv.sb_lowat == 0)
		so->so_rcv.sb_lowat = 1;
	if (so->so_snd.sb_lowat == 0)
		so->so_snd.sb_lowat = MCLBYTES;
	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);
bad2:
	sbrelease_locked(&so->so_snd, so);
bad:
	SOCKBUF_UNLOCK(&so->so_rcv);
	SOCKBUF_UNLOCK(&so->so_snd);
	return (ENOBUFS);
}
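/*
 * Illustrative sketch (not compiled): a protocol's attach routine
 * typically calls soreserve() once with its default send and receive
 * space.  The function name and sizes below are hypothetical.
 */
#if 0
static int
example_attach(struct socket *so, int proto, struct thread *td)
{
	u_long sendspace = 8192, recvspace = 8192;

	return (soreserve(so, sendspace, recvspace));
}
#endif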
static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long tmp_sb_max = sb_max;

	error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
	if (error || !req->newptr)
		return (error);
	if (tmp_sb_max < MSIZE + MCLBYTES)
		return (EINVAL);
	sb_max = tmp_sb_max;
	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
	return (0);
}
/*
 * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
 * become limiting if buffering efficiency is near the normal case.
 */
int
sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
	rlim_t sbsize_limit;

	SOCKBUF_LOCK_ASSERT(sb);

	/*
	 * When a thread is passed, we take into account the thread's socket
	 * buffer size limit.  The caller will generally pass curthread, but
	 * in the TCP input path, NULL will be passed to indicate that no
	 * appropriate thread resource limits are available.  In that case,
	 * we don't apply a process limit.
	 */
	if (cc > sb_max_adj)
		return (0);
	if (td != NULL)
		sbsize_limit = lim_cur(td, RLIMIT_SBSIZE);
	else
		sbsize_limit = RLIM_INFINITY;
	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
	    sbsize_limit))
		return (0);
	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
	if (sb->sb_lowat > sb->sb_hiwat)
		sb->sb_lowat = sb->sb_hiwat;
	return (1);
}
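/*
 * Example of the scaling above: reserving cc = 64 kB with the default
 * sb_efficiency of 8 sets sb_mbmax to min(512 kB, sb_max), so mbuf
 * storage overhead does not become the limiting factor for typical
 * chains of partially filled mbufs.
 */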
int
sbsetopt(struct socket *so, int cmd, u_long cc)
{
	struct sockbuf *sb;
	short *flags;
	u_int *hiwat, *lowat;
	int error;

	sb = NULL;
	SOCK_LOCK(so);
	if (SOLISTENING(so)) {
		switch (cmd) {
		case SO_SNDLOWAT:
		case SO_SNDBUF:
			lowat = &so->sol_sbsnd_lowat;
			hiwat = &so->sol_sbsnd_hiwat;
			flags = &so->sol_sbsnd_flags;
			break;
		case SO_RCVLOWAT:
		case SO_RCVBUF:
			lowat = &so->sol_sbrcv_lowat;
			hiwat = &so->sol_sbrcv_hiwat;
			flags = &so->sol_sbrcv_flags;
			break;
		}
	} else {
		switch (cmd) {
		case SO_SNDLOWAT:
		case SO_SNDBUF:
			sb = &so->so_snd;
			break;
		case SO_RCVLOWAT:
		case SO_RCVBUF:
			sb = &so->so_rcv;
			break;
		}
		flags = &sb->sb_flags;
		hiwat = &sb->sb_hiwat;
		lowat = &sb->sb_lowat;
		SOCKBUF_LOCK(sb);
	}

	error = 0;
	switch (cmd) {
	case SO_SNDBUF:
	case SO_RCVBUF:
		if (SOLISTENING(so)) {
			if (cc > sb_max_adj) {
				error = ENOBUFS;
				break;
			}
			*hiwat = cc;
			if (*lowat > *hiwat)
				*lowat = *hiwat;
		} else {
			if (!sbreserve_locked(sb, cc, so, curthread))
				error = ENOBUFS;
		}
		if (error == 0)
			*flags &= ~SB_AUTOSIZE;
		break;
	case SO_SNDLOWAT:
	case SO_RCVLOWAT:
		/*
		 * Make sure the low-water is never greater than the
		 * high-water.
		 */
		*lowat = (cc > *hiwat) ? *hiwat : cc;
		break;
	}

	if (!SOLISTENING(so))
		SOCKBUF_UNLOCK(sb);
	SOCK_UNLOCK(so);
	return (error);
}
/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
static void
sbrelease_internal(struct sockbuf *sb, struct socket *so)
{

	sbflush_internal(sb);
	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
	    RLIM_INFINITY);
	sb->sb_mbmax = 0;
}

void
sbrelease_locked(struct sockbuf *sb, struct socket *so)
{

	SOCKBUF_LOCK_ASSERT(sb);

	sbrelease_internal(sb, so);
}

void
sbrelease(struct sockbuf *sb, struct socket *so)
{

	SOCKBUF_LOCK(sb);
	sbrelease_locked(sb, so);
	SOCKBUF_UNLOCK(sb);
}

void
sbdestroy(struct sockbuf *sb, struct socket *so)
{

	sbrelease_internal(sb, so);
}
/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to append
 * new mbufs to a socket buffer, after checking that adequate space is
 * available, comparing the function sbspace() with the amount of data to be
 * added.  sbappendrecord() differs from sbappend() in that data supplied is
 * treated as the beginning of a new record.  To place a sender's address,
 * optional access rights, and data in a socket receive buffer,
 * sbappendaddr() should be used.  To place access rights and data in a
 * socket receive buffer, sbappendrights() should be used.  In either case,
 * the new data begins a new record.  Note that unlike sbappend() and
 * sbappendrecord(), these routines check on behalf of the caller that there
 * will be enough space to store the data.  Each fails if there is not enough
 * space, or if it cannot find mbufs to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data awaiting
 * acknowledgement.  Data is normally copied from a socket send buffer in a
 * protocol with m_copy for output to a peer, and the data is then removed
 * from the socket buffer with sbdrop() or sbdroprecord() when it is
 * acknowledged by the peer.
 */
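/*
 * Illustrative sketch (not compiled) of the send-buffer cycle described
 * above, as a reliable protocol might use it: check sbspace() before
 * appending, and sbdrop() the acknowledged bytes later.  "acked" is a
 * hypothetical byte count.
 */
#if 0
	SOCKBUF_LOCK(&so->so_snd);
	if (sbspace(&so->so_snd) < (long)m->m_pkthdr.len) {
		SOCKBUF_UNLOCK(&so->so_snd);
		return (ENOBUFS);	/* no room; try again later */
	}
	sbappend_locked(&so->so_snd, m, 0);
	SOCKBUF_UNLOCK(&so->so_snd);

	/* Later, when the peer acknowledges "acked" bytes: */
	SOCKBUF_LOCK(&so->so_snd);
	sbdrop_locked(&so->so_snd, acked);
	SOCKBUF_UNLOCK(&so->so_snd);
#endif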
#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	if (m != sb->sb_lastrecord) {
		printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
			__func__, sb->sb_mb, sb->sb_lastrecord, m);
		printf("packet chain:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
			printf("\t%p\n", m);
		panic("%s from %s:%u", __func__, file, line);
	}
}

void
sblastmbufchk(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m = sb->sb_mb;
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m && m->m_nextpkt)
		m = m->m_nextpkt;

	while (m && m->m_next)
		m = m->m_next;

	if (m != sb->sb_mbtail) {
		printf("%s: sb_mb %p sb_mbtail %p last %p\n",
			__func__, sb->sb_mb, sb->sb_mbtail, m);
		printf("packet tree:\n");
		for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
			printf("\t");
			for (n = m; n != NULL; n = n->m_next)
				printf("%p ", n);
			printf("\n");
		}
		panic("%s from %s:%u", __func__, file, line);
	}
}
#endif /* SOCKBUF_DEBUG */
#define SBLINKRECORD(sb, m0) do {					\
	SOCKBUF_LOCK_ASSERT(sb);					\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (m0);					\
} while (/*CONSTCOND*/0)
/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
	struct mbuf *n;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m == NULL)
		return;
	sbm_clrprotoflags(m, flags);
	SBLASTRECORDCHK(sb);
	n = sb->sb_mb;
	if (n) {
		while (n->m_nextpkt)
			n = n->m_nextpkt;
		do {
			if (n->m_flags & M_EOR) {
				sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
				return;
			}
		} while (n->m_next && (n = n->m_next));
	} else {
		/*
		 * XXX Would like to simply use sb_mbtail here, but
		 * XXX I need to verify that I won't miss an EOR that
		 * XXX way.
		 */
		if ((n = sb->sb_lastrecord) != NULL) {
			do {
				if (n->m_flags & M_EOR) {
					sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
					return;
				}
			} while (n->m_next && (n = n->m_next));
		} else {
			/*
			 * If this is the first record in the socket buffer,
			 * it's also the last record.
			 */
			sb->sb_lastrecord = m;
		}
	}
	sbcompress(sb, m, n);
	SBLASTRECORDCHK(sb);
}
/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK(sb);
	sbappend_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}
/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK_ASSERT(sb);

	KASSERT(m->m_nextpkt == NULL, ("sbappendstream 0"));
	KASSERT(sb->sb_mb == sb->sb_lastrecord, ("sbappendstream 1"));

	SBLASTMBUFCHK(sb);

	/* Remove all packet headers and mbuf tags to get a pure data chain. */
	m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);

	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb);
}
/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
{

	SOCKBUF_LOCK(sb);
	sbappendstream_locked(sb, m, flags);
	SOCKBUF_UNLOCK(sb);
}
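/*
 * Illustrative sketch (not compiled): TCP input appends received segment
 * data to the single receive-buffer record and wakes readers, with the
 * _locked variant consuming the lock.
 */
#if 0
	SOCKBUF_LOCK(&so->so_rcv);
	sbappendstream_locked(&so->so_rcv, m, 0);
	sorwakeup_locked(so);		/* releases the lock */
#endif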
#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb, const char *file, int line)
{
	struct mbuf *m, *n, *fnrdy;
	u_long acc, ccc, mbcnt;

	SOCKBUF_LOCK_ASSERT(sb);

	acc = ccc = mbcnt = 0;
	fnrdy = NULL;

	for (m = sb->sb_mb; m; m = n) {
	    n = m->m_nextpkt;
	    for (; m; m = m->m_next) {
		if (m->m_len == 0) {
			printf("sb %p empty mbuf %p\n", sb, m);
			goto fail;
		}
		if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) {
			if (m != sb->sb_fnrdy) {
				printf("sb %p: fnrdy %p != m %p\n",
				    sb, sb->sb_fnrdy, m);
				goto fail;
			}
			fnrdy = m;
		}
		if (fnrdy) {
			if (!(m->m_flags & M_NOTAVAIL)) {
				printf("sb %p: fnrdy %p, m %p is avail\n",
				    sb, sb->sb_fnrdy, m);
				goto fail;
			}
		} else
			acc += m->m_len;
		ccc += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
			mbcnt += m->m_ext.ext_size;
	    }
	}
	if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) {
		printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n",
		    acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt);
		goto fail;
	}
	return;
fail:
	panic("%s from %s:%u", __func__, file, line);
}
#endif /* SOCKBUF_DEBUG */
/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 == NULL)
		return;
	m_clrprotoflags(m0);
	/*
	 * Put the first mbuf on the queue.  Note this permits zero length
	 * records.
	 */
	sballoc(sb, m0);
	SBLASTRECORDCHK(sb);
	SBLINKRECORD(sb, m0);
	sb->sb_mbtail = m0;
	m = m0->m_next;
	m0->m_next = 0;
	if (m && (m0->m_flags & M_EOR)) {
		m0->m_flags &= ~M_EOR;
		m->m_flags |= M_EOR;
	}
	/* always call sbcompress() so it can do SBLASTMBUFCHK() */
	sbcompress(sb, m, m0);
}

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{

	SOCKBUF_LOCK(sb);
	sbappendrecord_locked(sb, m0);
	SOCKBUF_UNLOCK(sb);
}
/* Helper routine that appends data, control, and address to a sockbuf. */
static int
sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last)
{
	struct mbuf *m, *n, *nlast;

	if (asa->sa_len > MLEN)
		return (0);
	m = m_get(M_NOWAIT, MT_SONAME);
	if (m == NULL)
		return (0);
	m->m_len = asa->sa_len;
	bcopy(asa, mtod(m, caddr_t), asa->sa_len);
	if (m0) {
		m_clrprotoflags(m0);
		m_tag_delete_chain(m0, NULL);
		/*
		 * Clear some persistent info from pkthdr.
		 * We don't use m_demote(), because some netgraph consumers
		 * expect M_PKTHDR presence.
		 */
		m0->m_pkthdr.rcvif = NULL;
		m0->m_pkthdr.flowid = 0;
		m0->m_pkthdr.csum_flags = 0;
		m0->m_pkthdr.fibnum = 0;
		m0->m_pkthdr.rsstype = 0;
	}
	if (ctrl_last)
		ctrl_last->m_next = m0;	/* concatenate data to control */
	else
		control = m0;
	m->m_next = control;
	for (n = m; n->m_next != NULL; n = n->m_next)
		sballoc(sb, n);
	sballoc(sb, n);
	nlast = n;
	SBLINKRECORD(sb, m);

	sb->sb_mbtail = nlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
	return (1);
}
/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;
	int space = asa->sa_len;

	SOCKBUF_LOCK_ASSERT(sb);

	if (m0 && (m0->m_flags & M_PKTHDR) == 0)
		panic("sbappendaddr_locked");
	if (m0)
		space += m0->m_pkthdr.len;
	space += m_length(control, &ctrl_last);

	if (space > sbspace(sb))
		return (0);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}
/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if insufficient mbufs.  Does not validate
 * space on the receiving sockbuf.
 */
int
sbappendaddr_nospacecheck_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	struct mbuf *ctrl_last;

	SOCKBUF_LOCK_ASSERT(sb);

	ctrl_last = (control == NULL) ? NULL : m_last(control);
	return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}
/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
	int retval;

	SOCKBUF_LOCK(sb);
	retval = sbappendaddr_locked(sb, asa, m0, control);
	SOCKBUF_UNLOCK(sb);
	return (retval);
}
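/*
 * Illustrative sketch (not compiled), after the style of the UDP input
 * path: queue a datagram with its source address and optional control
 * data, dropping the datagram when the receive buffer lacks space.
 * "from" is a hypothetical sockaddr filled in by the caller.
 */
#if 0
	SOCKBUF_LOCK(&so->so_rcv);
	if (sbappendaddr_locked(&so->so_rcv,
	    (const struct sockaddr *)&from, m, control) == 0) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		m_freem(m);		/* no space: drop the datagram */
	} else
		sorwakeup_locked(so);	/* releases the lock */
#endif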
void
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control)
{
	struct mbuf *m, *mlast;

	SOCKBUF_LOCK_ASSERT(sb);

	m_last(control)->m_next = m0;	/* concatenate data to control */

	SBLASTRECORDCHK(sb);

	for (m = control; m->m_next; m = m->m_next)
		sballoc(sb, m);
	sballoc(sb, m);
	mlast = m;
	SBLINKRECORD(sb, control);

	sb->sb_mbtail = mlast;
	SBLASTMBUFCHK(sb);

	SBLASTRECORDCHK(sb);
}

void
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
{

	SOCKBUF_LOCK(sb);
	sbappendcontrol_locked(sb, m0, control);
	SOCKBUF_UNLOCK(sb);
}
/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, both mbufs are not marked as
 *     not ready, and no merging of data types will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
	int eor = 0;
	struct mbuf *o;

	SOCKBUF_LOCK_ASSERT(sb);

	while (m) {
		eor |= m->m_flags & M_EOR;
		if (m->m_len == 0 &&
		    (eor == 0 ||
		     (((o = m->m_next) || (o = n)) &&
		      o->m_type == m->m_type))) {
			if (sb->sb_lastrecord == m)
				sb->sb_lastrecord = m->m_next;
			m = m_free(m);
			continue;
		}
		if (n && (n->m_flags & M_EOR) == 0 &&
		    M_WRITABLE(n) &&
		    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
		    !(m->m_flags & M_NOTREADY) &&
		    !(n->m_flags & (M_NOTREADY | M_NOMAP)) &&
		    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
		    m->m_len <= M_TRAILINGSPACE(n) &&
		    n->m_type == m->m_type) {
			m_copydata(m, 0, m->m_len, mtodo(n, n->m_len));
			n->m_len += m->m_len;
			sb->sb_ccc += m->m_len;
			if (sb->sb_fnrdy == NULL)
				sb->sb_acc += m->m_len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				/* XXX: Probably don't need.*/
				sb->sb_ctl += m->m_len;
			m = m_free(m);
			continue;
		}
		if (m->m_len <= MLEN && (m->m_flags & M_NOMAP) &&
		    (m->m_flags & M_NOTREADY) == 0)
			(void)mb_unmapped_compress(m);
		if (n)
			n->m_next = m;
		else
			sb->sb_mb = m;
		sb->sb_mbtail = m;
		sballoc(sb, m);
		n = m;
		m->m_flags &= ~M_EOR;
		m = m->m_next;
		n->m_next = 0;
	}
	if (eor) {
		KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
		n->m_flags |= eor;
	}
	SBLASTMBUFCHK(sb);
}
/*
 * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
 */
static void
sbflush_internal(struct sockbuf *sb)
{

	while (sb->sb_mbcnt) {
		/*
		 * Don't call sbcut(sb, 0) if the leading mbuf is non-empty:
		 * we would loop forever. Panic instead.
		 */
		if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len))
			break;
		m_freem(sbcut_internal(sb, (int)sb->sb_ccc));
	}
	KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
	    ("%s: ccc %u mb %p mbcnt %u", __func__,
	    sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
}

void
sbflush_locked(struct sockbuf *sb)
{

	SOCKBUF_LOCK_ASSERT(sb);
	sbflush_internal(sb);
}

void
sbflush(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbflush_locked(sb);
	SOCKBUF_UNLOCK(sb);
}
/*
 * Cut data from (the front of) a sockbuf.
 */
static struct mbuf *
sbcut_internal(struct sockbuf *sb, int len)
{
	struct mbuf *m, *next, *mfree;

	KASSERT(len >= 0, ("%s: len is %d but it is supposed to be >= 0",
	    __func__, len));
	KASSERT(len <= sb->sb_ccc, ("%s: len: %d is > ccc: %u",
	    __func__, len, sb->sb_ccc));

	next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
	mfree = NULL;

	while (len > 0) {
		if (m == NULL) {
			KASSERT(next, ("%s: no next, len %d", __func__, len));
			m = next;
			next = m->m_nextpkt;
		}
		if (m->m_len > len) {
			KASSERT(!(m->m_flags & M_NOTAVAIL),
			    ("%s: m %p M_NOTAVAIL", __func__, m));
			m->m_len -= len;
			m->m_data += len;
			sb->sb_ccc -= len;
			sb->sb_acc -= len;
			if (sb->sb_sndptroff != 0)
				sb->sb_sndptroff -= len;
			if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
				sb->sb_ctl -= len;
			break;
		}
		len -= m->m_len;
		sbfree(sb, m);
		/*
		 * Do not put M_NOTREADY buffers to the free list, they
		 * are referenced from outside.
		 */
		if (m->m_flags & M_NOTREADY)
			m = m->m_next;
		else {
			struct mbuf *n;

			n = m->m_next;
			m->m_next = mfree;
			mfree = m;
			m = n;
		}
	}
	/*
	 * Free any zero-length mbufs from the buffer.
	 * For SOCK_DGRAM sockets such mbufs represent empty records.
	 * XXX: For SOCK_STREAM sockets such mbufs can appear in the buffer,
	 * when sosend_generic() needs to send only control data.
	 */
	while (m && m->m_len == 0) {
		struct mbuf *n;

		sbfree(sb, m);
		n = m->m_next;
		m->m_next = mfree;
		mfree = m;
		m = n;
	}
	if (m) {
		sb->sb_mb = m;
		m->m_nextpkt = next;
	} else
		sb->sb_mb = next;
	/*
	 * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
	 * sb_lastrecord is up-to-date if we dropped part of the last record.
	 */
	m = sb->sb_mb;
	if (m == NULL) {
		sb->sb_mbtail = NULL;
		sb->sb_lastrecord = NULL;
	} else if (m->m_nextpkt == NULL) {
		sb->sb_lastrecord = m;
	}

	return (mfree);
}
/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	m_freem(sbcut_internal(sb, len));
}

/*
 * Drop data from (the front of) a sockbuf,
 * and return it to caller.
 */
struct mbuf *
sbcut_locked(struct sockbuf *sb, int len)
{

	SOCKBUF_LOCK_ASSERT(sb);
	return (sbcut_internal(sb, len));
}

void
sbdrop(struct sockbuf *sb, int len)
{
	struct mbuf *mfree;

	SOCKBUF_LOCK(sb);
	mfree = sbcut_internal(sb, len);
	SOCKBUF_UNLOCK(sb);
	m_freem(mfree);
}
struct mbuf *
sbsndptr_noadv(struct sockbuf *sb, uint32_t off, uint32_t *moff)
{
	struct mbuf *m;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
	if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
		/* Return closest mbuf in chain */
		*moff = off;
		if (sb->sb_sndptr == NULL) {
			sb->sb_sndptr = sb->sb_mb;
			sb->sb_sndptroff = 0;
		}
		return (sb->sb_mb);
	} else {
		m = sb->sb_sndptr;
		off -= sb->sb_sndptroff;
	}
	*moff = off;
	return (m);
}
void
sbsndptr_adv(struct sockbuf *sb, struct mbuf *mb, uint32_t len)
{
	/*
	 * A small copy was done, advance forward the sb_sndptr to cover
	 * it.
	 */
	struct mbuf *m;

	if (mb != sb->sb_sndptr) {
		/* Did not copyout at the same mbuf */
		return;
	}
	m = mb;
	while (m && (len > 0)) {
		if (len >= m->m_len) {
			len -= m->m_len;
			if (m->m_next) {
				sb->sb_sndptroff += m->m_len;
				sb->sb_sndptr = m->m_next;
			}
			m = m->m_next;
		} else {
			len = 0;
		}
	}
}
/*
 * Return the first mbuf and the mbuf data offset for the provided
 * send offset without changing the "sb_sndptroff" field.
 */
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff)
{
	struct mbuf *m;

	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));

	/*
	 * If the "off" is below the stored offset, which happens on
	 * retransmits, just use "sb_mb":
	 */
	if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
		m = sb->sb_mb;
	} else {
		m = sb->sb_sndptr;
		off -= sb->sb_sndptroff;
	}
	while (off > 0 && m != NULL) {
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	*moff = off;
	return (m);
}
/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord_locked(struct sockbuf *sb)
{
	struct mbuf *m;

	SOCKBUF_LOCK_ASSERT(sb);

	m = sb->sb_mb;
	if (m) {
		sb->sb_mb = m->m_nextpkt;
		do {
			sbfree(sb, m);
			m = m_free(m);
		} while (m);
	}
	SB_EMPTY_FIXUP(sb);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord(struct sockbuf *sb)
{

	SOCKBUF_LOCK(sb);
	sbdroprecord_locked(sb);
	SOCKBUF_UNLOCK(sb);
}
/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
	struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MCLBYTES)
		return ((struct mbuf *) NULL);
	if (CMSG_SPACE((u_int)size) > MLEN)
		m = m_getcl(M_NOWAIT, MT_CONTROL, 0);
	else
		m = m_get(M_NOWAIT, MT_CONTROL);
	if (m == NULL)
		return ((struct mbuf *) NULL);
	cp = mtod(m, struct cmsghdr *);
	m->m_len = 0;
	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
	    ("sbcreatecontrol: short mbuf"));
	/*
	 * Don't leave the padding between the msg header and the
	 * cmsg data and the padding after the cmsg data un-initialized.
	 */
	bzero(cp, CMSG_SPACE((u_int)size));
	if (p != NULL)
		(void)memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
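/*
 * Illustrative sketch (not compiled): building a SCM_TIMESTAMP control
 * message for a received datagram, in the style of the UDP input path.
 */
#if 0
	struct timeval tv;
	struct mbuf *control;

	microtime(&tv);
	control = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
	    SCM_TIMESTAMP, SOL_SOCKET);
	if (control == NULL) {
		/* Allocation failed; proceed without control data. */
	}
#endif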
/*
 * This does the same for socket buffers that sotoxsocket does for sockets:
 * generate a user-format data structure describing the socket buffer.  Note
 * that the xsockbuf structure, since it is always embedded in a socket, does
 * not include a self pointer nor a length.  We make this entry point public
 * in case some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{

	xsb->sb_cc = sb->sb_ccc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mcnt = sb->sb_mcnt;
	xsb->sb_ccnt = sb->sb_ccnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = sb->sb_timeo;
}
/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size");
SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "Socket buffer size waste factor");