/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/aio.h>	/* for aio_swake proto */
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
/*
 * Function pointer set by the AIO routines so that the socket buffer code
 * can call back into the AIO module if it is loaded.
 */
void	(*aio_swake)(struct socket *, struct sockbuf *);
/*
 * Primitive routines for operating on socket buffers
 */

u_long	sb_max = SB_MAX;
u_long	sb_max_adj =
	(quad_t)SB_MAX * MCLBYTES / (MSIZE + MCLBYTES); /* adjusted sb_max */
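/*
 * Worked example (illustrative only; MSIZE and MCLBYTES are
 * platform-dependent): with MSIZE = 256 and MCLBYTES = 2048, each cluster
 * of payload costs MSIZE + MCLBYTES = 2304 bytes of charged memory, so
 * only 2048/2304 of sb_max can be user data.  A 2 MB sb_max thus yields
 * an sb_max_adj of roughly 1.78 MB.
 */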
static	u_long sb_efficiency = 8;	/* parameter for sbreserve() */

static struct mbuf	*sbcut_internal(struct sockbuf *sb, int len);
static void	sbflush_internal(struct sockbuf *sb);
/*
 * Mark ready "count" mbufs starting with "m".
 */
int
sbready(struct sockbuf *sb, struct mbuf *m, int count)
{
        u_int blocker;

        SOCKBUF_LOCK_ASSERT(sb);
        KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));

        blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;

        for (int i = 0; i < count; i++, m = m->m_next) {
                KASSERT(m->m_flags & M_NOTREADY,
                    ("%s: m %p !M_NOTREADY", __func__, m));
                m->m_flags &= ~(M_NOTREADY | blocker);
                if (blocker)
                        sb->sb_acc += m->m_len;
        }

        if (!blocker)
                return (EINPROGRESS);

        /* This one was blocking all the queue. */
        for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
                KASSERT(m->m_flags & M_BLOCKED,
                    ("%s: m %p !M_BLOCKED", __func__, m));
                m->m_flags &= ~M_BLOCKED;
                sb->sb_acc += m->m_len;
        }

        sb->sb_fnrdy = m;

        return (0);
}
/*
 * Adjust sockbuf state reflecting allocation of m.
 */
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{

        SOCKBUF_LOCK_ASSERT(sb);

        sb->sb_ccc += m->m_len;

        if (sb->sb_fnrdy == NULL) {
                if (m->m_flags & M_NOTREADY)
                        sb->sb_fnrdy = m;
                else
                        sb->sb_acc += m->m_len;
        } else
                m->m_flags |= M_BLOCKED;

        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                sb->sb_ctl += m->m_len;

        sb->sb_mbcnt += MSIZE;
        sb->sb_mcnt += 1;

        if (m->m_flags & M_EXT) {
                sb->sb_mbcnt += m->m_ext.ext_size;
                sb->sb_ccnt += 1;
        }
}
/*
 * Adjust sockbuf state reflecting freeing of m.
 */
void
sbfree(struct sockbuf *sb, struct mbuf *m)
{

#if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
        SOCKBUF_LOCK_ASSERT(sb);
#endif

        sb->sb_ccc -= m->m_len;

        if (!(m->m_flags & M_NOTAVAIL))
                sb->sb_acc -= m->m_len;

        if (m == sb->sb_fnrdy) {
                struct mbuf *n;

                KASSERT(m->m_flags & M_NOTREADY,
                    ("%s: m %p !M_NOTREADY", __func__, m));

                n = m->m_next;
                while (n != NULL && !(n->m_flags & M_NOTREADY)) {
                        n->m_flags &= ~M_BLOCKED;
                        sb->sb_acc += n->m_len;
                        n = n->m_next;
                }
                sb->sb_fnrdy = n;
        }

        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                sb->sb_ctl -= m->m_len;

        sb->sb_mbcnt -= MSIZE;
        sb->sb_mcnt -= 1;
        if (m->m_flags & M_EXT) {
                sb->sb_mbcnt -= m->m_ext.ext_size;
                sb->sb_ccnt -= 1;
        }

        if (sb->sb_sndptr == m) {
                sb->sb_sndptr = NULL;
                sb->sb_sndptroff = 0;
        }
        if (sb->sb_sndptroff != 0)
                sb->sb_sndptroff -= m->m_len;
}
/*
 * Socantsendmore indicates that no more data will be sent on the socket; it
 * is applied to the socket when the user informs the system that no more
 * data is to be sent, by the protocol code (in the case of PRU_SHUTDOWN).
 * Socantrcvmore indicates that no more data will be received, and will
 * normally be applied to the socket by a protocol when it detects that the
 * peer will send no more data.  Data queued for reading in the socket may
 * yet be read.
 */
void
socantsendmore_locked(struct socket *so)
{

        SOCKBUF_LOCK_ASSERT(&so->so_snd);

        so->so_snd.sb_state |= SBS_CANTSENDMORE;
        sowwakeup_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantsendmore(struct socket *so)
{

        SOCKBUF_LOCK(&so->so_snd);
        socantsendmore_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
}

void
socantrcvmore_locked(struct socket *so)
{

        SOCKBUF_LOCK_ASSERT(&so->so_rcv);

        so->so_rcv.sb_state |= SBS_CANTRCVMORE;
        sorwakeup_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}

void
socantrcvmore(struct socket *so)
{

        SOCKBUF_LOCK(&so->so_rcv);
        socantrcvmore_locked(so);
        mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
}
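
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * protocol's input path would typically mark the socket this way once the
 * peer signals that it will send no more data (e.g., on receipt of a TCP
 * FIN).  Queued data remains readable afterwards.
 */
static __unused void
example_peer_shutdown(struct socket *so)
{

        socantrcvmore(so);	/* readers may still drain queued data */
}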
/*
 * Wait for data to arrive at/drain from a socket buffer.
 */
int
sbwait(struct sockbuf *sb)
{

        SOCKBUF_LOCK_ASSERT(sb);

        sb->sb_flags |= SB_WAIT;
        return (msleep_sbt(&sb->sb_acc, &sb->sb_mtx,
            (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
            sb->sb_timeo, 0, 0));
}
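
/*
 * Illustrative sketch (hypothetical, not part of the original file): the
 * canonical consumer pattern around sbwait(), as used by receive paths.
 * msleep releases the sockbuf mutex while asleep and reacquires it before
 * returning, so the loop re-checks its condition each time.
 */
static __unused int
example_wait_for_data(struct socket *so)
{
        struct sockbuf *sb = &so->so_rcv;
        int error = 0;

        SOCKBUF_LOCK(sb);
        while (sb->sb_acc == 0 && (sb->sb_state & SBS_CANTRCVMORE) == 0) {
                error = sbwait(sb);	/* sleeps with the lock dropped */
                if (error)
                        break;
        }
        SOCKBUF_UNLOCK(sb);
        return (error);
}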
int
sblock(struct sockbuf *sb, int flags)
{

        KASSERT((flags & SBL_VALID) == flags,
            ("sblock: flags invalid (0x%x)", flags));

        if (flags & SBL_WAIT) {
                if ((sb->sb_flags & SB_NOINTR) ||
                    (flags & SBL_NOINTR)) {
                        sx_xlock(&sb->sb_sx);
                        return (0);
                }
                return (sx_xlock_sig(&sb->sb_sx));
        } else {
                if (sx_try_xlock(&sb->sb_sx) == 0)
                        return (EWOULDBLOCK);
                return (0);
        }
}

void
sbunlock(struct sockbuf *sb)
{

        sx_xunlock(&sb->sb_sx);
}
/*
 * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
 * via SIGIO if the socket has the SS_ASYNC flag set.
 *
 * Called with the socket buffer lock held; will release the lock by the end
 * of the function.  This allows the caller to acquire the socket buffer lock
 * while testing for the need for various sorts of wakeup and hold it through
 * to the point where it's no longer required.  We currently hold the lock
 * through calls out to other subsystems (with the exception of kqueue), and
 * then release it to avoid lock order issues.  It's not clear that's
 * correct.
 */
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
        int ret;

        SOCKBUF_LOCK_ASSERT(sb);

        selwakeuppri(&sb->sb_sel, PSOCK);
        if (!SEL_WAITING(&sb->sb_sel))
                sb->sb_flags &= ~SB_SEL;
        if (sb->sb_flags & SB_WAIT) {
                sb->sb_flags &= ~SB_WAIT;
                wakeup(&sb->sb_acc);
        }
        KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
        if (sb->sb_upcall != NULL) {
                ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
                if (ret == SU_ISCONNECTED) {
                        KASSERT(sb == &so->so_rcv,
                            ("SO_SND upcall returned SU_ISCONNECTED"));
                        soupcall_clear(so, SO_RCV);
                }
        } else
                ret = SU_OK;
        if (sb->sb_flags & SB_AIO)
                sowakeup_aio(so, sb);
        SOCKBUF_UNLOCK(sb);
        if (ret == SU_ISCONNECTED)
                soisconnected(so);
        if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
                pgsigio(&so->so_sigio, SIGIO, 0);
        mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
}
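
/*
 * Illustrative sketch (hypothetical, not part of the original file): an
 * in-kernel consumer registering a receive upcall, so that sowakeup() on
 * the receive buffer calls back into it.  The upcall runs with the sockbuf
 * lock held and must return an SU_* disposition.
 */
static __unused int
example_rcv_upcall(struct socket *so, void *arg, int waitflag)
{

        /* Inspect or schedule draining of so->so_rcv here. */
        return (SU_OK);
}

static __unused void
example_register_upcall(struct socket *so)
{

        SOCKBUF_LOCK(&so->so_rcv);
        soupcall_set(so, SO_RCV, example_rcv_upcall, NULL);
        SOCKBUF_UNLOCK(&so->so_rcv);
}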
/*
 * Socket buffer (struct sockbuf) utility routines.
 *
 * Each socket contains two socket buffers: one for sending data and one for
 * receiving data.  Each buffer contains a queue of mbufs, information about
 * the number of mbufs and amount of data in the queue, and other fields
 * allowing select() statements and notification on data availability to be
 * implemented.
 *
 * Data stored in a socket buffer is maintained as a list of records.  Each
 * record is a list of mbufs chained together with the m_next field.  Records
 * are chained together with the m_nextpkt field.  The upper level routine
 * soreceive() expects the following conventions to be observed when placing
 * information in the receive buffer:
 *
 * 1. If the protocol requires each message be preceded by the sender's name,
 *    then a record containing that name must be present before any
 *    associated data (mbuf's must be of type MT_SONAME).
 * 2. If the protocol supports the exchange of ``access rights'' (really just
 *    additional data associated with the message), and there are ``rights''
 *    to be received, then a record containing this data should be present
 *    (mbuf's must be of type MT_RIGHTS).
 * 3. If a name or rights record exists, then it must be followed by a data
 *    record, perhaps of zero length.
 *
 * Before using a new socket structure it is first necessary to reserve
 * buffer space to the socket, by calling sbreserve().  This should commit
 * some of the available buffer space in the system buffer pool for the
 * socket (currently, it does nothing but enforce limits).  The space should
 * be released by calling sbrelease() when the socket is destroyed.
 */
int
soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
{
        struct thread *td = curthread;

        SOCKBUF_LOCK(&so->so_snd);
        SOCKBUF_LOCK(&so->so_rcv);
        if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
                goto bad;
        if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
                goto bad2;
        if (so->so_rcv.sb_lowat == 0)
                so->so_rcv.sb_lowat = 1;
        if (so->so_snd.sb_lowat == 0)
                so->so_snd.sb_lowat = MCLBYTES;
        if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
                so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
        SOCKBUF_UNLOCK(&so->so_rcv);
        SOCKBUF_UNLOCK(&so->so_snd);
        return (0);
bad2:
        sbrelease_locked(&so->so_snd, so);
bad:
        SOCKBUF_UNLOCK(&so->so_rcv);
        SOCKBUF_UNLOCK(&so->so_snd);
        return (ENOBUFS);
}
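
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * protocol's attach routine typically sizes both buffers up front via
 * soreserve().  The send/receive space values here are made-up defaults,
 * not values taken from any real protocol.
 */
static __unused int
example_attach(struct socket *so)
{
        u_long sendspace = 32 * 1024;	/* assumed default */
        u_long recvspace = 64 * 1024;	/* assumed default */

        return (soreserve(so, sendspace, recvspace));
}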
static int
sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        u_long tmp_sb_max = sb_max;

        error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
        if (error || !req->newptr)
                return (error);
        if (tmp_sb_max < MSIZE + MCLBYTES)
                return (EINVAL);
        sb_max = tmp_sb_max;
        sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
        return (0);
}
/*
 * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
 * become limiting if buffering efficiency is near the normal case.
 */
int
sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
        rlim_t sbsize_limit;

        SOCKBUF_LOCK_ASSERT(sb);

        /*
         * When a thread is passed, we take into account the thread's socket
         * buffer size limit.  The caller will generally pass curthread, but
         * in the TCP input path, NULL will be passed to indicate that no
         * appropriate thread resource limits are available.  In that case,
         * we don't apply a process limit.
         */
        if (cc > sb_max_adj)
                return (0);
        if (td != NULL) {
                PROC_LOCK(td->td_proc);
                sbsize_limit = lim_cur(td->td_proc, RLIMIT_SBSIZE);
                PROC_UNLOCK(td->td_proc);
        } else
                sbsize_limit = RLIM_INFINITY;
        if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
            sbsize_limit))
                return (0);
        sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
        if (sb->sb_lowat > sb->sb_hiwat)
                sb->sb_lowat = sb->sb_hiwat;
        return (1);
}

int
sbreserve(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td)
{
        int error;

        SOCKBUF_LOCK(sb);
        error = sbreserve_locked(sb, cc, so, td);
        SOCKBUF_UNLOCK(sb);

        return (error);
}
/*
 * Free mbufs held by a socket, and reserved mbuf space.
 */
static void
sbrelease_internal(struct sockbuf *sb, struct socket *so)
{

        sbflush_internal(sb);
        (void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
            RLIM_INFINITY);
        sb->sb_mbmax = 0;
}

void
sbrelease_locked(struct sockbuf *sb, struct socket *so)
{

        SOCKBUF_LOCK_ASSERT(sb);

        sbrelease_internal(sb, so);
}

void
sbrelease(struct sockbuf *sb, struct socket *so)
{

        SOCKBUF_LOCK(sb);
        sbrelease_locked(sb, so);
        SOCKBUF_UNLOCK(sb);
}

void
sbdestroy(struct sockbuf *sb, struct socket *so)
{

        sbrelease_internal(sb, so);
}
/*
 * Routines to add and remove data from an mbuf queue.
 *
 * The routines sbappend() or sbappendrecord() are normally called to append
 * new mbufs to a socket buffer, after checking that adequate space is
 * available, comparing the function sbspace() with the amount of data to be
 * added.  sbappendrecord() differs from sbappend() in that data supplied is
 * treated as the beginning of a new record.  To place a sender's address,
 * optional access rights, and data in a socket receive buffer,
 * sbappendaddr() should be used.  To place access rights and data in a
 * socket receive buffer, sbappendrights() should be used.  In either case,
 * the new data begins a new record.  Note that unlike sbappend() and
 * sbappendrecord(), these routines check for the caller that there will be
 * enough space to store the data.  Each fails if there is not enough space,
 * or if it cannot find mbufs to store additional information in.
 *
 * Reliable protocols may use the socket send buffer to hold data awaiting
 * acknowledgement.  Data is normally copied from a socket send buffer in a
 * protocol with m_copy for output to a peer, and then removed from the
 * socket buffer with sbdrop() or sbdroprecord() when the data is
 * acknowledged by the peer.
 */
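
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * datagram protocol delivering an incoming packet "m" with source address
 * "sa" to a socket's receive buffer.  sbappendaddr_locked() performs the
 * space check itself and returns 0 on failure, in which case the caller
 * still owns (and must free) the mbuf chain.
 */
static __unused int
example_deliver_datagram(struct socket *so, struct sockaddr *sa,
    struct mbuf *m)
{

        SOCKBUF_LOCK(&so->so_rcv);
        if (sbappendaddr_locked(&so->so_rcv, sa, m, NULL) == 0) {
                SOCKBUF_UNLOCK(&so->so_rcv);
                m_freem(m);	/* no space or no mbufs: drop it */
                return (ENOBUFS);
        }
        sorwakeup_locked(so);	/* releases the receive buffer lock */
        return (0);
}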
#ifdef SOCKBUF_DEBUG
void
sblastrecordchk(struct sockbuf *sb, const char *file, int line)
{
        struct mbuf *m = sb->sb_mb;

        SOCKBUF_LOCK_ASSERT(sb);

        while (m && m->m_nextpkt)
                m = m->m_nextpkt;

        if (m != sb->sb_lastrecord) {
                printf("%s: sb_mb %p sb_lastrecord %p last %p\n",
                    __func__, sb->sb_mb, sb->sb_lastrecord, m);
                printf("packet chain:\n");
                for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt)
                        printf("\t%p\n", m);
                panic("%s from %s:%u", __func__, file, line);
        }
}

void
sblastmbufchk(struct sockbuf *sb, const char *file, int line)
{
        struct mbuf *m = sb->sb_mb;
        struct mbuf *n;

        SOCKBUF_LOCK_ASSERT(sb);

        while (m && m->m_nextpkt)
                m = m->m_nextpkt;

        while (m && m->m_next)
                m = m->m_next;

        if (m != sb->sb_mbtail) {
                printf("%s: sb_mb %p sb_mbtail %p last %p\n",
                    __func__, sb->sb_mb, sb->sb_mbtail, m);
                printf("packet tree:\n");
                for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) {
                        printf("\t");
                        for (n = m; n != NULL; n = n->m_next)
                                printf("%p ", n);
                        printf("\n");
                }
                panic("%s from %s:%u", __func__, file, line);
        }
}
#endif /* SOCKBUF_DEBUG */
#define SBLINKRECORD(sb, m0) do {					\
	SOCKBUF_LOCK_ASSERT(sb);					\
	if ((sb)->sb_lastrecord != NULL)				\
		(sb)->sb_lastrecord->m_nextpkt = (m0);			\
	else								\
		(sb)->sb_mb = (m0);					\
	(sb)->sb_lastrecord = (m0);					\
} while (/*CONSTCOND*/0)
/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend_locked(struct sockbuf *sb, struct mbuf *m)
{
        struct mbuf *n;

        SOCKBUF_LOCK_ASSERT(sb);

        if (m == NULL)
                return;
        sbm_clrprotoflags(m);
        SBLASTRECORDCHK(sb);
        n = sb->sb_mb;
        if (n) {
                while (n->m_nextpkt)
                        n = n->m_nextpkt;
                do {
                        if (n->m_flags & M_EOR) {
                                sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
                                return;
                        }
                } while (n->m_next && (n = n->m_next));
        } else {
                /*
                 * XXX Would like to simply use sb_mbtail here, but
                 * XXX I need to verify that I won't miss an EOR that
                 * XXX way.
                 */
                if ((n = sb->sb_lastrecord) != NULL) {
                        do {
                                if (n->m_flags & M_EOR) {
                                        sbappendrecord_locked(sb, m); /* XXXXXX!!!! */
                                        return;
                                }
                        } while (n->m_next && (n = n->m_next));
                } else {
                        /*
                         * If this is the first record in the socket buffer,
                         * it's also the last record.
                         */
                        sb->sb_lastrecord = m;
                }
        }
        sbcompress(sb, m, n);
        SBLASTRECORDCHK(sb);
}
/*
 * Append mbuf chain m to the last record in the socket buffer sb.  The
 * additional space associated with the mbuf chain is recorded in sb.  Empty
 * mbufs are discarded and mbufs are compacted where possible.
 */
void
sbappend(struct sockbuf *sb, struct mbuf *m)
{

        SOCKBUF_LOCK(sb);
        sbappend_locked(sb, m);
        SOCKBUF_UNLOCK(sb);
}
/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
{
        SOCKBUF_LOCK_ASSERT(sb);

        KASSERT(m->m_nextpkt == NULL, ("sbappendstream 0"));
        KASSERT(sb->sb_mb == sb->sb_lastrecord, ("sbappendstream 1"));

        SBLASTMBUFCHK(sb);

        /* Remove all packet headers and mbuf tags to get a pure data chain. */
        m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);

        sbcompress(sb, m, sb->sb_mbtail);

        sb->sb_lastrecord = sb->sb_mb;
        SBLASTRECORDCHK(sb);
}

/*
 * This version of sbappend() should only be used when the caller absolutely
 * knows that there will never be more than one record in the socket buffer,
 * that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
{

        SOCKBUF_LOCK(sb);
        sbappendstream_locked(sb, m, flags);
        SOCKBUF_UNLOCK(sb);
}
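
/*
 * Illustrative sketch (hypothetical, not part of the original file): how a
 * stream protocol's input path delivers in-order segment data "m" and wakes
 * the reader, following the pattern used by TCP.
 */
static __unused void
example_stream_deliver(struct socket *so, struct mbuf *m)
{

        SOCKBUF_LOCK(&so->so_rcv);
        sbappendstream_locked(&so->so_rcv, m, 0);
        sorwakeup_locked(so);	/* releases the receive buffer lock */
}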
#ifdef SOCKBUF_DEBUG
void
sbcheck(struct sockbuf *sb, const char *file, int line)
{
        struct mbuf *m, *n, *fnrdy;
        u_long acc, ccc, mbcnt;

        SOCKBUF_LOCK_ASSERT(sb);

        acc = ccc = mbcnt = 0;
        fnrdy = NULL;

        for (m = sb->sb_mb; m; m = n) {
                n = m->m_nextpkt;
                for (; m; m = m->m_next) {
                        if (m->m_len == 0) {
                                printf("sb %p empty mbuf %p\n", sb, m);
                                goto fail;
                        }
                        if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) {
                                if (m != sb->sb_fnrdy) {
                                        printf("sb %p: fnrdy %p != m %p\n",
                                            sb, sb->sb_fnrdy, m);
                                        goto fail;
                                }
                                fnrdy = m;
                        }
                        if (fnrdy) {
                                if (!(m->m_flags & M_NOTAVAIL)) {
                                        printf("sb %p: fnrdy %p, m %p is avail\n",
                                            sb, sb->sb_fnrdy, m);
                                        goto fail;
                                }
                        } else
                                acc += m->m_len;
                        ccc += m->m_len;
                        mbcnt += MSIZE;
                        if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
                                mbcnt += m->m_ext.ext_size;
                }
        }
        if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) {
                printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n",
                    acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt);
                goto fail;
        }
        return;
fail:
        panic("%s from %s:%u", __func__, file, line);
}
#endif
/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0)
{
        struct mbuf *m;

        SOCKBUF_LOCK_ASSERT(sb);

        if (m0 == NULL)
                return;
        m_clrprotoflags(m0);
        /*
         * Put the first mbuf on the queue.  Note this permits zero length
         * records.
         */
        sballoc(sb, m0);
        SBLASTRECORDCHK(sb);
        SBLINKRECORD(sb, m0);
        sb->sb_mbtail = m0;
        sb->sb_lastrecord = m0;

        m = m0->m_next;
        m0->m_next = NULL;
        if (m && (m0->m_flags & M_EOR)) {
                m0->m_flags &= ~M_EOR;
                m->m_flags |= M_EOR;
        }
        /* always call sbcompress() so it can do SBLASTMBUFCHK() */
        sbcompress(sb, m, m0);
}

/*
 * As above, except the mbuf chain begins a new record.
 */
void
sbappendrecord(struct sockbuf *sb, struct mbuf *m0)
{

        SOCKBUF_LOCK(sb);
        sbappendrecord_locked(sb, m0);
        SOCKBUF_UNLOCK(sb);
}
/* Helper routine that appends data, control, and address to a sockbuf. */
static int
sbappendaddr_locked_internal(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control, struct mbuf *ctrl_last)
{
        struct mbuf *m, *n, *nlast;
#if MSIZE <= 256
        if (asa->sa_len > MLEN)
                return (0);
#endif
        m = m_get(M_NOWAIT, MT_SONAME);
        if (m == NULL)
                return (0);
        m->m_len = asa->sa_len;
        bcopy(asa, mtod(m, caddr_t), asa->sa_len);
        if (ctrl_last)
                ctrl_last->m_next = m0;	/* concatenate data to control */
        else
                control = m0;
        m->m_next = control;
        for (n = m; n->m_next != NULL; n = n->m_next)
                sballoc(sb, n);
        sballoc(sb, n);
        nlast = n;
        SBLINKRECORD(sb, m);

        sb->sb_mbtail = nlast;
        SBLASTMBUFCHK(sb);

        SBLASTRECORDCHK(sb);
        return (1);
}
/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
        struct mbuf *ctrl_last;
        int space = asa->sa_len;

        SOCKBUF_LOCK_ASSERT(sb);

        if (m0 && (m0->m_flags & M_PKTHDR) == 0)
                panic("sbappendaddr_locked");
        if (m0)
                space += m0->m_pkthdr.len;
        space += m_length(control, &ctrl_last);

        if (space > sbspace(sb))
                return (0);
        return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}
/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if insufficient mbufs.  Does not validate
 * space on the receiving sockbuf.
 */
int
sbappendaddr_nospacecheck_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
        struct mbuf *ctrl_last;

        SOCKBUF_LOCK_ASSERT(sb);

        ctrl_last = (control == NULL) ? NULL : m_last(control);
        return (sbappendaddr_locked_internal(sb, asa, m0, control, ctrl_last));
}
/*
 * Append address and data, and optionally, control (ancillary) data to the
 * receive queue of a socket.  If present, m0 must include a packet header
 * with total length.  Returns 0 if no space in sockbuf or insufficient
 * mbufs.
 */
int
sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control)
{
        int retval;

        SOCKBUF_LOCK(sb);
        retval = sbappendaddr_locked(sb, asa, m0, control);
        SOCKBUF_UNLOCK(sb);
        return (retval);
}
int
sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control)
{
        struct mbuf *m, *n, *mlast;
        int space;

        SOCKBUF_LOCK_ASSERT(sb);

        if (control == NULL)
                panic("sbappendcontrol_locked");
        space = m_length(control, &n) + m_length(m0, NULL);

        if (space > sbspace(sb))
                return (0);
        n->m_next = m0;			/* concatenate data to control */

        SBLASTRECORDCHK(sb);

        for (m = control; m->m_next; m = m->m_next)
                sballoc(sb, m);
        sballoc(sb, m);
        mlast = m;
        SBLINKRECORD(sb, control);

        sb->sb_mbtail = mlast;
        SBLASTMBUFCHK(sb);

        SBLASTRECORDCHK(sb);
        return (1);
}

int
sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
{
        int retval;

        SOCKBUF_LOCK(sb);
        retval = sbappendcontrol_locked(sb, m0, control);
        SOCKBUF_UNLOCK(sb);
        return (retval);
}
/*
 * Append the data in mbuf chain (m) into the socket buffer sb following mbuf
 * (n).  If (n) is NULL, the buffer is presumed empty.
 *
 * When the data is compressed, mbufs in the chain may be handled in one of
 * three ways:
 *
 * (1) The mbuf may simply be dropped, if it contributes nothing (no data, no
 *     record boundary, and no change in data type).
 *
 * (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
 *     an mbuf already in the socket buffer.  This can occur if an
 *     appropriate mbuf exists, there is room, both mbufs are not marked as
 *     not ready, and no merging of data types will occur.
 *
 * (3) The mbuf may be appended to the end of the existing mbuf chain.
 *
 * If any of the new mbufs is marked as M_EOR, mark the last mbuf appended as
 * end-of-record.
 */
void
sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
{
        int eor = 0;
        struct mbuf *o;

        SOCKBUF_LOCK_ASSERT(sb);

        while (m) {
                eor |= m->m_flags & M_EOR;
                if (m->m_len == 0 &&
                    (eor == 0 ||
                     (((o = m->m_next) || (o = n)) &&
                      o->m_type == m->m_type))) {
                        if (sb->sb_lastrecord == m)
                                sb->sb_lastrecord = m->m_next;
                        m = m_free(m);
                        continue;
                }
                if (n && (n->m_flags & M_EOR) == 0 &&
                    M_WRITABLE(n) &&
                    ((sb->sb_flags & SB_NOCOALESCE) == 0) &&
                    !(m->m_flags & M_NOTREADY) &&
                    !(n->m_flags & M_NOTREADY) &&
                    m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
                    m->m_len <= M_TRAILINGSPACE(n) &&
                    n->m_type == m->m_type) {
                        bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
                            (unsigned)m->m_len);
                        n->m_len += m->m_len;
                        sb->sb_ccc += m->m_len;
                        if (sb->sb_fnrdy == NULL)
                                sb->sb_acc += m->m_len;
                        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                                /* XXX: Probably don't need.*/
                                sb->sb_ctl += m->m_len;
                        m = m_free(m);
                        continue;
                }
                if (n)
                        n->m_next = m;
                else
                        sb->sb_mb = m;
                sb->sb_mbtail = m;
                sballoc(sb, m);
                n = m;
                m->m_flags &= ~M_EOR;
                m = m->m_next;
                n->m_next = NULL;
        }
        if (eor) {
                KASSERT(n != NULL, ("sbcompress: eor && n == NULL"));
                n->m_flags |= eor;
        }
        SBLASTMBUFCHK(sb);
}
/*
 * Free all mbufs in a sockbuf.  Check that all resources are reclaimed.
 */
static void
sbflush_internal(struct sockbuf *sb)
{

        while (sb->sb_mbcnt) {
                /*
                 * Don't call sbcut(sb, 0) if the leading mbuf is non-empty:
                 * we would loop forever.  Panic instead.
                 */
                if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len))
                        break;
                m_freem(sbcut_internal(sb, (int)sb->sb_ccc));
        }
        KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
            ("%s: ccc %u mb %p mbcnt %u", __func__,
            sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
}

void
sbflush_locked(struct sockbuf *sb)
{

        SOCKBUF_LOCK_ASSERT(sb);
        sbflush_internal(sb);
}

void
sbflush(struct sockbuf *sb)
{

        SOCKBUF_LOCK(sb);
        sbflush_locked(sb);
        SOCKBUF_UNLOCK(sb);
}
/*
 * Cut data from (the front of) a sockbuf.
 */
static struct mbuf *
sbcut_internal(struct sockbuf *sb, int len)
{
        struct mbuf *m, *next, *mfree;

        next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
        mfree = NULL;

        while (len > 0) {
                if (m == NULL) {
                        KASSERT(next, ("%s: no next, len %d", __func__, len));
                        m = next;
                        next = m->m_nextpkt;
                }
                if (m->m_len > len) {
                        KASSERT(!(m->m_flags & M_NOTAVAIL),
                            ("%s: m %p M_NOTAVAIL", __func__, m));
                        m->m_len -= len;
                        m->m_data += len;
                        sb->sb_ccc -= len;
                        sb->sb_acc -= len;
                        if (sb->sb_sndptroff != 0)
                                sb->sb_sndptroff -= len;
                        if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
                                sb->sb_ctl -= len;
                        break;
                }
                len -= m->m_len;
                sbfree(sb, m);
                /*
                 * Do not put M_NOTREADY buffers to the free list, they
                 * are referenced from outside.
                 */
                if (m->m_flags & M_NOTREADY)
                        m = m->m_next;
                else {
                        struct mbuf *n;

                        n = m->m_next;
                        m->m_next = mfree;
                        mfree = m;
                        m = n;
                }
        }
        /*
         * Free any zero-length mbufs from the buffer.
         * For SOCK_DGRAM sockets such mbufs represent empty records.
         * XXX: For SOCK_STREAM sockets such mbufs can appear in the buffer,
         * when sosend_generic() needs to send only control data.
         */
        while (m && m->m_len == 0) {
                struct mbuf *n;

                sbfree(sb, m);
                n = m->m_next;
                m->m_next = mfree;
                mfree = m;
                m = n;
        }
        if (m) {
                sb->sb_mb = m;
                m->m_nextpkt = next;
        } else
                sb->sb_mb = next;
        /*
         * First part is an inline SB_EMPTY_FIXUP().  Second part makes sure
         * sb_lastrecord is up-to-date if we dropped part of the last record.
         */
        m = sb->sb_mb;
        if (m == NULL) {
                sb->sb_mbtail = NULL;
                sb->sb_lastrecord = NULL;
        } else if (m->m_nextpkt == NULL) {
                sb->sb_lastrecord = m;
        }

        return (mfree);
}
/*
 * Drop data from (the front of) a sockbuf.
 */
void
sbdrop_locked(struct sockbuf *sb, int len)
{

        SOCKBUF_LOCK_ASSERT(sb);
        m_freem(sbcut_internal(sb, len));
}

/*
 * Drop data from (the front of) a sockbuf,
 * and return it to caller.
 */
struct mbuf *
sbcut_locked(struct sockbuf *sb, int len)
{

        SOCKBUF_LOCK_ASSERT(sb);
        return (sbcut_internal(sb, len));
}

void
sbdrop(struct sockbuf *sb, int len)
{
        struct mbuf *mfree;

        SOCKBUF_LOCK(sb);
        mfree = sbcut_internal(sb, len);
        SOCKBUF_UNLOCK(sb);
        m_freem(mfree);
}
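
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * reliable protocol dropping "acked" bytes from the front of the send
 * buffer once the peer acknowledges them, then waking any blocked writers.
 */
static __unused void
example_ack_processed(struct socket *so, int acked)
{

        SOCKBUF_LOCK(&so->so_snd);
        sbdrop_locked(&so->so_snd, acked);
        sowwakeup_locked(so);	/* releases the send buffer lock */
}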
/*
 * Maintain a pointer and offset pair into the socket buffer mbuf chain to
 * avoid traversal of the entire socket buffer for larger offsets.
 */
struct mbuf *
sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff)
{
        struct mbuf *m, *ret;

        KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
        KASSERT(off + len <= sb->sb_acc, ("%s: beyond sb", __func__));
        KASSERT(sb->sb_sndptroff <= sb->sb_acc, ("%s: sndptroff broken", __func__));

        /*
         * Is off below stored offset? Happens on retransmits.
         * Just return, we can't help here.
         */
        if (sb->sb_sndptroff > off) {
                *moff = off;
                return (sb->sb_mb);
        }

        /* Return closest mbuf in chain for current offset. */
        *moff = off - sb->sb_sndptroff;
        m = ret = sb->sb_sndptr ? sb->sb_sndptr : sb->sb_mb;
        if (*moff == m->m_len) {
                *moff = 0;
                sb->sb_sndptroff += m->m_len;
                m = ret = m->m_next;
                KASSERT(ret->m_len > 0,
                    ("mbuf %p in sockbuf %p chain has no valid data", ret, sb));
        }

        /* Advance by len to be as close as possible for the next transmit. */
        for (off = off - sb->sb_sndptroff + len - 1;
             off > 0 && m != NULL && off >= m->m_len;
             m = m->m_next) {
                sb->sb_sndptroff += m->m_len;
                off -= m->m_len;
        }
        if (off > 0 && m == NULL)
                panic("%s: sockbuf %p and mbuf %p clashing", __func__, sb, ret);
        sb->sb_sndptr = m;

        return (ret);
}
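
/*
 * Illustrative sketch (hypothetical, not part of the original file): a
 * transmit path locating the mbuf for send offset "off" and copying "len"
 * bytes for the wire, the way tcp_output() uses sbsndptr().  The caller
 * must hold the sockbuf locked and guarantee off + len <= sb_acc.
 */
static __unused struct mbuf *
example_copy_for_transmit(struct sockbuf *sb, u_int off, u_int len)
{
        struct mbuf *mb;
        u_int moff;

        mb = sbsndptr(sb, off, len, &moff);
        return (m_copym(mb, moff, len, M_NOWAIT));
}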
/*
 * Return the first mbuf and the mbuf data offset for the provided
 * send offset without changing the "sb_sndptroff" field.
 */
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff)
{
        struct mbuf *m;

        KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));

        /*
         * If the "off" is below the stored offset, which happens on
         * retransmits, just use "sb_mb":
         */
        if (sb->sb_sndptr == NULL || sb->sb_sndptroff > off) {
                m = sb->sb_mb;
        } else {
                m = sb->sb_sndptr;
                off -= sb->sb_sndptroff;
        }
        while (off > 0 && m != NULL) {
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        *moff = off;
        return (m);
}
/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord_locked(struct sockbuf *sb)
{
        struct mbuf *m;

        SOCKBUF_LOCK_ASSERT(sb);

        m = sb->sb_mb;
        if (m) {
                sb->sb_mb = m->m_nextpkt;
                do {
                        sbfree(sb, m);
                        m = m_free(m);
                } while (m);
        }
        SB_EMPTY_FIXUP(sb);
}

/*
 * Drop a record off the front of a sockbuf and move the next record to the
 * front.
 */
void
sbdroprecord(struct sockbuf *sb)
{

        SOCKBUF_LOCK(sb);
        sbdroprecord_locked(sb);
        SOCKBUF_UNLOCK(sb);
}
/*
 * Create a "control" mbuf containing the specified data with the specified
 * type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(caddr_t p, int size, int type, int level)
{
        struct cmsghdr *cp;
        struct mbuf *m;

        if (CMSG_SPACE((u_int)size) > MCLBYTES)
                return ((struct mbuf *) NULL);
        if (CMSG_SPACE((u_int)size) > MLEN)
                m = m_getcl(M_NOWAIT, MT_CONTROL, 0);
        else
                m = m_get(M_NOWAIT, MT_CONTROL);
        if (m == NULL)
                return ((struct mbuf *) NULL);
        cp = mtod(m, struct cmsghdr *);
        m->m_len = 0;
        KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
            ("sbcreatecontrol: short mbuf"));
        /*
         * Don't leave the padding between the msg header and the
         * cmsg data and the padding after the cmsg data un-initialized.
         */
        bzero(cp, CMSG_SPACE((u_int)size));
        if (p != NULL)
                (void)memcpy(CMSG_DATA(cp), p, size);
        m->m_len = CMSG_SPACE(size);
        cp->cmsg_len = CMSG_LEN(size);
        cp->cmsg_level = level;
        cp->cmsg_type = type;
        return (m);
}
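
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * building a control mbuf that carries a struct timeval timestamp, the
 * way the SO_TIMESTAMP option does.  Assumes <sys/time.h> declarations
 * are in scope.
 */
static __unused struct mbuf *
example_timestamp_control(struct timeval *tv)
{

        return (sbcreatecontrol((caddr_t)tv, sizeof(*tv), SCM_TIMESTAMP,
            SOL_SOCKET));
}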
/*
 * This does the same for socket buffers that sotoxsocket does for sockets:
 * generate a user-format data structure describing the socket buffer.  Note
 * that the xsockbuf structure, since it is always embedded in a socket, does
 * not include a self pointer nor a length.  We make this entry point public
 * in case some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{

        xsb->sb_cc = sb->sb_ccc;
        xsb->sb_hiwat = sb->sb_hiwat;
        xsb->sb_mbcnt = sb->sb_mbcnt;
        xsb->sb_mcnt = sb->sb_mcnt;
        xsb->sb_ccnt = sb->sb_ccnt;
        xsb->sb_mbmax = sb->sb_mbmax;
        xsb->sb_lowat = sb->sb_lowat;
        xsb->sb_flags = sb->sb_flags;
        xsb->sb_timeo = sb->sb_timeo;
}
/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
SYSCTL_OID(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_ULONG|CTLFLAG_RW,
    &sb_max, 0, sysctl_handle_sb_max, "LU", "Maximum socket buffer size");
SYSCTL_ULONG(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
    &sb_efficiency, 0, "Socket buffer size waste factor");