2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_indata.h>
52 #include <netinet/sctp_bsd_addr.h>
53 #include <netinet/sctp_input.h>
54 #include <netinet/sctp_crc32.h>
55 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <machine/in_cksum.h>
61 #define SCTP_MAX_GAPS_INARRAY 4
63 	uint8_t right_edge;	/* mergeable on the right edge */
64 	uint8_t left_edge;	/* mergeable on the left edge */
67 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
70 struct sack_track sack_array[256] = {
71 {0, 0, 0, 0, /* 0x00 */
78 {1, 0, 1, 0, /* 0x01 */
85 {0, 0, 1, 0, /* 0x02 */
92 {1, 0, 1, 0, /* 0x03 */
99 {0, 0, 1, 0, /* 0x04 */
106 {1, 0, 2, 0, /* 0x05 */
113 {0, 0, 1, 0, /* 0x06 */
120 {1, 0, 1, 0, /* 0x07 */
127 {0, 0, 1, 0, /* 0x08 */
134 {1, 0, 2, 0, /* 0x09 */
141 {0, 0, 2, 0, /* 0x0a */
148 {1, 0, 2, 0, /* 0x0b */
155 {0, 0, 1, 0, /* 0x0c */
162 {1, 0, 2, 0, /* 0x0d */
169 {0, 0, 1, 0, /* 0x0e */
176 {1, 0, 1, 0, /* 0x0f */
183 {0, 0, 1, 0, /* 0x10 */
190 {1, 0, 2, 0, /* 0x11 */
197 {0, 0, 2, 0, /* 0x12 */
204 {1, 0, 2, 0, /* 0x13 */
211 {0, 0, 2, 0, /* 0x14 */
218 {1, 0, 3, 0, /* 0x15 */
225 {0, 0, 2, 0, /* 0x16 */
232 {1, 0, 2, 0, /* 0x17 */
239 {0, 0, 1, 0, /* 0x18 */
246 {1, 0, 2, 0, /* 0x19 */
253 {0, 0, 2, 0, /* 0x1a */
260 {1, 0, 2, 0, /* 0x1b */
267 {0, 0, 1, 0, /* 0x1c */
274 {1, 0, 2, 0, /* 0x1d */
281 {0, 0, 1, 0, /* 0x1e */
288 {1, 0, 1, 0, /* 0x1f */
295 {0, 0, 1, 0, /* 0x20 */
302 {1, 0, 2, 0, /* 0x21 */
309 {0, 0, 2, 0, /* 0x22 */
316 {1, 0, 2, 0, /* 0x23 */
323 {0, 0, 2, 0, /* 0x24 */
330 {1, 0, 3, 0, /* 0x25 */
337 {0, 0, 2, 0, /* 0x26 */
344 {1, 0, 2, 0, /* 0x27 */
351 {0, 0, 2, 0, /* 0x28 */
358 {1, 0, 3, 0, /* 0x29 */
365 {0, 0, 3, 0, /* 0x2a */
372 {1, 0, 3, 0, /* 0x2b */
379 {0, 0, 2, 0, /* 0x2c */
386 {1, 0, 3, 0, /* 0x2d */
393 {0, 0, 2, 0, /* 0x2e */
400 {1, 0, 2, 0, /* 0x2f */
407 {0, 0, 1, 0, /* 0x30 */
414 {1, 0, 2, 0, /* 0x31 */
421 {0, 0, 2, 0, /* 0x32 */
428 {1, 0, 2, 0, /* 0x33 */
435 {0, 0, 2, 0, /* 0x34 */
442 {1, 0, 3, 0, /* 0x35 */
449 {0, 0, 2, 0, /* 0x36 */
456 {1, 0, 2, 0, /* 0x37 */
463 {0, 0, 1, 0, /* 0x38 */
470 {1, 0, 2, 0, /* 0x39 */
477 {0, 0, 2, 0, /* 0x3a */
484 {1, 0, 2, 0, /* 0x3b */
491 {0, 0, 1, 0, /* 0x3c */
498 {1, 0, 2, 0, /* 0x3d */
505 {0, 0, 1, 0, /* 0x3e */
512 {1, 0, 1, 0, /* 0x3f */
519 {0, 0, 1, 0, /* 0x40 */
526 {1, 0, 2, 0, /* 0x41 */
533 {0, 0, 2, 0, /* 0x42 */
540 {1, 0, 2, 0, /* 0x43 */
547 {0, 0, 2, 0, /* 0x44 */
554 {1, 0, 3, 0, /* 0x45 */
561 {0, 0, 2, 0, /* 0x46 */
568 {1, 0, 2, 0, /* 0x47 */
575 {0, 0, 2, 0, /* 0x48 */
582 {1, 0, 3, 0, /* 0x49 */
589 {0, 0, 3, 0, /* 0x4a */
596 {1, 0, 3, 0, /* 0x4b */
603 {0, 0, 2, 0, /* 0x4c */
610 {1, 0, 3, 0, /* 0x4d */
617 {0, 0, 2, 0, /* 0x4e */
624 {1, 0, 2, 0, /* 0x4f */
631 {0, 0, 2, 0, /* 0x50 */
638 {1, 0, 3, 0, /* 0x51 */
645 {0, 0, 3, 0, /* 0x52 */
652 {1, 0, 3, 0, /* 0x53 */
659 {0, 0, 3, 0, /* 0x54 */
666 {1, 0, 4, 0, /* 0x55 */
673 {0, 0, 3, 0, /* 0x56 */
680 {1, 0, 3, 0, /* 0x57 */
687 {0, 0, 2, 0, /* 0x58 */
694 {1, 0, 3, 0, /* 0x59 */
701 {0, 0, 3, 0, /* 0x5a */
708 {1, 0, 3, 0, /* 0x5b */
715 {0, 0, 2, 0, /* 0x5c */
722 {1, 0, 3, 0, /* 0x5d */
729 {0, 0, 2, 0, /* 0x5e */
736 {1, 0, 2, 0, /* 0x5f */
743 {0, 0, 1, 0, /* 0x60 */
750 {1, 0, 2, 0, /* 0x61 */
757 {0, 0, 2, 0, /* 0x62 */
764 {1, 0, 2, 0, /* 0x63 */
771 {0, 0, 2, 0, /* 0x64 */
778 {1, 0, 3, 0, /* 0x65 */
785 {0, 0, 2, 0, /* 0x66 */
792 {1, 0, 2, 0, /* 0x67 */
799 {0, 0, 2, 0, /* 0x68 */
806 {1, 0, 3, 0, /* 0x69 */
813 {0, 0, 3, 0, /* 0x6a */
820 {1, 0, 3, 0, /* 0x6b */
827 {0, 0, 2, 0, /* 0x6c */
834 {1, 0, 3, 0, /* 0x6d */
841 {0, 0, 2, 0, /* 0x6e */
848 {1, 0, 2, 0, /* 0x6f */
855 {0, 0, 1, 0, /* 0x70 */
862 {1, 0, 2, 0, /* 0x71 */
869 {0, 0, 2, 0, /* 0x72 */
876 {1, 0, 2, 0, /* 0x73 */
883 {0, 0, 2, 0, /* 0x74 */
890 {1, 0, 3, 0, /* 0x75 */
897 {0, 0, 2, 0, /* 0x76 */
904 {1, 0, 2, 0, /* 0x77 */
911 {0, 0, 1, 0, /* 0x78 */
918 {1, 0, 2, 0, /* 0x79 */
925 {0, 0, 2, 0, /* 0x7a */
932 {1, 0, 2, 0, /* 0x7b */
939 {0, 0, 1, 0, /* 0x7c */
946 {1, 0, 2, 0, /* 0x7d */
953 {0, 0, 1, 0, /* 0x7e */
960 {1, 0, 1, 0, /* 0x7f */
967 {0, 1, 1, 0, /* 0x80 */
974 {1, 1, 2, 0, /* 0x81 */
981 {0, 1, 2, 0, /* 0x82 */
988 {1, 1, 2, 0, /* 0x83 */
995 {0, 1, 2, 0, /* 0x84 */
1002 {1, 1, 3, 0, /* 0x85 */
1009 {0, 1, 2, 0, /* 0x86 */
1016 {1, 1, 2, 0, /* 0x87 */
1023 {0, 1, 2, 0, /* 0x88 */
1030 {1, 1, 3, 0, /* 0x89 */
1037 {0, 1, 3, 0, /* 0x8a */
1044 {1, 1, 3, 0, /* 0x8b */
1051 {0, 1, 2, 0, /* 0x8c */
1058 {1, 1, 3, 0, /* 0x8d */
1065 {0, 1, 2, 0, /* 0x8e */
1072 {1, 1, 2, 0, /* 0x8f */
1079 {0, 1, 2, 0, /* 0x90 */
1086 {1, 1, 3, 0, /* 0x91 */
1093 {0, 1, 3, 0, /* 0x92 */
1100 {1, 1, 3, 0, /* 0x93 */
1107 {0, 1, 3, 0, /* 0x94 */
1114 {1, 1, 4, 0, /* 0x95 */
1121 {0, 1, 3, 0, /* 0x96 */
1128 {1, 1, 3, 0, /* 0x97 */
1135 {0, 1, 2, 0, /* 0x98 */
1142 {1, 1, 3, 0, /* 0x99 */
1149 {0, 1, 3, 0, /* 0x9a */
1156 {1, 1, 3, 0, /* 0x9b */
1163 {0, 1, 2, 0, /* 0x9c */
1170 {1, 1, 3, 0, /* 0x9d */
1177 {0, 1, 2, 0, /* 0x9e */
1184 {1, 1, 2, 0, /* 0x9f */
1191 {0, 1, 2, 0, /* 0xa0 */
1198 {1, 1, 3, 0, /* 0xa1 */
1205 {0, 1, 3, 0, /* 0xa2 */
1212 {1, 1, 3, 0, /* 0xa3 */
1219 {0, 1, 3, 0, /* 0xa4 */
1226 {1, 1, 4, 0, /* 0xa5 */
1233 {0, 1, 3, 0, /* 0xa6 */
1240 {1, 1, 3, 0, /* 0xa7 */
1247 {0, 1, 3, 0, /* 0xa8 */
1254 {1, 1, 4, 0, /* 0xa9 */
1261 {0, 1, 4, 0, /* 0xaa */
1268 {1, 1, 4, 0, /* 0xab */
1275 {0, 1, 3, 0, /* 0xac */
1282 {1, 1, 4, 0, /* 0xad */
1289 {0, 1, 3, 0, /* 0xae */
1296 {1, 1, 3, 0, /* 0xaf */
1303 {0, 1, 2, 0, /* 0xb0 */
1310 {1, 1, 3, 0, /* 0xb1 */
1317 {0, 1, 3, 0, /* 0xb2 */
1324 {1, 1, 3, 0, /* 0xb3 */
1331 {0, 1, 3, 0, /* 0xb4 */
1338 {1, 1, 4, 0, /* 0xb5 */
1345 {0, 1, 3, 0, /* 0xb6 */
1352 {1, 1, 3, 0, /* 0xb7 */
1359 {0, 1, 2, 0, /* 0xb8 */
1366 {1, 1, 3, 0, /* 0xb9 */
1373 {0, 1, 3, 0, /* 0xba */
1380 {1, 1, 3, 0, /* 0xbb */
1387 {0, 1, 2, 0, /* 0xbc */
1394 {1, 1, 3, 0, /* 0xbd */
1401 {0, 1, 2, 0, /* 0xbe */
1408 {1, 1, 2, 0, /* 0xbf */
1415 {0, 1, 1, 0, /* 0xc0 */
1422 {1, 1, 2, 0, /* 0xc1 */
1429 {0, 1, 2, 0, /* 0xc2 */
1436 {1, 1, 2, 0, /* 0xc3 */
1443 {0, 1, 2, 0, /* 0xc4 */
1450 {1, 1, 3, 0, /* 0xc5 */
1457 {0, 1, 2, 0, /* 0xc6 */
1464 {1, 1, 2, 0, /* 0xc7 */
1471 {0, 1, 2, 0, /* 0xc8 */
1478 {1, 1, 3, 0, /* 0xc9 */
1485 {0, 1, 3, 0, /* 0xca */
1492 {1, 1, 3, 0, /* 0xcb */
1499 {0, 1, 2, 0, /* 0xcc */
1506 {1, 1, 3, 0, /* 0xcd */
1513 {0, 1, 2, 0, /* 0xce */
1520 {1, 1, 2, 0, /* 0xcf */
1527 {0, 1, 2, 0, /* 0xd0 */
1534 {1, 1, 3, 0, /* 0xd1 */
1541 {0, 1, 3, 0, /* 0xd2 */
1548 {1, 1, 3, 0, /* 0xd3 */
1555 {0, 1, 3, 0, /* 0xd4 */
1562 {1, 1, 4, 0, /* 0xd5 */
1569 {0, 1, 3, 0, /* 0xd6 */
1576 {1, 1, 3, 0, /* 0xd7 */
1583 {0, 1, 2, 0, /* 0xd8 */
1590 {1, 1, 3, 0, /* 0xd9 */
1597 {0, 1, 3, 0, /* 0xda */
1604 {1, 1, 3, 0, /* 0xdb */
1611 {0, 1, 2, 0, /* 0xdc */
1618 {1, 1, 3, 0, /* 0xdd */
1625 {0, 1, 2, 0, /* 0xde */
1632 {1, 1, 2, 0, /* 0xdf */
1639 {0, 1, 1, 0, /* 0xe0 */
1646 {1, 1, 2, 0, /* 0xe1 */
1653 {0, 1, 2, 0, /* 0xe2 */
1660 {1, 1, 2, 0, /* 0xe3 */
1667 {0, 1, 2, 0, /* 0xe4 */
1674 {1, 1, 3, 0, /* 0xe5 */
1681 {0, 1, 2, 0, /* 0xe6 */
1688 {1, 1, 2, 0, /* 0xe7 */
1695 {0, 1, 2, 0, /* 0xe8 */
1702 {1, 1, 3, 0, /* 0xe9 */
1709 {0, 1, 3, 0, /* 0xea */
1716 {1, 1, 3, 0, /* 0xeb */
1723 {0, 1, 2, 0, /* 0xec */
1730 {1, 1, 3, 0, /* 0xed */
1737 {0, 1, 2, 0, /* 0xee */
1744 {1, 1, 2, 0, /* 0xef */
1751 {0, 1, 1, 0, /* 0xf0 */
1758 {1, 1, 2, 0, /* 0xf1 */
1765 {0, 1, 2, 0, /* 0xf2 */
1772 {1, 1, 2, 0, /* 0xf3 */
1779 {0, 1, 2, 0, /* 0xf4 */
1786 {1, 1, 3, 0, /* 0xf5 */
1793 {0, 1, 2, 0, /* 0xf6 */
1800 {1, 1, 2, 0, /* 0xf7 */
1807 {0, 1, 1, 0, /* 0xf8 */
1814 {1, 1, 2, 0, /* 0xf9 */
1821 {0, 1, 2, 0, /* 0xfa */
1828 {1, 1, 2, 0, /* 0xfb */
1835 {0, 1, 1, 0, /* 0xfc */
1842 {1, 1, 2, 0, /* 0xfd */
1849 {0, 1, 1, 0, /* 0xfe */
1856 {1, 1, 1, 0, /* 0xff */
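/*
 * Illustrative sketch only (not part of the build): how a SACK builder
 * might consult sack_array.  Each byte of the received-TSN mapping
 * indexes the table; the precomputed gap fragments are then shifted by
 * the byte's bit position relative to the cumulative TSN ack.  The
 * helper name is hypothetical; sack_array, struct sack_track (whose
 * full layout also carries a num_entries count) and struct
 * sctp_gap_ack_block come from this file and its headers.
 */
#if 0
static int
example_gaps_from_map_byte(uint8_t map_byte, uint16_t bit_offset,
    struct sctp_gap_ack_block *out, int max_out)
{
	struct sack_track *entry = &sack_array[map_byte];
	int i, cnt = 0;

	for (i = 0; i < entry->num_entries && cnt < max_out; i++, cnt++) {
		/* gap start/end in the table are relative to the cum-ack */
		out[cnt].start = htons(entry->gaps[i].start + bit_offset);
		out[cnt].end = htons(entry->gaps[i].end + bit_offset);
	}
	return (cnt);
}
#endif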
1867 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1868 int ipv4_addr_legal,
1869 int ipv6_addr_legal,
1871 int ipv4_local_scope,
1872 int local_scope SCTP_UNUSED,/* XXX */
1876 if ((loopback_scope == 0) &&
1877 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1879 		 * skip loopback if not in scope
1883 switch (ifa->address.sa.sa_family) {
1886 if (ipv4_addr_legal) {
1887 struct sockaddr_in *sin;
1889 sin = (struct sockaddr_in *)&ifa->address.sin;
1890 if (sin->sin_addr.s_addr == 0) {
1891 				/* not in scope, unspecified */
1894 if ((ipv4_local_scope == 0) &&
1895 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1896 /* private address not in scope */
1906 if (ipv6_addr_legal) {
1907 struct sockaddr_in6 *sin6;
1910 * Must update the flags, bummer, which means any
1911 * IFA locks must now be applied HERE <->
1914 sctp_gather_internal_ifa_flags(ifa);
1916 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1919 /* ok to use deprecated addresses? */
1920 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1921 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1922 				/* skip unspecified addresses */
1925 if ( /* (local_scope == 0) && */
1926 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1929 if ((site_scope == 0) &&
1930 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1944 static struct mbuf *
1945 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
1947 struct sctp_paramhdr *parmh;
1951 switch (ifa->address.sa.sa_family) {
1954 len = sizeof(struct sctp_ipv4addr_param);
1959 len = sizeof(struct sctp_ipv6addr_param);
1965 if (M_TRAILINGSPACE(m) >= len) {
1966 /* easy side we just drop it on the end */
1967 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1970 /* Need more space */
1972 while (SCTP_BUF_NEXT(mret) != NULL) {
1973 mret = SCTP_BUF_NEXT(mret);
1975 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
1976 if (SCTP_BUF_NEXT(mret) == NULL) {
1977 /* We are hosed, can't add more addresses */
1980 mret = SCTP_BUF_NEXT(mret);
1981 parmh = mtod(mret, struct sctp_paramhdr *);
1983 /* now add the parameter */
1984 switch (ifa->address.sa.sa_family) {
1988 struct sctp_ipv4addr_param *ipv4p;
1989 struct sockaddr_in *sin;
1991 sin = (struct sockaddr_in *)&ifa->address.sin;
1992 ipv4p = (struct sctp_ipv4addr_param *)parmh;
1993 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
1994 parmh->param_length = htons(len);
1995 ipv4p->addr = sin->sin_addr.s_addr;
1996 SCTP_BUF_LEN(mret) += len;
2003 struct sctp_ipv6addr_param *ipv6p;
2004 struct sockaddr_in6 *sin6;
2006 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
2007 ipv6p = (struct sctp_ipv6addr_param *)parmh;
2008 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
2009 parmh->param_length = htons(len);
2010 memcpy(ipv6p->addr, &sin6->sin6_addr,
2011 sizeof(ipv6p->addr));
2012 /* clear embedded scope in the address */
2013 in6_clearscope((struct in6_addr *)ipv6p->addr);
2014 SCTP_BUF_LEN(mret) += len;
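/*
 * Note: the parameters appended above are the plain RFC 4960 TLVs
 * carried in an INIT/INIT-ACK: an IPv4 Address parameter is 8 bytes
 * (type 5, length 8, 4-byte address) and an IPv6 Address parameter is
 * 20 bytes (type 6, length 20, 16-byte address with the scope cleared).
 */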
2026 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2027 struct sctp_scoping *scope,
2028 struct mbuf *m_at, int cnt_inits_to)
2030 struct sctp_vrf *vrf = NULL;
2031 int cnt, limit_out = 0, total_count;
2034 vrf_id = inp->def_vrf_id;
2035 SCTP_IPI_ADDR_RLOCK();
2036 vrf = sctp_find_vrf(vrf_id);
2038 SCTP_IPI_ADDR_RUNLOCK();
2041 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2042 struct sctp_ifa *sctp_ifap;
2043 struct sctp_ifn *sctp_ifnp;
2046 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2048 cnt = SCTP_ADDRESS_LIMIT;
2051 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2052 if ((scope->loopback_scope == 0) &&
2053 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2055 * Skip loopback devices if loopback_scope
2060 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2061 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2064 if (sctp_is_address_in_scope(sctp_ifap,
2065 scope->ipv4_addr_legal,
2066 scope->ipv6_addr_legal,
2067 scope->loopback_scope,
2068 scope->ipv4_local_scope,
2070 scope->site_scope, 1) == 0) {
2074 if (cnt > SCTP_ADDRESS_LIMIT) {
2078 if (cnt > SCTP_ADDRESS_LIMIT) {
2085 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2087 if ((scope->loopback_scope == 0) &&
2088 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2090 * Skip loopback devices if
2091 * loopback_scope not set
2095 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2096 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2099 if (sctp_is_address_in_scope(sctp_ifap,
2100 scope->ipv4_addr_legal,
2101 scope->ipv6_addr_legal,
2102 scope->loopback_scope,
2103 scope->ipv4_local_scope,
2105 scope->site_scope, 0) == 0) {
2108 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
2119 if (total_count > SCTP_ADDRESS_LIMIT) {
2120 /* No more addresses */
2128 struct sctp_laddr *laddr;
2131 /* First, how many ? */
2132 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2133 if (laddr->ifa == NULL) {
2136 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2138 				 * Address being deleted by the system, don't
2142 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2144 				 * Address being deleted on this ep, don't
2149 if (sctp_is_address_in_scope(laddr->ifa,
2150 scope->ipv4_addr_legal,
2151 scope->ipv6_addr_legal,
2152 scope->loopback_scope,
2153 scope->ipv4_local_scope,
2155 scope->site_scope, 1) == 0) {
2161 * To get through a NAT we only list addresses if we have
2162 * more than one. That way if you just bind a single address
2163 * we let the source of the init dictate our address.
2167 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2168 if (laddr->ifa == NULL) {
2171 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2174 if (sctp_is_address_in_scope(laddr->ifa,
2175 scope->ipv4_addr_legal,
2176 scope->ipv6_addr_legal,
2177 scope->loopback_scope,
2178 scope->ipv4_local_scope,
2180 scope->site_scope, 0) == 0) {
2183 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
2185 if (cnt >= SCTP_ADDRESS_LIMIT) {
2191 SCTP_IPI_ADDR_RUNLOCK();
2195 static struct sctp_ifa *
2196 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2197 uint8_t dest_is_loop,
2198 uint8_t dest_is_priv,
2201 uint8_t dest_is_global = 0;
2203 /* dest_is_priv is true if destination is a private address */
2204 	/* dest_is_loop is true if destination is a loopback address */
2207 	 * Here we determine if it's a preferred address. A preferred address
2208 	 * means it is the same scope or higher scope than the destination.
2209 * L = loopback, P = private, G = global
2210 * -----------------------------------------
2211 * src | dest | result
2212 * ----------------------------------------
2214 * -----------------------------------------
2215 * P | L | yes-v4 no-v6
2216 * -----------------------------------------
2217 * G | L | yes-v4 no-v6
2218 * -----------------------------------------
2220 * -----------------------------------------
2222 * -----------------------------------------
2224 * -----------------------------------------
2226 * -----------------------------------------
2228 * -----------------------------------------
2230 * -----------------------------------------
2233 if (ifa->address.sa.sa_family != fam) {
2234 /* forget mis-matched family */
2237 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2240 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2241 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2242 /* Ok the address may be ok */
2244 if (fam == AF_INET6) {
2245 		/* ok to use deprecated addresses? no, let's not! */
2246 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2247 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2250 if (ifa->src_is_priv && !ifa->src_is_loop) {
2252 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2256 if (ifa->src_is_glob) {
2258 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2265 	 * Now that we know what is what, implement our table. This could in
2266 * theory be done slicker (it used to be), but this is
2267 * straightforward and easier to validate :-)
2269 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2270 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2271 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2272 dest_is_loop, dest_is_priv, dest_is_global);
2274 if ((ifa->src_is_loop) && (dest_is_priv)) {
2275 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2278 if ((ifa->src_is_glob) && (dest_is_priv)) {
2279 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2282 if ((ifa->src_is_loop) && (dest_is_global)) {
2283 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2286 if ((ifa->src_is_priv) && (dest_is_global)) {
2287 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2290 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2291 	/* it's a preferred address */
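/*
 * A condensed restatement of the scope checks above, purely for
 * readability (the IPv6 deprecated-address and link-local special
 * cases are omitted); the helper is hypothetical and unused.
 */
#if 0
static int
example_is_preferred(int src_loop, int src_priv, int src_glob,
    int dest_priv, int dest_glob)
{
	if (src_loop && dest_priv)
		return (0);
	if (src_glob && dest_priv)
		return (0);
	if (src_loop && dest_glob)
		return (0);
	if (src_priv && dest_glob)
		return (0);
	return (1);
}
#endif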
2295 static struct sctp_ifa *
2296 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2297 uint8_t dest_is_loop,
2298 uint8_t dest_is_priv,
2301 uint8_t dest_is_global = 0;
2304 	 * Here we determine if it's an acceptable address. An acceptable
2305 	 * address means it is the same scope or higher scope, but we can
2306 	 * allow for NAT, which means it's ok to have a global dest and a
2309 * L = loopback, P = private, G = global
2310 * -----------------------------------------
2311 * src | dest | result
2312 * -----------------------------------------
2314 * -----------------------------------------
2315 * P | L | yes-v4 no-v6
2316 * -----------------------------------------
2318 * -----------------------------------------
2320 * -----------------------------------------
2322 * -----------------------------------------
2323 * G | P | yes - May not work
2324 * -----------------------------------------
2326 * -----------------------------------------
2327 * P | G | yes - May not work
2328 * -----------------------------------------
2330 * -----------------------------------------
2333 if (ifa->address.sa.sa_family != fam) {
2334 /* forget non matching family */
2335 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2336 ifa->address.sa.sa_family, fam);
2339 /* Ok the address may be ok */
2340 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2341 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2342 dest_is_loop, dest_is_priv);
2343 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2347 if (fam == AF_INET6) {
2348 /* ok to use deprecated addresses? */
2349 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2352 if (ifa->src_is_priv) {
2353 /* Special case, linklocal to loop */
2360 * Now that we know what is what, implement our table. This could in
2361 * theory be done slicker (it used to be), but this is
2362 * straightforward and easier to validate :-)
2364 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2367 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2370 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2373 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2376 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2377 	/* it's an acceptable address */
2382 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2384 struct sctp_laddr *laddr;
2387 /* There are no restrictions, no TCB :-) */
2390 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2391 if (laddr->ifa == NULL) {
2392 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2396 if (laddr->ifa == ifa) {
2397 /* Yes it is on the list */
2406 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2408 struct sctp_laddr *laddr;
2412 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2413 if (laddr->ifa == NULL) {
2414 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2418 if ((laddr->ifa == ifa) && laddr->action == 0)
2427 static struct sctp_ifa *
2428 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2431 int non_asoc_addr_ok,
2432 uint8_t dest_is_priv,
2433 uint8_t dest_is_loop,
2436 struct sctp_laddr *laddr, *starting_point;
2439 struct sctp_ifn *sctp_ifn;
2440 struct sctp_ifa *sctp_ifa, *sifa;
2441 struct sctp_vrf *vrf;
2444 vrf = sctp_find_vrf(vrf_id);
2448 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2449 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2450 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2452 * first question, is the ifn we will emit on in our list, if so, we
2453 * want such an address. Note that we first looked for a preferred
2457 /* is a preferred one on the interface we route out? */
2458 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2459 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2460 (non_asoc_addr_ok == 0))
2462 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2467 if (sctp_is_addr_in_ep(inp, sifa)) {
2468 atomic_add_int(&sifa->refcount, 1);
2474 	 * ok, now we need to find one on the list of the addresses. We
2475 	 * can't get one on the emitting interface, so let's first find a
2476 	 * preferred one. If not that, an acceptable one; otherwise... we
2479 starting_point = inp->next_addr_touse;
2481 if (inp->next_addr_touse == NULL) {
2482 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2485 for (laddr = inp->next_addr_touse; laddr;
2486 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2487 if (laddr->ifa == NULL) {
2488 /* address has been removed */
2491 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2492 /* address is being deleted */
2495 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2499 atomic_add_int(&sifa->refcount, 1);
2502 if (resettotop == 0) {
2503 inp->next_addr_touse = NULL;
2506 inp->next_addr_touse = starting_point;
2509 if (inp->next_addr_touse == NULL) {
2510 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2513 /* ok, what about an acceptable address in the inp */
2514 for (laddr = inp->next_addr_touse; laddr;
2515 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2516 if (laddr->ifa == NULL) {
2517 /* address has been removed */
2520 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2521 /* address is being deleted */
2524 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2528 atomic_add_int(&sifa->refcount, 1);
2531 if (resettotop == 0) {
2532 inp->next_addr_touse = NULL;
2533 goto once_again_too;
2536 * no address bound can be a source for the destination we are in
2544 static struct sctp_ifa *
2545 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2546 struct sctp_tcb *stcb,
2549 uint8_t dest_is_priv,
2550 uint8_t dest_is_loop,
2551 int non_asoc_addr_ok,
2554 struct sctp_laddr *laddr, *starting_point;
2556 struct sctp_ifn *sctp_ifn;
2557 struct sctp_ifa *sctp_ifa, *sifa;
2558 uint8_t start_at_beginning = 0;
2559 struct sctp_vrf *vrf;
2563 * first question, is the ifn we will emit on in our list, if so, we
2566 vrf = sctp_find_vrf(vrf_id);
2570 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2571 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2572 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2575 * first question, is the ifn we will emit on in our list? If so,
2576 * we want that one. First we look for a preferred. Second, we go
2577 * for an acceptable.
2580 /* first try for a preferred address on the ep */
2581 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2582 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2584 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2585 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2588 if (((non_asoc_addr_ok == 0) &&
2589 (sctp_is_addr_restricted(stcb, sifa))) ||
2590 (non_asoc_addr_ok &&
2591 (sctp_is_addr_restricted(stcb, sifa)) &&
2592 (!sctp_is_addr_pending(stcb, sifa)))) {
2593 /* on the no-no list */
2596 atomic_add_int(&sifa->refcount, 1);
2600 /* next try for an acceptable address on the ep */
2601 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2602 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2604 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2605 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2608 if (((non_asoc_addr_ok == 0) &&
2609 (sctp_is_addr_restricted(stcb, sifa))) ||
2610 (non_asoc_addr_ok &&
2611 (sctp_is_addr_restricted(stcb, sifa)) &&
2612 (!sctp_is_addr_pending(stcb, sifa)))) {
2613 /* on the no-no list */
2616 atomic_add_int(&sifa->refcount, 1);
2623 * if we can't find one like that then we must look at all addresses
2624 * bound to pick one at first preferable then secondly acceptable.
2626 starting_point = stcb->asoc.last_used_address;
2628 if (stcb->asoc.last_used_address == NULL) {
2629 start_at_beginning = 1;
2630 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2632 /* search beginning with the last used address */
2633 for (laddr = stcb->asoc.last_used_address; laddr;
2634 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2635 if (laddr->ifa == NULL) {
2636 /* address has been removed */
2639 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2640 /* address is being deleted */
2643 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2646 if (((non_asoc_addr_ok == 0) &&
2647 (sctp_is_addr_restricted(stcb, sifa))) ||
2648 (non_asoc_addr_ok &&
2649 (sctp_is_addr_restricted(stcb, sifa)) &&
2650 (!sctp_is_addr_pending(stcb, sifa)))) {
2651 /* on the no-no list */
2654 stcb->asoc.last_used_address = laddr;
2655 atomic_add_int(&sifa->refcount, 1);
2658 if (start_at_beginning == 0) {
2659 stcb->asoc.last_used_address = NULL;
2660 goto sctp_from_the_top;
2662 /* now try for any higher scope than the destination */
2663 stcb->asoc.last_used_address = starting_point;
2664 start_at_beginning = 0;
2666 if (stcb->asoc.last_used_address == NULL) {
2667 start_at_beginning = 1;
2668 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2670 /* search beginning with the last used address */
2671 for (laddr = stcb->asoc.last_used_address; laddr;
2672 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2673 if (laddr->ifa == NULL) {
2674 /* address has been removed */
2677 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2678 /* address is being deleted */
2681 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2685 if (((non_asoc_addr_ok == 0) &&
2686 (sctp_is_addr_restricted(stcb, sifa))) ||
2687 (non_asoc_addr_ok &&
2688 (sctp_is_addr_restricted(stcb, sifa)) &&
2689 (!sctp_is_addr_pending(stcb, sifa)))) {
2690 /* on the no-no list */
2693 stcb->asoc.last_used_address = laddr;
2694 atomic_add_int(&sifa->refcount, 1);
2697 if (start_at_beginning == 0) {
2698 stcb->asoc.last_used_address = NULL;
2699 goto sctp_from_the_top2;
2704 static struct sctp_ifa *
2705 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2706 struct sctp_tcb *stcb,
2707 int non_asoc_addr_ok,
2708 uint8_t dest_is_loop,
2709 uint8_t dest_is_priv,
2715 struct sctp_ifa *ifa, *sifa;
2716 int num_eligible_addr = 0;
2719 struct sockaddr_in6 sin6, lsa6;
2721 if (fam == AF_INET6) {
2722 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2723 (void)sa6_recoverscope(&sin6);
2726 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2727 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2728 (non_asoc_addr_ok == 0))
2730 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2735 if (fam == AF_INET6 &&
2737 sifa->src_is_loop && sifa->src_is_priv) {
2739 * don't allow fe80::1 to be a src on loop ::1, we
2740 * don't list it to the peer so we will get an
2745 if (fam == AF_INET6 &&
2746 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2747 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2749 * link-local <-> link-local must belong to the same
2752 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2753 (void)sa6_recoverscope(&lsa6);
2754 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2761 		 * Check if the IPv6 address matches the next-hop. In the
2762 		 * mobile case, an old IPv6 address may not be deleted from the
2763 		 * interface. Then, the interface has both previous and new
2764 		 * addresses. We should use the one corresponding to the
2765 * next-hop. (by micchie)
2768 if (stcb && fam == AF_INET6 &&
2769 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2770 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2777 /* Avoid topologically incorrect IPv4 address */
2778 if (stcb && fam == AF_INET &&
2779 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2780 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2786 if (sctp_is_address_in_scope(ifa,
2787 stcb->asoc.ipv4_addr_legal,
2788 stcb->asoc.ipv6_addr_legal,
2789 stcb->asoc.loopback_scope,
2790 stcb->asoc.ipv4_local_scope,
2791 stcb->asoc.local_scope,
2792 stcb->asoc.site_scope, 0) == 0) {
2795 if (((non_asoc_addr_ok == 0) &&
2796 (sctp_is_addr_restricted(stcb, sifa))) ||
2797 (non_asoc_addr_ok &&
2798 (sctp_is_addr_restricted(stcb, sifa)) &&
2799 (!sctp_is_addr_pending(stcb, sifa)))) {
2801 * It is restricted for some reason..
2802 * probably not yet added.
2807 if (num_eligible_addr >= addr_wanted) {
2810 num_eligible_addr++;
2817 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2818 struct sctp_tcb *stcb,
2819 int non_asoc_addr_ok,
2820 uint8_t dest_is_loop,
2821 uint8_t dest_is_priv,
2824 struct sctp_ifa *ifa, *sifa;
2825 int num_eligible_addr = 0;
2827 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2828 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2829 (non_asoc_addr_ok == 0)) {
2832 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2838 if (sctp_is_address_in_scope(ifa,
2839 stcb->asoc.ipv4_addr_legal,
2840 stcb->asoc.ipv6_addr_legal,
2841 stcb->asoc.loopback_scope,
2842 stcb->asoc.ipv4_local_scope,
2843 stcb->asoc.local_scope,
2844 stcb->asoc.site_scope, 0) == 0) {
2847 if (((non_asoc_addr_ok == 0) &&
2848 (sctp_is_addr_restricted(stcb, sifa))) ||
2849 (non_asoc_addr_ok &&
2850 (sctp_is_addr_restricted(stcb, sifa)) &&
2851 (!sctp_is_addr_pending(stcb, sifa)))) {
2853 * It is restricted for some reason..
2854 * probably not yet added.
2859 num_eligible_addr++;
2861 return (num_eligible_addr);
2864 static struct sctp_ifa *
2865 sctp_choose_boundall(struct sctp_tcb *stcb,
2866 struct sctp_nets *net,
2869 uint8_t dest_is_priv,
2870 uint8_t dest_is_loop,
2871 int non_asoc_addr_ok,
2874 int cur_addr_num = 0, num_preferred = 0;
2876 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2877 struct sctp_ifa *sctp_ifa, *sifa;
2879 struct sctp_vrf *vrf;
2887 * For boundall we can use any address in the association.
2888 * If non_asoc_addr_ok is set we can use any address (at least in
2889 * theory). So we look for preferred addresses first. If we find one,
2890 * we use it. Otherwise we next try to get an address on the
2891 * interface, which we should be able to do (unless non_asoc_addr_ok
2892 * is false and we are routed out that way). In these cases where we
2893 * can't use the address of the interface we go through all the
2894 * ifn's looking for an address we can use and fill that in. Punting
2895 * means we send back address 0, which will probably cause problems
2896 * actually since then IP will fill in the address of the route ifn,
2897 * which means we probably already rejected it.. i.e. here comes an
2900 vrf = sctp_find_vrf(vrf_id);
2904 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2905 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2906 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2907 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2908 if (sctp_ifn == NULL) {
2909 /* ?? We don't have this guy ?? */
2910 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2911 goto bound_all_plan_b;
2913 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
2914 ifn_index, sctp_ifn->ifn_name);
2917 cur_addr_num = net->indx_of_eligible_next_to_use;
2919 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
2924 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
2925 num_preferred, sctp_ifn->ifn_name);
2926 if (num_preferred == 0) {
2928 * no eligible addresses, we must use some other interface
2929 * address if we can find one.
2931 goto bound_all_plan_b;
2934 * Ok we have num_eligible_addr set with how many we can use, this
2935 * may vary from call to call due to addresses being deprecated
2938 if (cur_addr_num >= num_preferred) {
2942 * select the nth address from the list (where cur_addr_num is the
2943 * nth) and 0 is the first one, 1 is the second one etc...
2945 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
2947 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2948 dest_is_priv, cur_addr_num, fam, ro);
2950 /* if sctp_ifa is NULL something changed??, fall to plan b. */
2952 atomic_add_int(&sctp_ifa->refcount, 1);
2954 			/* save off the next one we will want */
2955 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
2960 * plan_b: Look at all interfaces and find a preferred address. If
2961 * no preferred fall through to plan_c.
2964 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
2965 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
2966 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
2967 sctp_ifn->ifn_name);
2968 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
2969 /* wrong base scope */
2970 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
2973 if ((sctp_ifn == looked_at) && looked_at) {
2974 /* already looked at this guy */
2975 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
2978 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
2979 dest_is_loop, dest_is_priv, fam);
2980 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2981 "Found ifn:%p %d preferred source addresses\n",
2982 ifn, num_preferred);
2983 if (num_preferred == 0) {
2984 /* None on this interface. */
2985 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
2988 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2989 "num preferred:%d on interface:%p cur_addr_num:%d\n",
2990 num_preferred, sctp_ifn, cur_addr_num);
2993 * Ok we have num_eligible_addr set with how many we can
2994 * use, this may vary from call to call due to addresses
2995 * being deprecated etc..
2997 if (cur_addr_num >= num_preferred) {
3000 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
3001 dest_is_priv, cur_addr_num, fam, ro);
3005 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3006 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3008 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3009 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3010 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3011 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3013 atomic_add_int(&sifa->refcount, 1);
3017 again_with_private_addresses_allowed:
3019 /* plan_c: do we have an acceptable address on the emit interface */
3021 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3022 if (emit_ifn == NULL) {
3023 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3026 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3027 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", sctp_ifa);
3028 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3029 (non_asoc_addr_ok == 0)) {
3030 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3033 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3036 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3040 if (sctp_is_address_in_scope(sifa,
3041 stcb->asoc.ipv4_addr_legal,
3042 stcb->asoc.ipv6_addr_legal,
3043 stcb->asoc.loopback_scope,
3044 stcb->asoc.ipv4_local_scope,
3045 stcb->asoc.local_scope,
3046 stcb->asoc.site_scope, 0) == 0) {
3047 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3051 if (((non_asoc_addr_ok == 0) &&
3052 (sctp_is_addr_restricted(stcb, sifa))) ||
3053 (non_asoc_addr_ok &&
3054 (sctp_is_addr_restricted(stcb, sifa)) &&
3055 (!sctp_is_addr_pending(stcb, sifa)))) {
3057 * It is restricted for some reason..
3058 * probably not yet added.
3060 			SCTPDBG(SCTP_DEBUG_OUTPUT2, "It's restricted\n");
3065 SCTP_PRINTF("Stcb is null - no print\n");
3067 atomic_add_int(&sifa->refcount, 1);
3072 * plan_d: We are in trouble. No preferred address on the emit
3073 * interface. And not even a preferred address on all interfaces. Go
3074 * out and see if we can find an acceptable address somewhere
3075 * amongst all interfaces.
3077 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", looked_at);
3078 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3079 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3080 /* wrong base scope */
3083 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3084 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3085 (non_asoc_addr_ok == 0))
3087 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3093 if (sctp_is_address_in_scope(sifa,
3094 stcb->asoc.ipv4_addr_legal,
3095 stcb->asoc.ipv6_addr_legal,
3096 stcb->asoc.loopback_scope,
3097 stcb->asoc.ipv4_local_scope,
3098 stcb->asoc.local_scope,
3099 stcb->asoc.site_scope, 0) == 0) {
3103 if (((non_asoc_addr_ok == 0) &&
3104 (sctp_is_addr_restricted(stcb, sifa))) ||
3105 (non_asoc_addr_ok &&
3106 (sctp_is_addr_restricted(stcb, sifa)) &&
3107 (!sctp_is_addr_pending(stcb, sifa)))) {
3109 * It is restricted for some
3110 * reason.. probably not yet added.
3120 if ((retried == 0) && (stcb->asoc.ipv4_local_scope == 0)) {
3121 stcb->asoc.ipv4_local_scope = 1;
3123 goto again_with_private_addresses_allowed;
3124 } else if (retried == 1) {
3125 stcb->asoc.ipv4_local_scope = 0;
3132 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3133 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3134 /* wrong base scope */
3137 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3138 struct sctp_ifa *tmp_sifa;
3140 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3141 (non_asoc_addr_ok == 0))
3143 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3146 if (tmp_sifa == NULL) {
3149 if (tmp_sifa == sifa) {
3153 if (sctp_is_address_in_scope(tmp_sifa,
3154 stcb->asoc.ipv4_addr_legal,
3155 stcb->asoc.ipv6_addr_legal,
3156 stcb->asoc.loopback_scope,
3157 stcb->asoc.ipv4_local_scope,
3158 stcb->asoc.local_scope,
3159 stcb->asoc.site_scope, 0) == 0) {
3162 if (((non_asoc_addr_ok == 0) &&
3163 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3164 (non_asoc_addr_ok &&
3165 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3166 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3176 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3177 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3178 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3183 atomic_add_int(&sifa->refcount, 1);
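/*
 * Recap of the fallback ladder implemented above: Plan A tries a
 * preferred address on the interface the route points at, Plan B a
 * preferred address on any other interface, Plan C an acceptable
 * address on the emit interface, and Plan D an acceptable address on
 * any interface.  If that still fails and the IPv4 private scope was
 * the blocker, the search is retried once with private addresses
 * allowed, and the address finally chosen is marked restricted.
 */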
3191 /* tcb may be NULL */
3193 sctp_source_address_selection(struct sctp_inpcb *inp,
3194 struct sctp_tcb *stcb,
3196 struct sctp_nets *net,
3197 int non_asoc_addr_ok, uint32_t vrf_id)
3199 struct sctp_ifa *answer;
3200 uint8_t dest_is_priv, dest_is_loop;
3204 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3208 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3213 * Rules: - Find the route if needed, cache if I can. - Look at
3214 	 * interface address in the route. Is it in the bound list? If so we
3215 * have the best source. - If not we must rotate amongst the
3220 	 * Do we need to pay attention to scope? We can have a private address
3221 * or a global address we are sourcing or sending to. So if we draw
3223 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3225 * ------------------------------------------
3226 * source * dest * result
3227 * -----------------------------------------
3228 * <a> Private * Global * NAT
3229 * -----------------------------------------
3230 * <b> Private * Private * No problem
3231 * -----------------------------------------
3232 * <c> Global * Private * Huh, How will this work?
3233 * -----------------------------------------
3234 * <d> Global * Global * No Problem
3235 *------------------------------------------
3236 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3238 *------------------------------------------
3239 * source * dest * result
3240 * -----------------------------------------
3241 * <a> Linklocal * Global *
3242 * -----------------------------------------
3243 * <b> Linklocal * Linklocal * No problem
3244 * -----------------------------------------
3245 * <c> Global * Linklocal * Huh, How will this work?
3246 * -----------------------------------------
3247 * <d> Global * Global * No Problem
3248 *------------------------------------------
3249 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3251 * And then we add to that what happens if there are multiple addresses
3252 * assigned to an interface. Remember the ifa on a ifn is a linked
3253 * list of addresses. So one interface can have more than one IP
3254 * address. What happens if we have both a private and a global
3255 * address? Do we then use context of destination to sort out which
3256 	 * one is best? And what about NATs? Sending P->G may get you a NAT
3257 	 * translation, or should you select the G that's on the interface in
3262 * - count the number of addresses on the interface.
3263 * - if it is one, no problem except case <c>.
3264 * For <a> we will assume a NAT out there.
3265 * - if there are more than one, then we need to worry about scope P
3266 * or G. We should prefer G -> G and P -> P if possible.
3267 * Then as a secondary fall back to mixed types G->P being a last
3269 	 * - The above all works for bound-all, but for bound-specific we need to
3270 * use the same concept but instead only consider the bound
3271 * addresses. If the bound set is NOT assigned to the interface then
3272 * we must use rotation amongst the bound addresses..
3274 if (ro->ro_rt == NULL) {
3276 * Need a route to cache.
3278 SCTP_RTALLOC(ro, vrf_id);
3280 if (ro->ro_rt == NULL) {
3283 fam = ro->ro_dst.sa_family;
3284 dest_is_priv = dest_is_loop = 0;
3285 /* Setup our scopes for the destination */
3289 /* Scope based on outbound address */
3290 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3293 /* mark it as local */
3294 net->addr_is_local = 1;
3296 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3303 /* Scope based on outbound address */
3304 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3305 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3307 * If the address is a loopback address, which
3308 * consists of "::1" OR "fe80::1%lo0", we are
3309 * loopback scope. But we don't use dest_is_priv
3310 * (link local addresses).
3314 /* mark it as local */
3315 net->addr_is_local = 1;
3317 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3323 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3324 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3325 SCTP_IPI_ADDR_RLOCK();
3326 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3330 answer = sctp_choose_boundall(stcb, net, ro, vrf_id,
3331 dest_is_priv, dest_is_loop,
3332 non_asoc_addr_ok, fam);
3333 SCTP_IPI_ADDR_RUNLOCK();
3340 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3341 vrf_id, dest_is_priv,
3343 non_asoc_addr_ok, fam);
3345 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3350 SCTP_IPI_ADDR_RUNLOCK();
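/*
 * Illustrative caller sketch (hedged, not part of the build): how a
 * transmit path can cache the selected source per destination.  The
 * wrapper name is hypothetical; sctp_source_address_selection() and
 * the struct sctp_nets fields used are the real ones in this file.
 */
#if 0
static struct sctp_ifa *
example_cached_src(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t vrf_id)
{
	if (net->src_addr_selected == 0) {
		/* pick and cache a source for this destination's route */
		net->ro._s_addr = sctp_source_address_selection(inp, stcb,
		    (sctp_route_t *)&net->ro, net, 0, vrf_id);
		net->src_addr_selected = 1;
	}
	return (net->ro._s_addr);
}
#endif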
3355 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3358 int tlen, at, found;
3359 struct sctp_sndinfo sndinfo;
3360 struct sctp_prinfo prinfo;
3361 struct sctp_authinfo authinfo;
3363 tlen = SCTP_BUF_LEN(control);
3367 * Independent of how many mbufs, find the c_type inside the control
3368 * structure and copy out the data.
3371 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3372 /* There is not enough room for one more. */
3375 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3376 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
3377 			/* We don't have a complete CMSG header. */
3380 if (((int)cmh.cmsg_len + at) > tlen) {
3381 /* We don't have the complete CMSG. */
3384 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3385 ((c_type == cmh.cmsg_type) ||
3386 ((c_type == SCTP_SNDRCV) &&
3387 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3388 (cmh.cmsg_type == SCTP_PRINFO) ||
3389 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3390 if (c_type == cmh.cmsg_type) {
3391 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
3394 /* It is exactly what we want. Copy it out. */
3395 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), cpsize, (caddr_t)data);
3398 struct sctp_sndrcvinfo *sndrcvinfo;
3400 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3402 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3405 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3407 switch (cmh.cmsg_type) {
3409 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_sndinfo)) {
3412 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3413 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3414 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3415 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3416 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3417 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3420 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_prinfo)) {
3423 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3424 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3425 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3428 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_authinfo)) {
3431 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3432 sndrcvinfo->sinfo_keynumber_valid = 1;
3433 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3441 at += CMSG_ALIGN(cmh.cmsg_len);
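/*
 * For reference, a hedged userland sketch of the kind of control
 * message the parser above looks for: an SCTP_SNDINFO cmsg at level
 * IPPROTO_SCTP built with the standard CMSG_* macros and passed to
 * sendmsg().  Purely illustrative; the values shown are made up.
 */
#if 0
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct sctp_sndinfo *sndinfo;
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];

	memset(&msg, 0, sizeof(msg));
	memset(cbuf, 0, sizeof(cbuf));
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDINFO;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
	sndinfo = (struct sctp_sndinfo *)CMSG_DATA(cmsg);
	sndinfo->snd_sid = 1;			/* stream id */
	sndinfo->snd_flags = 0;
	sndinfo->snd_ppid = htonl(0);		/* payload protocol id */
	/* set up msg_name/msg_iov as usual, then call sendmsg() */
#endif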
3447 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3451 struct sctp_initmsg initmsg;
3454 struct sockaddr_in sin;
3458 struct sockaddr_in6 sin6;
3462 tlen = SCTP_BUF_LEN(control);
3465 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3466 /* There is not enough room for one more. */
3470 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3471 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
3472 			/* We don't have a complete CMSG header. */
3476 if (((int)cmh.cmsg_len + at) > tlen) {
3477 /* We don't have the complete CMSG. */
3481 if (cmh.cmsg_level == IPPROTO_SCTP) {
3482 switch (cmh.cmsg_type) {
3484 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_initmsg)) {
3488 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3489 if (initmsg.sinit_max_attempts)
3490 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3491 if (initmsg.sinit_num_ostreams)
3492 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3493 if (initmsg.sinit_max_instreams)
3494 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3495 if (initmsg.sinit_max_init_timeo)
3496 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3497 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3498 struct sctp_stream_out *tmp_str;
3501 /* Default is NOT correct */
3502 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3503 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3504 SCTP_TCB_UNLOCK(stcb);
3505 SCTP_MALLOC(tmp_str,
3506 struct sctp_stream_out *,
3507 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3509 SCTP_TCB_LOCK(stcb);
3510 if (tmp_str != NULL) {
3511 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3512 stcb->asoc.strmout = tmp_str;
3513 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3515 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3517 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3518 stcb->asoc.strmout[i].next_sequence_sent = 0;
3519 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3520 stcb->asoc.strmout[i].stream_no = i;
3521 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3522 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
3527 case SCTP_DSTADDRV4:
3528 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in_addr)) {
3532 memset(&sin, 0, sizeof(struct sockaddr_in));
3533 sin.sin_family = AF_INET;
3534 sin.sin_len = sizeof(struct sockaddr_in);
3535 sin.sin_port = stcb->rport;
3536 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3537 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3538 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3539 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3543 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3544 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3551 case SCTP_DSTADDRV6:
3552 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in6_addr)) {
3556 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3557 sin6.sin6_family = AF_INET6;
3558 sin6.sin6_len = sizeof(struct sockaddr_in6);
3559 sin6.sin6_port = stcb->rport;
3560 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3561 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3562 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3567 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3568 in6_sin6_2_sin(&sin, &sin6);
3569 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3570 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3571 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3575 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3576 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3582 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
3583 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3593 at += CMSG_ALIGN(cmh.cmsg_len);
3598 static struct sctp_tcb *
3599 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3601 struct mbuf *control,
3602 struct sctp_nets **net_p,
3607 struct sctp_tcb *stcb;
3608 struct sockaddr *addr;
3611 struct sockaddr_in sin;
3615 struct sockaddr_in6 sin6;
3619 tlen = SCTP_BUF_LEN(control);
3622 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3623 /* There is not enough room for one more. */
3627 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3628 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
3628 			/* We don't have a complete CMSG header. */
3633 if (((int)cmh.cmsg_len + at) > tlen) {
3634 /* We don't have the complete CMSG. */
3638 if (cmh.cmsg_level == IPPROTO_SCTP) {
3639 switch (cmh.cmsg_type) {
3641 case SCTP_DSTADDRV4:
3642 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in_addr)) {
3646 memset(&sin, 0, sizeof(struct sockaddr_in));
3647 sin.sin_family = AF_INET;
3648 sin.sin_len = sizeof(struct sockaddr_in);
3649 sin.sin_port = port;
3650 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3651 addr = (struct sockaddr *)&sin;
3655 case SCTP_DSTADDRV6:
3656 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in6_addr)) {
3660 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3661 sin6.sin6_family = AF_INET6;
3662 sin6.sin6_len = sizeof(struct sockaddr_in6);
3663 sin6.sin6_port = port;
3664 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3666 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3667 in6_sin6_2_sin(&sin, &sin6);
3668 addr = (struct sockaddr *)&sin;
3671 addr = (struct sockaddr *)&sin6;
3679 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3685 at += CMSG_ALIGN(cmh.cmsg_len);
3690 static struct mbuf *
3691 sctp_add_cookie(struct mbuf *init, int init_offset,
3692 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
3694 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3695 struct sctp_state_cookie *stc;
3696 struct sctp_paramhdr *ph;
3702 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3703 sizeof(struct sctp_paramhdr)), 0,
3704 M_DONTWAIT, 1, MT_DATA);
3708 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
3709 if (copy_init == NULL) {
3713 #ifdef SCTP_MBUF_LOGGING
3714 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3717 for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
3718 if (SCTP_BUF_IS_EXTENDED(mat)) {
3719 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3724 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3726 if (copy_initack == NULL) {
3728 sctp_m_freem(copy_init);
3731 #ifdef SCTP_MBUF_LOGGING
3732 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3735 for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
3736 if (SCTP_BUF_IS_EXTENDED(mat)) {
3737 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3742 /* easy side we just drop it on the end */
3743 ph = mtod(mret, struct sctp_paramhdr *);
3744 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3745 sizeof(struct sctp_paramhdr);
3746 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3747 sizeof(struct sctp_paramhdr));
3748 ph->param_type = htons(SCTP_STATE_COOKIE);
3749 ph->param_length = 0; /* fill in at the end */
3750 /* Fill in the stc cookie data */
3751 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3753 /* tack the INIT and then the INIT-ACK onto the chain */
3755 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3756 cookie_sz += SCTP_BUF_LEN(m_at);
3757 if (SCTP_BUF_NEXT(m_at) == NULL) {
3758 SCTP_BUF_NEXT(m_at) = copy_init;
3762 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3763 cookie_sz += SCTP_BUF_LEN(m_at);
3764 if (SCTP_BUF_NEXT(m_at) == NULL) {
3765 SCTP_BUF_NEXT(m_at) = copy_initack;
3769 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3770 cookie_sz += SCTP_BUF_LEN(m_at);
3771 if (SCTP_BUF_NEXT(m_at) == NULL) {
3775 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
3777 /* no space, so free the entire chain */
3781 SCTP_BUF_LEN(sig) = 0;
3782 SCTP_BUF_NEXT(m_at) = sig;
3784 foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
3785 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3787 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3788 cookie_sz += SCTP_SIGNATURE_SIZE;
3789 ph->param_length = htons(cookie_sz);
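/*
 * The chain assembled above is the complete STATE-COOKIE parameter:
 * parameter header | state cookie data | copy of the received INIT |
 * copy of our INIT-ACK | signature mbuf.  The signature space is only
 * zeroed here; the actual HMAC is computed later, just before the
 * INIT-ACK carrying this cookie is sent.
 */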
3795 sctp_get_ect(struct sctp_tcb *stcb)
3797 if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
3798 return (SCTP_ECT0_BIT);
3805 sctp_handle_no_route(struct sctp_tcb *stcb,
3806 struct sctp_nets *net,
3809 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3812 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3813 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3814 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3815 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3816 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
3817 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3819 SCTP_FAILED_THRESHOLD,
3822 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3823 net->dest_state &= ~SCTP_ADDR_PF;
3827 if (net == stcb->asoc.primary_destination) {
3828 /* need a new primary */
3829 struct sctp_nets *alt;
3831 alt = sctp_find_alternate_net(stcb, net, 0);
3833 if (stcb->asoc.alternate) {
3834 sctp_free_remote_addr(stcb->asoc.alternate);
3836 stcb->asoc.alternate = alt;
3837 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3838 if (net->ro._s_addr) {
3839 sctp_free_ifa(net->ro._s_addr);
3840 net->ro._s_addr = NULL;
3842 net->src_addr_selected = 0;
3850 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3851 struct sctp_tcb *stcb, /* may be NULL */
3852 struct sctp_nets *net,
3853 struct sockaddr *to,
3855 uint32_t auth_offset,
3856 struct sctp_auth_chunk *auth,
3857 uint16_t auth_keyid,
3858 int nofragment_flag,
3865 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3866 int so_locked SCTP_UNUSED,
3870 union sctp_sockstore *over_addr,
3873 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3876	 * Given an mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3877 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3878 * - fill in the HMAC digest of any AUTH chunk in the packet.
3879 * - calculate and fill in the SCTP checksum.
3880 * - prepend an IP address header.
3881 * - if boundall use INADDR_ANY.
3882 * - if boundspecific do source address selection.
3883	 * - set the fragmentation option for IPv4.
3884 * - On return from IP output, check/adjust mtu size of output
3885 * interface and smallest_mtu size as well.
3887 /* Will need ifdefs around this */
3890 struct sctphdr *sctphdr;
3894 sctp_route_t *ro = NULL;
3895 struct udphdr *udp = NULL;
3898 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3899 struct socket *so = NULL;
3903 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3904 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
3909 vrf_id = stcb->asoc.vrf_id;
3911 vrf_id = inp->def_vrf_id;
3914 /* fill in the HMAC digest for any AUTH chunk in the packet */
3915 if ((auth != NULL) && (stcb != NULL)) {
3916 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
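/*
 * DSCP precedence: a per-destination value wins over the association
 * default, which wins over the endpoint default.
 */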
3919 tos_value = net->dscp;
3921 tos_value = stcb->asoc.default_dscp;
3923 tos_value = inp->sctp_ep.default_dscp;
3926 switch (to->sa_family) {
3930 struct ip *ip = NULL;
3931 sctp_route_t iproute;
3934 len = sizeof(struct ip) + sizeof(struct sctphdr);
3936 len += sizeof(struct udphdr);
3938 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
3941 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3944 SCTP_ALIGN_TO_END(newm, len);
3945 SCTP_BUF_LEN(newm) = len;
3946 SCTP_BUF_NEXT(newm) = m;
3950 if (net->flowidset == 0) {
3951 panic("Flow ID not set");
3954 m->m_pkthdr.flowid = net->flowid;
3955 m->m_flags |= M_FLOWID;
3957 if ((init != NULL) && (init->m_flags & M_FLOWID)) {
3958 m->m_pkthdr.flowid = init->m_pkthdr.flowid;
3959 m->m_flags |= M_FLOWID;
3962 packet_length = sctp_calculate_len(m);
3963 ip = mtod(m, struct ip *);
3964 ip->ip_v = IPVERSION;
3965 ip->ip_hl = (sizeof(struct ip) >> 2);
3966 if (tos_value == 0) {
3968 * This means especially, that it is not set
3969 * at the SCTP layer. So use the value from
3972 tos_value = inp->ip_inp.inp.inp_ip_tos;
3976 tos_value |= sctp_get_ect(stcb);
3978 if ((nofragment_flag) && (port == 0)) {
3983 /* FreeBSD has a function for ip_id's */
3984 ip->ip_id = ip_newid();
3986 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
3987 ip->ip_len = packet_length;
3988 ip->ip_tos = tos_value;
3990 ip->ip_p = IPPROTO_UDP;
3992 ip->ip_p = IPPROTO_SCTP;
3997 memset(&iproute, 0, sizeof(iproute));
3998 memcpy(&ro->ro_dst, to, to->sa_len);
4000 ro = (sctp_route_t *) & net->ro;
4002 /* Now the address selection part */
4003 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4005 /* call the routine to select the src address */
4006 if (net && out_of_asoc_ok == 0) {
4007 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4008 sctp_free_ifa(net->ro._s_addr);
4009 net->ro._s_addr = NULL;
4010 net->src_addr_selected = 0;
4016 if (net->src_addr_selected == 0) {
4017 /* Cache the source address */
4018 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4021 net->src_addr_selected = 1;
4023 if (net->ro._s_addr == NULL) {
4024 /* No route to host */
4025 net->src_addr_selected = 0;
4026 sctp_handle_no_route(stcb, net, so_locked);
4027 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4029 return (EHOSTUNREACH);
4031 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4033 if (over_addr == NULL) {
4034 struct sctp_ifa *_lsrc;
4036 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4040 if (_lsrc == NULL) {
4041 sctp_handle_no_route(stcb, net, so_locked);
4042 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4044 return (EHOSTUNREACH);
4046 ip->ip_src = _lsrc->address.sin.sin_addr;
4047 sctp_free_ifa(_lsrc);
4049 ip->ip_src = over_addr->sin.sin_addr;
4050 SCTP_RTALLOC(ro, vrf_id);
4054 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4055 sctp_handle_no_route(stcb, net, so_locked);
4056 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4058 return (EHOSTUNREACH);
4060 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4061 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4062 udp->uh_dport = port;
4063 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
4065 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
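/* seed uh_sum with the pseudo-header checksum for the UDP encapsulation header */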
4069 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4071 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4074 sctphdr->src_port = src_port;
4075 sctphdr->dest_port = dest_port;
4076 sctphdr->v_tag = v_tag;
4077 sctphdr->checksum = 0;
4080 * If source address selection fails and we find no
4081 * route then the ip_output should fail as well with
4082 * a NO_ROUTE_TO_HOST type error. We probably should
4083 * catch that somewhere and abort the association
4084 * right away (assuming this is an INIT being sent).
4086 if (ro->ro_rt == NULL) {
4088 * src addr selection failed to find a route
4089 * (or valid source addr), so we can't get
4090 * there from here (yet)!
4092 sctp_handle_no_route(stcb, net, so_locked);
4093 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4095 return (EHOSTUNREACH);
4097 if (ro != &iproute) {
4098 memcpy(&iproute, ro, sizeof(*ro));
4100 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4101 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4102 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4103 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
4104 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4107 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4108 /* failed to prepend data, give up */
4109 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4113 #ifdef SCTP_PACKET_LOGGING
4114 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4115 sctp_packet_log(m, packet_length);
4117 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4119 #if defined(SCTP_WITH_NO_CSUM)
4120 SCTP_STAT_INCR(sctps_sendnocrc);
4122 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4124 (stcb->asoc.loopback_scope))) {
4125 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4126 SCTP_STAT_INCR(sctps_sendswcrc);
4128 SCTP_STAT_INCR(sctps_sendnocrc);
4132 SCTP_ENABLE_UDP_CSUM(o_pak);
4135 #if defined(SCTP_WITH_NO_CSUM)
4136 SCTP_STAT_INCR(sctps_sendnocrc);
4138 m->m_pkthdr.csum_flags = CSUM_SCTP;
4139 m->m_pkthdr.csum_data = 0;
4140 SCTP_STAT_INCR(sctps_sendhwcrc);
4143 /* send it out. table id is taken from stcb */
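/*
 * On platforms where the socket lock is held here it may be dropped
 * around the IP output call; a temporary refcount keeps the tcb from
 * going away while its lock is released to re-acquire the socket lock.
 */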
4144 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4145 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4146 so = SCTP_INP_SO(inp);
4147 SCTP_SOCKET_UNLOCK(so, 0);
4150 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4151 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4152 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4153 atomic_add_int(&stcb->asoc.refcnt, 1);
4154 SCTP_TCB_UNLOCK(stcb);
4155 SCTP_SOCKET_LOCK(so, 0);
4156 SCTP_TCB_LOCK(stcb);
4157 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4160 SCTP_STAT_INCR(sctps_sendpackets);
4161 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4163 SCTP_STAT_INCR(sctps_senderrors);
4165 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4167	/* free temp routes */
4174 * PMTU check versus smallest asoc MTU goes
4177 if ((ro->ro_rt != NULL) &&
4178 (net->ro._s_addr)) {
4181 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
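/* with UDP encapsulation the tunnel header reduces the usable path MTU */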
4183 mtu -= sizeof(struct udphdr);
4185 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4186 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4189 } else if (ro->ro_rt == NULL) {
4190 /* route was freed */
4191 if (net->ro._s_addr &&
4192 net->src_addr_selected) {
4193 sctp_free_ifa(net->ro._s_addr);
4194 net->ro._s_addr = NULL;
4196 net->src_addr_selected = 0;
4205 uint32_t flowlabel, flowinfo;
4206 struct ip6_hdr *ip6h;
4207 struct route_in6 ip6route;
4209 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4211 struct sockaddr_in6 lsa6_storage;
4213 u_short prev_port = 0;
4217 flowlabel = net->flowlabel;
4219 flowlabel = stcb->asoc.default_flowlabel;
4221 flowlabel = inp->sctp_ep.default_flowlabel;
4223 if (flowlabel == 0) {
4225 * This means especially, that it is not set
4226 * at the SCTP layer. So use the value from
4229 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
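/* keep only the 20-bit flow label field */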
4231 flowlabel &= 0x000fffff;
4232 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4234 len += sizeof(struct udphdr);
4236 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
4239 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4242 SCTP_ALIGN_TO_END(newm, len);
4243 SCTP_BUF_LEN(newm) = len;
4244 SCTP_BUF_NEXT(newm) = m;
4248 if (net->flowidset == 0) {
4249 panic("Flow ID not set");
4252 m->m_pkthdr.flowid = net->flowid;
4253 m->m_flags |= M_FLOWID;
4255 if ((init != NULL) && (init->m_flags & M_FLOWID)) {
4256 m->m_pkthdr.flowid = init->m_pkthdr.flowid;
4257 m->m_flags |= M_FLOWID;
4260 packet_length = sctp_calculate_len(m);
4262 ip6h = mtod(m, struct ip6_hdr *);
4263 /* protect *sin6 from overwrite */
4264 sin6 = (struct sockaddr_in6 *)to;
4268 /* KAME hack: embed scopeid */
4269 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4270 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4274 memset(&ip6route, 0, sizeof(ip6route));
4275 ro = (sctp_route_t *) & ip6route;
4276 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4278 ro = (sctp_route_t *) & net->ro;
4281 * We assume here that inp_flow is in host byte
4282 * order within the TCB!
4284 if (tos_value == 0) {
4286 * This means especially, that it is not set
4287 * at the SCTP layer. So use the value from
4290 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4294 tos_value |= sctp_get_ect(stcb);
4298 flowinfo |= tos_value;
4300 flowinfo |= flowlabel;
4301 ip6h->ip6_flow = htonl(flowinfo);
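/* ip6_flow now carries the traffic class (DSCP plus ECT bits) and the flow label */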
4303 ip6h->ip6_nxt = IPPROTO_UDP;
4305 ip6h->ip6_nxt = IPPROTO_SCTP;
4307 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4308 ip6h->ip6_dst = sin6->sin6_addr;
4311 * Add SRC address selection here: we can only reuse
4312 * to a limited degree the kame src-addr-sel, since
4313 * we can try their selection but it may not be
4316 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4317 lsa6_tmp.sin6_family = AF_INET6;
4318 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4320 if (net && out_of_asoc_ok == 0) {
4321 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4322 sctp_free_ifa(net->ro._s_addr);
4323 net->ro._s_addr = NULL;
4324 net->src_addr_selected = 0;
4330 if (net->src_addr_selected == 0) {
4331 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4332 /* KAME hack: embed scopeid */
4333 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4334 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4337 /* Cache the source address */
4338 net->ro._s_addr = sctp_source_address_selection(inp,
4344 (void)sa6_recoverscope(sin6);
4345 net->src_addr_selected = 1;
4347 if (net->ro._s_addr == NULL) {
4348 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4349 net->src_addr_selected = 0;
4350 sctp_handle_no_route(stcb, net, so_locked);
4351 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4353 return (EHOSTUNREACH);
4355 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4357 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4358 /* KAME hack: embed scopeid */
4359 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4360 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4363 if (over_addr == NULL) {
4364 struct sctp_ifa *_lsrc;
4366 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4370 if (_lsrc == NULL) {
4371 sctp_handle_no_route(stcb, net, so_locked);
4372 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4374 return (EHOSTUNREACH);
4376 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4377 sctp_free_ifa(_lsrc);
4379 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4380 SCTP_RTALLOC(ro, vrf_id);
4382 (void)sa6_recoverscope(sin6);
4384 lsa6->sin6_port = inp->sctp_lport;
4386 if (ro->ro_rt == NULL) {
4388 * src addr selection failed to find a route
4389 * (or valid source addr), so we can't get
4392 sctp_handle_no_route(stcb, net, so_locked);
4393 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4395 return (EHOSTUNREACH);
4398 * XXX: sa6 may not have a valid sin6_scope_id in
4399 * the non-SCOPEDROUTING case.
4401 bzero(&lsa6_storage, sizeof(lsa6_storage));
4402 lsa6_storage.sin6_family = AF_INET6;
4403 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4404 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4405 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4406 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4411 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4412 lsa6_storage.sin6_port = inp->sctp_lport;
4413 lsa6 = &lsa6_storage;
4414 ip6h->ip6_src = lsa6->sin6_addr;
4417 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4418 sctp_handle_no_route(stcb, net, so_locked);
4419 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4421 return (EHOSTUNREACH);
4423 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4424 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4425 udp->uh_dport = port;
4426 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4428 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4430 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4433 sctphdr->src_port = src_port;
4434 sctphdr->dest_port = dest_port;
4435 sctphdr->v_tag = v_tag;
4436 sctphdr->checksum = 0;
4439 * We set the hop limit now since there is a good
4440 * chance that our ro pointer is now filled
4442 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4443 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4446 /* Copy to be sure something bad is not happening */
4447 sin6->sin6_addr = ip6h->ip6_dst;
4448 lsa6->sin6_addr = ip6h->ip6_src;
4451 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4452 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4453 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4454 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4455 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4457 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4459 * preserve the port and scope for link
4462 prev_scope = sin6->sin6_scope_id;
4463 prev_port = sin6->sin6_port;
4465 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4466 /* failed to prepend data, give up */
4468 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4471 #ifdef SCTP_PACKET_LOGGING
4472 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4473 sctp_packet_log(m, packet_length);
4475 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4477 #if defined(SCTP_WITH_NO_CSUM)
4478 SCTP_STAT_INCR(sctps_sendnocrc);
4480 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4482 (stcb->asoc.loopback_scope))) {
4483 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4484 SCTP_STAT_INCR(sctps_sendswcrc);
4486 SCTP_STAT_INCR(sctps_sendnocrc);
4489 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4490 udp->uh_sum = 0xffff;
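/* a computed sum of 0 is transmitted as 0xffff; UDP over IPv6 must carry a real checksum */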
4493 #if defined(SCTP_WITH_NO_CSUM)
4494 SCTP_STAT_INCR(sctps_sendnocrc);
4496 m->m_pkthdr.csum_flags = CSUM_SCTP;
4497 m->m_pkthdr.csum_data = 0;
4498 SCTP_STAT_INCR(sctps_sendhwcrc);
4501 /* send it out. table id is taken from stcb */
4502 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4503 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4504 so = SCTP_INP_SO(inp);
4505 SCTP_SOCKET_UNLOCK(so, 0);
4508 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4509 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4510 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4511 atomic_add_int(&stcb->asoc.refcnt, 1);
4512 SCTP_TCB_UNLOCK(stcb);
4513 SCTP_SOCKET_LOCK(so, 0);
4514 SCTP_TCB_LOCK(stcb);
4515 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4519 /* for link local this must be done */
4520 sin6->sin6_scope_id = prev_scope;
4521 sin6->sin6_port = prev_port;
4523 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4524 SCTP_STAT_INCR(sctps_sendpackets);
4525 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4527 SCTP_STAT_INCR(sctps_senderrors);
4530 /* Now if we had a temp route free it */
4536 * PMTU check versus smallest asoc MTU goes
4539 if (ro->ro_rt == NULL) {
4540 /* Route was freed */
4541 if (net->ro._s_addr &&
4542 net->src_addr_selected) {
4543 sctp_free_ifa(net->ro._s_addr);
4544 net->ro._s_addr = NULL;
4546 net->src_addr_selected = 0;
4548 if ((ro->ro_rt != NULL) &&
4549 (net->ro._s_addr)) {
4552 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4554 (stcb->asoc.smallest_mtu > mtu)) {
4555 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4558 net->mtu -= sizeof(struct udphdr);
4562 if (ND_IFINFO(ifp)->linkmtu &&
4563 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4564 sctp_mtu_size_reset(inp,
4566 ND_IFINFO(ifp)->linkmtu);
4574 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4575 ((struct sockaddr *)to)->sa_family);
4577 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4584 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4585 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4590 struct mbuf *m, *m_at, *mp_last;
4591 struct sctp_nets *net;
4592 struct sctp_init_chunk *init;
4593 struct sctp_supported_addr_param *sup_addr;
4594 struct sctp_adaptation_layer_indication *ali;
4595 struct sctp_ecn_supported_param *ecn;
4596 struct sctp_prsctp_supported_param *prsctp;
4597 struct sctp_supported_chunk_types_param *pr_supported;
4598 int cnt_inits_to = 0;
4603	/* INITs always go to the primary (and usually ONLY) address */
4605 net = stcb->asoc.primary_destination;
4607 net = TAILQ_FIRST(&stcb->asoc.nets);
4612 /* we confirm any address we send an INIT to */
4613 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4614 (void)sctp_set_primary_addr(stcb, NULL, net);
4616 /* we confirm any address we send an INIT to */
4617 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4619 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4621 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
4623 * special hook, if we are sending to link local it will not
4624 * show up in our private address count.
4626 struct sockaddr_in6 *sin6l;
4628 sin6l = &net->ro._l_addr.sin6;
4629 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
4633 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4634 /* This case should not happen */
4635 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4638 /* start the INIT timer */
4639 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4641 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
4643 /* No memory, INIT timer will re-attempt. */
4644 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4647 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
4649 * assume peer supports asconf in order to be able to queue local
4650 * address changes while an INIT is in flight and before the assoc
4653 stcb->asoc.peer_supports_asconf = 1;
4654 /* Now lets put the SCTP header in place */
4655 init = mtod(m, struct sctp_init_chunk *);
4656 /* now the chunk header */
4657 init->ch.chunk_type = SCTP_INITIATION;
4658 init->ch.chunk_flags = 0;
4659 /* fill in later from mbuf we build */
4660 init->ch.chunk_length = 0;
4661 /* place in my tag */
4662 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4663 /* set up some of the credits. */
4664 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4665 SCTP_MINIMAL_RWND));
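/* never advertise less than the protocol-minimum receive window */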
4667 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4668 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4669 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4670 /* now the address restriction */
4671 /* XXX Should we take the address family of the socket into account? */
4672 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init +
4674 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4677 /* we support 2 types: IPv4/IPv6 */
4678 sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + 2 * sizeof(uint16_t));
4679 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4680 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
4682 /* we support 1 type: IPv6 */
4683 sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
4684 sup_addr->addr_type[0] = htons(SCTP_IPV6_ADDRESS);
4685 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4688 /* we support 1 type: IPv4 */
4689 sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
4690 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4691 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4693 SCTP_BUF_LEN(m) += sizeof(struct sctp_supported_addr_param);
4694 /* adaptation layer indication parameter */
4695 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(struct sctp_supported_addr_param));
4696 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4697 ali->ph.param_length = htons(sizeof(*ali));
4698 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4699 SCTP_BUF_LEN(m) += sizeof(*ali);
4700 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
4702 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4703 /* Add NAT friendly parameter */
4704 struct sctp_paramhdr *ph;
4706 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4707 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4708 ph->param_length = htons(sizeof(struct sctp_paramhdr));
4709 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
4710 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph));
4712 /* now any cookie time extensions */
4713 if (stcb->asoc.cookie_preserve_req) {
4714 struct sctp_cookie_perserve_param *cookie_preserve;
4716 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
4717 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4718 cookie_preserve->ph.param_length = htons(
4719 sizeof(*cookie_preserve));
4720 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4721 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
4722 ecn = (struct sctp_ecn_supported_param *)(
4723 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
4724 stcb->asoc.cookie_preserve_req = 0;
4727 if (stcb->asoc.ecn_allowed == 1) {
4728 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
4729 ecn->ph.param_length = htons(sizeof(*ecn));
4730 SCTP_BUF_LEN(m) += sizeof(*ecn);
4731 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
4734 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
4736 /* And now tell the peer we do pr-sctp */
4737 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
4738 prsctp->ph.param_length = htons(sizeof(*prsctp));
4739 SCTP_BUF_LEN(m) += sizeof(*prsctp);
4741 /* And now tell the peer we do all the extensions */
4742 pr_supported = (struct sctp_supported_chunk_types_param *)
4743 ((caddr_t)prsctp + sizeof(*prsctp));
4744 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4746 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4747 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4748 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4749 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4750 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4751 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4752 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4754 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
4755 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4757 p_len = sizeof(*pr_supported) + num_ext;
4758 pr_supported->ph.param_length = htons(p_len);
4759 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
4760 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4763 /* add authentication parameters */
4764 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4765 struct sctp_auth_random *randp;
4766 struct sctp_auth_hmac_algo *hmacs;
4767 struct sctp_auth_chunk_list *chunks;
4769 /* attach RANDOM parameter, if available */
4770 if (stcb->asoc.authinfo.random != NULL) {
4771 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4772 p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
4773 /* random key already contains the header */
4774 bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
4775 /* zero out any padding required */
4776 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
4777 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4779 /* add HMAC_ALGO parameter */
4780 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4781 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
4782 (uint8_t *) hmacs->hmac_ids);
4784 p_len += sizeof(*hmacs);
4785 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4786 hmacs->ph.param_length = htons(p_len);
4787 /* zero out any padding required */
4788 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
4789 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4791 /* add CHUNKS parameter */
4792 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4793 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
4794 chunks->chunk_types);
4796 p_len += sizeof(*chunks);
4797 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4798 chunks->ph.param_length = htons(p_len);
4799 /* zero out any padding required */
4800 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
4801 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4804 /* now the addresses */
4806 struct sctp_scoping scp;
4809 * To optimize this we could put the scoping stuff into a
4810 * structure and remove the individual uint8's from the
4811 * assoc structure. Then we could just sifa in the address
4812 * within the stcb. But for now this is a quick hack to get
4813 * the address stuff teased apart.
4816 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
4817 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
4818 scp.loopback_scope = stcb->asoc.loopback_scope;
4819 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
4820 scp.local_scope = stcb->asoc.local_scope;
4821 scp.site_scope = stcb->asoc.site_scope;
4823 sctp_add_addresses_to_i_ia(inp, stcb, &scp, m, cnt_inits_to);
4826	/* calculate the size and update pkt header and chunk header */
4828 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4829 if (SCTP_BUF_NEXT(m_at) == NULL)
4831 p_len += SCTP_BUF_LEN(m_at);
4833 init->ch.chunk_length = htons(p_len);
4835	 * We pass 0 here to NOT set IP_DF if it is IPv4; we ignore the return
4836	 * here since the timer will drive a retransmission.
4839 /* I don't expect this to execute but we will be safe here */
4841 if ((padval) && (mp_last)) {
4843 * The compiler worries that mp_last may not be set even
4844 * though I think it is impossible :-> however we add
4845 * mp_last here just in case.
4847 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
4849 /* Houston we have a problem, no space */
4854 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4855 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4856 (struct sockaddr *)&net->ro._l_addr,
4857 m, 0, NULL, 0, 0, 0, 0,
4858 inp->sctp_lport, stcb->rport, htonl(0),
4859 net->port, so_locked, NULL, NULL);
4860 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4861 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4862 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4866 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4867 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4870	 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
4871	 * pointing to the beginning of the params (i.e. iphlen +
4872	 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4873	 * end of the mbuf, verifying that all parameters are known.
4875	 * For unknown parameters, build and return an mbuf with
4876	 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4877	 * processing this chunk, stop and set *abort_processing to 1.
4879 * By having param_offset be pre-set to where parameters begin it is
4880 * hoped that this routine may be reused in the future by new
4883 struct sctp_paramhdr *phdr, params;
4885 struct mbuf *mat, *op_err;
4886 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4887 int at, limit, pad_needed;
4888 uint16_t ptype, plen, padded_size;
4891 *abort_processing = 0;
4894 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4897 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4898 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
4899 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4900 ptype = ntohs(phdr->param_type);
4901 plen = ntohs(phdr->param_length);
4902 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4903 /* wacked parameter */
4904 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4907 limit -= SCTP_SIZE32(plen);
4909 * All parameters for all chunks that we know/understand are
4910	 * listed here. We process them in other places and make
4911	 * appropriate stop actions per the upper bits. However this
4912	 * is the generic routine processors can call to get back
4913	 * an operr to either incorporate (init-ack) or send.
4915 padded_size = SCTP_SIZE32(plen);
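/*
 * Check the (32-bit padded) length of every parameter type we know
 * about; out-of-range sizes are treated as invalid below.
 */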
4917	/* Params with variable size */
4918 case SCTP_HEARTBEAT_INFO:
4919 case SCTP_STATE_COOKIE:
4920 case SCTP_UNRECOG_PARAM:
4921 case SCTP_ERROR_CAUSE_IND:
4925	/* Params with variable size within a range */
4926 case SCTP_CHUNK_LIST:
4927 case SCTP_SUPPORTED_CHUNK_EXT:
4928 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4929 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4934 case SCTP_SUPPORTED_ADDRTYPE:
4935 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4936 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4942 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4943 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4948 case SCTP_SET_PRIM_ADDR:
4949 case SCTP_DEL_IP_ADDRESS:
4950 case SCTP_ADD_IP_ADDRESS:
4951 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4952 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4953 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4958	/* Params with a fixed size */
4959 case SCTP_IPV4_ADDRESS:
4960 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4961 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4966 case SCTP_IPV6_ADDRESS:
4967 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4968 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
4973 case SCTP_COOKIE_PRESERVE:
4974 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
4975 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
4980 case SCTP_HAS_NAT_SUPPORT:
4983 case SCTP_PRSCTP_SUPPORTED:
4985 if (padded_size != sizeof(struct sctp_paramhdr)) {
4986 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
4991 case SCTP_ECN_CAPABLE:
4992 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
4993 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
4998 case SCTP_ULP_ADAPTATION:
4999 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5000	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5005 case SCTP_SUCCESS_REPORT:
5006 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5007 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5012 case SCTP_HOSTNAME_ADDRESS:
5014 /* We can NOT handle HOST NAME addresses!! */
5017 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5018 *abort_processing = 1;
5019 if (op_err == NULL) {
5020 /* Ok need to try to get a mbuf */
5022 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5024 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5027 l_len += sizeof(struct sctp_paramhdr);
5028 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5030 SCTP_BUF_LEN(op_err) = 0;
5032 * pre-reserve space for ip
5033 * and sctp header and
5037 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5039 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5041 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5042 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5046 /* If we have space */
5047 struct sctp_paramhdr s;
5050 uint32_t cpthis = 0;
5052 pad_needed = 4 - (err_at % 4);
5053 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5054 err_at += pad_needed;
5056 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5057 s.param_length = htons(sizeof(s) + plen);
5058 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5059 err_at += sizeof(s);
5060 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5062 sctp_m_freem(op_err);
5064 * we are out of memory but
5065 * we still need to have a
5066 * look at what to do (the
5067 * system is in trouble
5072 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5079 * we do not recognize the parameter figure out what
5082 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5083 if ((ptype & 0x4000) == 0x4000) {
5084 /* Report bit is set?? */
5085 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5086 if (op_err == NULL) {
5089 /* Ok need to try to get an mbuf */
5091 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5093 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5096 l_len += sizeof(struct sctp_paramhdr);
5097 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5099 SCTP_BUF_LEN(op_err) = 0;
5101 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5103 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5105 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5106 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5110 /* If we have space */
5111 struct sctp_paramhdr s;
5114 uint32_t cpthis = 0;
5116 pad_needed = 4 - (err_at % 4);
5117 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5118 err_at += pad_needed;
5120 s.param_type = htons(SCTP_UNRECOG_PARAM);
5121 s.param_length = htons(sizeof(s) + plen);
5122 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5123 err_at += sizeof(s);
5124 if (plen > sizeof(tempbuf)) {
5125 plen = sizeof(tempbuf);
5127 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5129 sctp_m_freem(op_err);
5131 * we are out of memory but
5132 * we still need to have a
5133 * look at what to do (the
5134 * system is in trouble
5138 goto more_processing;
5140 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5145 if ((ptype & 0x8000) == 0x0000) {
5146 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5149 /* skip this chunk and continue processing */
5150 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5151 at += SCTP_SIZE32(plen);
5156 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5160 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5161 *abort_processing = 1;
5162 if ((op_err == NULL) && phdr) {
5166 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5168 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5170 l_len += (2 * sizeof(struct sctp_paramhdr));
5171 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5173 SCTP_BUF_LEN(op_err) = 0;
5175 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5177 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5179 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5180 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5183 if ((op_err) && phdr) {
5184 struct sctp_paramhdr s;
5187 uint32_t cpthis = 0;
5189 pad_needed = 4 - (err_at % 4);
5190 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5191 err_at += pad_needed;
5193 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5194 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5195 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5196 err_at += sizeof(s);
5197 /* Only copy back the p-hdr that caused the issue */
5198 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5204 sctp_are_there_new_addresses(struct sctp_association *asoc,
5205 struct mbuf *in_initpkt, int offset)
5208	 * Given an INIT packet, look through the packet to verify that there
5209	 * are NO new addresses. As we go through the parameters, add reports
5210	 * of any un-understood parameters that require an error. Also we
5211	 * must return (1) to drop the packet if we see an un-understood
5212	 * parameter that tells us to drop the chunk.
5214 struct sockaddr *sa_touse;
5215 struct sockaddr *sa;
5216 struct sctp_paramhdr *phdr, params;
5217 uint16_t ptype, plen;
5219 struct sctp_nets *net;
5223 struct sockaddr_in sin4, *sa4;
5227 struct sockaddr_in6 sin6, *sa6;
5228 struct ip6_hdr *ip6h;
5233 memset(&sin4, 0, sizeof(sin4));
5234 sin4.sin_family = AF_INET;
5235 sin4.sin_len = sizeof(sin4);
5238 memset(&sin6, 0, sizeof(sin6));
5239 sin6.sin6_family = AF_INET6;
5240 sin6.sin6_len = sizeof(sin6);
5243 /* First what about the src address of the pkt ? */
5244 iph = mtod(in_initpkt, struct ip *);
5245 switch (iph->ip_v) {
5248 /* source addr is IPv4 */
5249 sin4.sin_addr = iph->ip_src;
5250 sa_touse = (struct sockaddr *)&sin4;
5254 case IPV6_VERSION >> 4:
5255 /* source addr is IPv6 */
5256 ip6h = mtod(in_initpkt, struct ip6_hdr *);
5257 sin6.sin6_addr = ip6h->ip6_src;
5258 sa_touse = (struct sockaddr *)&sin6;
5266 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5267 sa = (struct sockaddr *)&net->ro._l_addr;
5268 if (sa->sa_family == sa_touse->sa_family) {
5270 if (sa->sa_family == AF_INET) {
5271 sa4 = (struct sockaddr_in *)sa;
5272 if (sa4->sin_addr.s_addr == sin4.sin_addr.s_addr) {
5279 if (sa->sa_family == AF_INET6) {
5280 sa6 = (struct sockaddr_in6 *)sa;
5281 if (SCTP6_ARE_ADDR_EQUAL(sa6, &sin6)) {
5290	/* New address added! no need to look further. */
5293	/* Ok so far, let's munge through the rest of the packet */
5294 offset += sizeof(struct sctp_init_chunk);
5295 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5298 ptype = ntohs(phdr->param_type);
5299 plen = ntohs(phdr->param_length);
5302 case SCTP_IPV4_ADDRESS:
5304 struct sctp_ipv4addr_param *p4, p4_buf;
5306 phdr = sctp_get_next_param(in_initpkt, offset,
5307 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5308 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5312 p4 = (struct sctp_ipv4addr_param *)phdr;
5313 sin4.sin_addr.s_addr = p4->addr;
5314 sa_touse = (struct sockaddr *)&sin4;
5319 case SCTP_IPV6_ADDRESS:
5321 struct sctp_ipv6addr_param *p6, p6_buf;
5323 phdr = sctp_get_next_param(in_initpkt, offset,
5324 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5325 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5329 p6 = (struct sctp_ipv6addr_param *)phdr;
5330 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5332 sa_touse = (struct sockaddr *)&sin6;
5341 /* ok, sa_touse points to one to check */
5343 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5344 sa = (struct sockaddr *)&net->ro._l_addr;
5345 if (sa->sa_family != sa_touse->sa_family) {
5349 if (sa->sa_family == AF_INET) {
5350 sa4 = (struct sockaddr_in *)sa;
5351 if (sa4->sin_addr.s_addr ==
5352 sin4.sin_addr.s_addr) {
5359 if (sa->sa_family == AF_INET6) {
5360 sa6 = (struct sockaddr_in6 *)sa;
5361 if (SCTP6_ARE_ADDR_EQUAL(
5370 /* New addr added! no need to look further */
5374 offset += SCTP_SIZE32(plen);
5375 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5381	 * Given an MBUF chain that was sent into us containing an INIT, build an
5382	 * INIT-ACK with a COOKIE and send it back. We assume that the in_initpkt has done
5383	 * a pullup to include the IPv6/IPv4 header, SCTP header, and the initial part of the INIT
5384	 * message (i.e. the struct sctp_init_msg).
5387 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5388 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
5389 struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5391 struct sctp_association *asoc;
5392 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
5393 struct sctp_init_ack_chunk *initack;
5394 struct sctp_adaptation_layer_indication *ali;
5395 struct sctp_ecn_supported_param *ecn;
5396 struct sctp_prsctp_supported_param *prsctp;
5397 struct sctp_supported_chunk_types_param *pr_supported;
5398 union sctp_sockstore store, store1, *over_addr;
5401 struct sockaddr_in *sin, *to_sin;
5405 struct sockaddr_in6 *sin6, *to_sin6;
5411 struct ip6_hdr *ip6;
5414 struct sockaddr *to;
5415 struct sctp_state_cookie stc;
5416 struct sctp_nets *net = NULL;
5417 uint8_t *signature = NULL;
5418 int cnt_inits_to = 0;
5419 uint16_t his_limit, i_want;
5420 int abort_flag, padval;
5423 int nat_friendly = 0;
5431 if ((asoc != NULL) &&
5432 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5433 (sctp_are_there_new_addresses(asoc, init_pkt, offset))) {
5434 /* new addresses, out of here in non-cookie-wait states */
5436	 * Send an ABORT; we don't add the new address error clause,
5437	 * though. We even set the T bit and copy in the 0 tag, so this
5438	 * looks no different than if no listener was present.
5440 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port);
5444 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5445 (offset + sizeof(struct sctp_init_chunk)),
5446 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5449 sctp_send_abort(init_pkt, iphlen, sh,
5450 init_chk->init.initiate_tag, op_err, vrf_id, port);
5453 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
5455 /* No memory, INIT timer will re-attempt. */
5457 sctp_m_freem(op_err);
5460 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5462 /* the time I built cookie */
5463 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5465 /* populate any tie tags */
5467 /* unlock before tag selections */
5468 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5469 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5470 stc.cookie_life = asoc->cookie_life;
5471 net = asoc->primary_destination;
5473 stc.tie_tag_my_vtag = 0;
5474 stc.tie_tag_peer_vtag = 0;
5475 /* life I will award this cookie */
5476 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5479 /* copy in the ports for later check */
5480 stc.myport = sh->dest_port;
5481 stc.peerport = sh->src_port;
5484	 * If we wanted to honor cookie life extensions, we would add to
5485 * stc.cookie_life. For now we should NOT honor any extension
5487 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5488 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5489 struct inpcb *in_inp;
5491	/* It's a V6 socket */
5492 in_inp = (struct inpcb *)inp;
5493 stc.ipv6_addr_legal = 1;
5494 /* Now look at the binding flag to see if V4 will be legal */
5495 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
5496 stc.ipv4_addr_legal = 1;
5498 /* V4 addresses are NOT legal on the association */
5499 stc.ipv4_addr_legal = 0;
5502	/* It's a V4 socket, no V6 */
5503 stc.ipv4_addr_legal = 1;
5504 stc.ipv6_addr_legal = 0;
5507 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5512 /* now for scope setup */
5513 memset((caddr_t)&store, 0, sizeof(store));
5514 memset((caddr_t)&store1, 0, sizeof(store1));
5517 to_sin = &store1.sin;
5521 to_sin6 = &store1.sin6;
5523 iph = mtod(init_pkt, struct ip *);
5524 /* establish the to_addr's */
5525 switch (iph->ip_v) {
5528 to_sin->sin_port = sh->dest_port;
5529 to_sin->sin_family = AF_INET;
5530 to_sin->sin_len = sizeof(struct sockaddr_in);
5531 to_sin->sin_addr = iph->ip_dst;
5535 case IPV6_VERSION >> 4:
5536 ip6 = mtod(init_pkt, struct ip6_hdr *);
5537 to_sin6->sin6_addr = ip6->ip6_dst;
5538 to_sin6->sin6_scope_id = 0;
5539 to_sin6->sin6_port = sh->dest_port;
5540 to_sin6->sin6_family = AF_INET6;
5541 to_sin6->sin6_len = sizeof(struct sockaddr_in6);
5550 to = (struct sockaddr *)&store;
5551 switch (iph->ip_v) {
5555 sin->sin_family = AF_INET;
5556 sin->sin_len = sizeof(struct sockaddr_in);
5557 sin->sin_port = sh->src_port;
5558 sin->sin_addr = iph->ip_src;
5559 /* lookup address */
5560 stc.address[0] = sin->sin_addr.s_addr;
5564 stc.addr_type = SCTP_IPV4_ADDRESS;
5565 /* local from address */
5566 stc.laddress[0] = to_sin->sin_addr.s_addr;
5567 stc.laddress[1] = 0;
5568 stc.laddress[2] = 0;
5569 stc.laddress[3] = 0;
5570 stc.laddr_type = SCTP_IPV4_ADDRESS;
5571 /* scope_id is only for v6 */
5573 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5574 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
5579 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5580 /* Must use the address in this case */
5581 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
5582 stc.loopback_scope = 1;
5585 stc.local_scope = 0;
5591 case IPV6_VERSION >> 4:
5593 ip6 = mtod(init_pkt, struct ip6_hdr *);
5594 sin6->sin6_family = AF_INET6;
5595 sin6->sin6_len = sizeof(struct sockaddr_in6);
5596 sin6->sin6_port = sh->src_port;
5597 sin6->sin6_addr = ip6->ip6_src;
5598 /* lookup address */
5599 memcpy(&stc.address, &sin6->sin6_addr,
5600 sizeof(struct in6_addr));
5601 sin6->sin6_scope_id = 0;
5602 stc.addr_type = SCTP_IPV6_ADDRESS;
5604 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
5606 * FIX ME: does this have scope from
5609 (void)sa6_recoverscope(sin6);
5610 stc.scope_id = sin6->sin6_scope_id;
5611 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5612 stc.loopback_scope = 1;
5613 stc.local_scope = 0;
5616 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
5618 * If the new destination is a
5619 * LINK_LOCAL we must have common
5620 * both site and local scope. Don't
5621 * set local scope though since we
5622 * must depend on the source to be
5623 * added implicitly. We cannot
5624 * assure just because we share one
5625 * link that all links are common.
5627 stc.local_scope = 0;
5631 * we start counting for the private
5632 * address stuff at 1. since the
5633 * link local we source from won't
5634 * show up in our scoped count.
5638 * pull out the scope_id from
5642 * FIX ME: does this have scope from
5645 (void)sa6_recoverscope(sin6);
5646 stc.scope_id = sin6->sin6_scope_id;
5647 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5648 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
5650 * If the new destination is
5651 * SITE_LOCAL then we must have site
5656 memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr));
5657 stc.laddr_type = SCTP_IPV6_ADDRESS;
5667 /* set the scope per the existing tcb */
5670 struct sctp_nets *lnet;
5674 stc.loopback_scope = asoc->loopback_scope;
5675 stc.ipv4_scope = asoc->ipv4_local_scope;
5676 stc.site_scope = asoc->site_scope;
5677 stc.local_scope = asoc->local_scope;
5679 /* Why do we not consider IPv4 LL addresses? */
5680 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5681 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5682 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5684 * if we have a LL address, start
5692 /* use the net pointer */
5693 to = (struct sockaddr *)&net->ro._l_addr;
5694 switch (to->sa_family) {
5697 sin = (struct sockaddr_in *)to;
5698 stc.address[0] = sin->sin_addr.s_addr;
5702 stc.addr_type = SCTP_IPV4_ADDRESS;
5703 if (net->src_addr_selected == 0) {
5705 * strange case here, the INIT should have
5706	 * done the selection.
5708 net->ro._s_addr = sctp_source_address_selection(inp,
5709 stcb, (sctp_route_t *) & net->ro,
5711 if (net->ro._s_addr == NULL)
5714 net->src_addr_selected = 1;
5717 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5718 stc.laddress[1] = 0;
5719 stc.laddress[2] = 0;
5720 stc.laddress[3] = 0;
5721 stc.laddr_type = SCTP_IPV4_ADDRESS;
5722 /* scope_id is only for v6 */
5728 sin6 = (struct sockaddr_in6 *)to;
5729 memcpy(&stc.address, &sin6->sin6_addr,
5730 sizeof(struct in6_addr));
5731 stc.addr_type = SCTP_IPV6_ADDRESS;
5732 stc.scope_id = sin6->sin6_scope_id;
5733 if (net->src_addr_selected == 0) {
5735 * strange case here, the INIT should have
5736	 * done the selection.
5738 net->ro._s_addr = sctp_source_address_selection(inp,
5739 stcb, (sctp_route_t *) & net->ro,
5741 if (net->ro._s_addr == NULL)
5744 net->src_addr_selected = 1;
5746 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5747 sizeof(struct in6_addr));
5748 stc.laddr_type = SCTP_IPV6_ADDRESS;
5753 /* Now lets put the SCTP header in place */
5754 initack = mtod(m, struct sctp_init_ack_chunk *);
5755 /* Save it off for quick ref */
5756 stc.peers_vtag = init_chk->init.initiate_tag;
5758 memcpy(stc.identification, SCTP_VERSION_STRING,
5759 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5760 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5761 /* now the chunk header */
5762 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5763 initack->ch.chunk_flags = 0;
5764 /* fill in later from mbuf we build */
5765 initack->ch.chunk_length = 0;
5766 /* place in my tag */
5767 if ((asoc != NULL) &&
5768 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5769 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5770 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5771 /* re-use the v-tags and init-seq here */
5772 initack->init.initiate_tag = htonl(asoc->my_vtag);
5773 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5775 uint32_t vtag, itsn;
5777 if (hold_inp_lock) {
5778 SCTP_INP_INCR_REF(inp);
5779 SCTP_INP_RUNLOCK(inp);
5782 atomic_add_int(&asoc->refcnt, 1);
5783 SCTP_TCB_UNLOCK(stcb);
5785 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5786 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5788 * Got a duplicate vtag on some guy behind a
5789	 * NAT; make sure we don't use it.
5793 initack->init.initiate_tag = htonl(vtag);
5794 /* get a TSN to use too */
5795 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5796 initack->init.initial_tsn = htonl(itsn);
5797 SCTP_TCB_LOCK(stcb);
5798 atomic_add_int(&asoc->refcnt, -1);
5800 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5801 initack->init.initiate_tag = htonl(vtag);
5802 /* get a TSN to use too */
5803 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5805 if (hold_inp_lock) {
5806 SCTP_INP_RLOCK(inp);
5807 SCTP_INP_DECR_REF(inp);
5810	/* save away my tag too */
5811 stc.my_vtag = initack->init.initiate_tag;
5813 /* set up some of the credits. */
5814 so = inp->sctp_socket;
5816 /* memory problem */
5820 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
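/*
 * Stream negotiation: ask for the smaller of what we want outbound and
 * what the peer says it will accept, and advertise our own inbound limit.
 */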
5822 /* set what I want */
5823 his_limit = ntohs(init_chk->init.num_inbound_streams);
5824 /* choose what I want */
5826 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5827 i_want = asoc->streamoutcnt;
5829 i_want = inp->sctp_ep.pre_open_stream_count;
5832 i_want = inp->sctp_ep.pre_open_stream_count;
5834 if (his_limit < i_want) {
5835 /* I Want more :< */
5836 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5838 /* I can have what I want :> */
5839 initack->init.num_outbound_streams = htons(i_want);
5841	/* tell him his limit. */
5842 initack->init.num_inbound_streams =
5843 htons(inp->sctp_ep.max_open_streams_intome);
5845 /* adaptation layer indication parameter */
5846 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5847 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5848 ali->ph.param_length = htons(sizeof(*ali));
5849 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5850 SCTP_BUF_LEN(m) += sizeof(*ali);
5851 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5854 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
5855 (inp->sctp_ecn_enable == 1)) {
5856 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5857 ecn->ph.param_length = htons(sizeof(*ecn));
5858 SCTP_BUF_LEN(m) += sizeof(*ecn);
5860 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5863 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5865 /* And now tell the peer we do pr-sctp */
5866 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5867 prsctp->ph.param_length = htons(sizeof(*prsctp));
5868 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5870 /* Add NAT friendly parameter */
5871 struct sctp_paramhdr *ph;
5873 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5874 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5875 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5876 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5878 /* And now tell the peer we do all the extensions */
5879 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5880 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5882 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5883 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5884 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5885 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5886 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5887 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5888 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5889 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5890 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5891 p_len = sizeof(*pr_supported) + num_ext;
5892 pr_supported->ph.param_length = htons(p_len);
5893 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5894 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5896 /* add authentication parameters */
5897 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5898 struct sctp_auth_random *randp;
5899 struct sctp_auth_hmac_algo *hmacs;
5900 struct sctp_auth_chunk_list *chunks;
5901 uint16_t random_len;
5903 /* generate and add RANDOM parameter */
5904 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5905 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5906 randp->ph.param_type = htons(SCTP_RANDOM);
5907 p_len = sizeof(*randp) + random_len;
5908 randp->ph.param_length = htons(p_len);
5909 SCTP_READ_RANDOM(randp->random_data, random_len);
5910 /* zero out any padding required */
5911 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5912 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5914 /* add HMAC_ALGO parameter */
5915 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5916 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5917 (uint8_t *) hmacs->hmac_ids);
5919 p_len += sizeof(*hmacs);
5920 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5921 hmacs->ph.param_length = htons(p_len);
5922 /* zero out any padding required */
5923 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5924 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5926 /* add CHUNKS parameter */
5927 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5928 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5929 chunks->chunk_types);
5931 p_len += sizeof(*chunks);
5932 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5933 chunks->ph.param_length = htons(p_len);
5934 /* zero out any padding required */
5935 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5936 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5940 /* now the addresses */
5942 struct sctp_scoping scp;
5945 * To optimize this we could put the scoping stuff into a
5946 * structure and remove the individual uint8's from the stc
5947 * structure. Then we could just fill in the address within
5948 * the stc.. but for now this is a quick hack to get the
5949 * address stuff teased apart.
5951 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5952 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5953 scp.loopback_scope = stc.loopback_scope;
5954 scp.ipv4_local_scope = stc.ipv4_scope;
5955 scp.local_scope = stc.local_scope;
5956 scp.site_scope = stc.site_scope;
5957 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to);
5960 /* tack on the operational error if present */
5969 llen += SCTP_BUF_LEN(ol);
5970 ol = SCTP_BUF_NEXT(ol);
5973 /* must add a pad to the param */
5974 uint32_t cpthis = 0;
5977 padlen = 4 - (llen % 4);
5978 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
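/*
 * Worked example (illustrative): the appended operational error must also
 * end on a 32-bit boundary.  With llen = 14, padlen = 4 - (14 % 4) = 2,
 * and m_copyback() appends two zero bytes taken from cpthis.
 */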
5980 while (SCTP_BUF_NEXT(m_at) != NULL) {
5981 m_at = SCTP_BUF_NEXT(m_at);
5983 SCTP_BUF_NEXT(m_at) = op_err;
5984 while (SCTP_BUF_NEXT(m_at) != NULL) {
5985 m_at = SCTP_BUF_NEXT(m_at);
5988 /* pre-calculate the size and update pkt header and chunk header */
5990 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5991 p_len += SCTP_BUF_LEN(m_tmp);
5992 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5993 /* m_tmp should now point to last one */
5998 /* Now we must build a cookie */
5999 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6000 if (m_cookie == NULL) {
6001 /* memory problem */
6005 /* Now append the cookie to the end and update the space/size */
6006 SCTP_BUF_NEXT(m_tmp) = m_cookie;
6008 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6009 p_len += SCTP_BUF_LEN(m_tmp);
6010 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6011 /* m_tmp should now point to last one */
6017 * Place in the size, but we don't include the last pad (if any) in
6020 initack->ch.chunk_length = htons(p_len);
6023 * Time to sign the cookie. We don't sign over the cookie signature
6024 * itself, thus we set the trailer.
6026 (void)sctp_hmac_m(SCTP_HMAC,
6027 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6028 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6029 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
6031 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6032 * here since the timer will drive a retransmission.
6035 if ((padval) && (mp_last)) {
6036 /* see my previous comments on mp_last */
6037 if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
6038 /* Houston we have a problem, no space */
6043 if (stc.loopback_scope) {
6044 over_addr = &store1;
6049 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6051 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6052 port, SCTP_SO_NOT_LOCKED, over_addr, init_pkt);
6053 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6058 sctp_prune_prsctp(struct sctp_tcb *stcb,
6059 struct sctp_association *asoc,
6060 struct sctp_sndrcvinfo *srcv,
6064 struct sctp_tmit_chunk *chk, *nchk;
6066 SCTP_TCB_LOCK_ASSERT(stcb);
6067 if ((asoc->peer_supports_prsctp) &&
6068 (asoc->sent_queue_cnt_removeable > 0)) {
6069 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6071 * Look for chunks marked with the PR_SCTP flag AND
6072 * the buffer space flag. If the one being sent is of
6073 * equal or greater priority, then purge the old one
6074 * and free some space.
6076 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6078 * This one is PR-SCTP AND buffer space
6081 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6083 * Lower numbers equate to higher
6084 * priority so if the one we are
6085 * looking at has a larger or equal
6086 * priority we want to drop the data
6087 * and NOT retransmit it.
6091 * We release the book_size
6092 * if the mbuf is here
6097 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6098 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
6100 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
6101 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6104 freed_spc += ret_spc;
6105 if (freed_spc >= dataout) {
6108 } /* if chunk was present */
6109 } /* if of sufficient priority */
6110 } /* if chunk has enabled */
6111 } /* tailqforeach */
6113 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6114 /* Here we must move to the sent queue and mark */
6115 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6116 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6119 * We release the book_size
6120 * if the mbuf is here
6124 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6125 SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
6128 freed_spc += ret_spc;
6129 if (freed_spc >= dataout) {
6132 } /* end if chk->data */
6133 } /* end if right class */
6134 } /* end if chk pr-sctp */
6135 } /* tailqforeachsafe (chk) */
6136 } /* if enabled in asoc */
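/*
 * Illustrative summary of the buffer-drop pruning above: for PR_SCTP_BUF
 * chunks sinfo_timetolive acts as a priority (lower is more important)
 * that was stored in timetodrop.tv_sec at queue time.  A new send with
 * priority 5 may therefore evict any queued chunk whose stored value is
 * >= 5, and the pruning stops as soon as freed_spc covers the size of the
 * pending send (dataout).
 */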
6140 sctp_get_frag_point(struct sctp_tcb *stcb,
6141 struct sctp_association *asoc)
6146 * For endpoints that have both v6 and v4 addresses we must reserve
6147 * room for the ipv6 header; for those that are only dealing with V4
6148 * we use a larger frag point.
6150 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6151 ovh = SCTP_MED_OVERHEAD;
6153 ovh = SCTP_MED_V4_OVERHEAD;
6156 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6157 siz = asoc->smallest_mtu - ovh;
6159 siz = (stcb->asoc.sctp_frag_point - ovh);
6161 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6163 /* A data chunk MUST fit in a cluster */
6164 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6167 /* adjust for an AUTH chunk if DATA requires auth */
6168 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6169 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6172 /* make it an even word boundary please */
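/*
 * Illustrative example: if the computed siz comes out to, say, 1449 bytes
 * after the overhead and AUTH adjustments, the word-boundary fixup done
 * just below rounds it down to 1448 so every fragment stays 32-bit
 * aligned.
 */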
6179 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6183 * We assume that the user wants PR_SCTP_TTL if the user provides a
6184 * positive lifetime but does not specify any PR_SCTP policy. This
6185 * is a BAD assumption and causes problems at least with the
6186 * U-Vancouver MPI folks. I will change this to be no policy means
6189 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6190 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6195 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6196 case CHUNK_FLAGS_PR_SCTP_BUF:
6198 * Time to live is a priority stored in tv_sec when doing
6199 * the buffer drop thing.
6201 sp->ts.tv_sec = sp->timetolive;
6204 case CHUNK_FLAGS_PR_SCTP_TTL:
6208 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6209 tv.tv_sec = sp->timetolive / 1000;
6210 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6212 * TODO sctp_constants.h needs alternative time
6213 * macros when _KERNEL is undefined.
6215 timevaladd(&sp->ts, &tv);
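/*
 * Worked example (illustrative): sinfo_timetolive is in milliseconds, so
 * timetolive = 1500 becomes tv_sec = 1 and tv_usec = 500000; adding that
 * to the current time leaves sp->ts holding the absolute deadline after
 * which a TTL-policy message may be abandoned.
 */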
6218 case CHUNK_FLAGS_PR_SCTP_RTX:
6220 * Time to live is the number of retransmissions stored in
6223 sp->ts.tv_sec = sp->timetolive;
6227 SCTPDBG(SCTP_DEBUG_USRREQ1,
6228 "Unknown PR_SCTP policy %u.\n",
6229 PR_SCTP_POLICY(sp->sinfo_flags));
6235 sctp_msg_append(struct sctp_tcb *stcb,
6236 struct sctp_nets *net,
6238 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6242 struct sctp_stream_queue_pending *sp = NULL;
6243 struct sctp_stream_out *strm;
6246 * Given an mbuf chain, put it into the association send queue and
6247 * place it on the wheel
6249 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6250 /* Invalid stream number */
6251 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6255 if ((stcb->asoc.stream_locked) &&
6256 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6257 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6261 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6262 /* Now can we send this? */
6263 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6264 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6265 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6266 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6267 /* got data while shutting down */
6268 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6272 sctp_alloc_a_strmoq(stcb, sp);
6274 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6278 sp->sinfo_flags = srcv->sinfo_flags;
6279 sp->timetolive = srcv->sinfo_timetolive;
6280 sp->ppid = srcv->sinfo_ppid;
6281 sp->context = srcv->sinfo_context;
6283 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6285 atomic_add_int(&sp->net->ref_count, 1);
6289 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6290 sp->stream = srcv->sinfo_stream;
6291 sp->msg_is_complete = 1;
6292 sp->sender_all_done = 1;
6295 sp->tail_mbuf = NULL;
6296 sctp_set_prsctp_policy(sp);
6298 * We could in theory (for sendall) pass the length in, but we would
6299 * still have to hunt through the chain since we need to setup the
6303 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6304 if (SCTP_BUF_NEXT(at) == NULL)
6306 sp->length += SCTP_BUF_LEN(at);
6308 if (srcv->sinfo_keynumber_valid) {
6309 sp->auth_keyid = srcv->sinfo_keynumber;
6311 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6313 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6314 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6315 sp->holds_key_ref = 1;
6317 if (hold_stcb_lock == 0) {
6318 SCTP_TCB_SEND_LOCK(stcb);
6320 sctp_snd_sb_alloc(stcb, sp->length);
6321 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6322 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6323 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
6324 sp->strseq = strm->next_sequence_sent;
6325 strm->next_sequence_sent++;
6327 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6329 if (hold_stcb_lock == 0) {
6330 SCTP_TCB_SEND_UNLOCK(stcb);
6340 static struct mbuf *
6341 sctp_copy_mbufchain(struct mbuf *clonechain,
6342 struct mbuf *outchain,
6343 struct mbuf **endofchain,
6346 uint8_t copy_by_ref)
6349 struct mbuf *appendchain;
6353 if (endofchain == NULL) {
6357 sctp_m_freem(outchain);
6360 if (can_take_mbuf) {
6361 appendchain = clonechain;
6364 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6366 /* It's not in a cluster */
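/*
 * Illustrative note: reaching this branch means the payload fits in
 * (sctp_mbuf_threshold_count - 1) * MLEN + MHLEN bytes, so it is cheaper
 * to flat-copy it into the tail cluster of the outgoing chain than to
 * reference-copy a string of small mbufs (the SCTP_M_COPYM path below
 * handles the larger case).
 */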
6367 if (*endofchain == NULL) {
6368 /* lets get a mbuf cluster */
6369 if (outchain == NULL) {
6370 /* This is the general case */
6372 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6373 if (outchain == NULL) {
6376 SCTP_BUF_LEN(outchain) = 0;
6377 *endofchain = outchain;
6378 /* get the prepend space */
6379 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6382 * We really should not get a NULL
6388 if (SCTP_BUF_NEXT(m) == NULL) {
6392 m = SCTP_BUF_NEXT(m);
6395 if (*endofchain == NULL) {
6397 * huh, TSNH XXX maybe we
6400 sctp_m_freem(outchain);
6404 /* get the new end of length */
6405 len = M_TRAILINGSPACE(*endofchain);
6407 /* how much is left at the end? */
6408 len = M_TRAILINGSPACE(*endofchain);
6410 /* Find the end of the data, for appending */
6411 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6413 /* Now lets copy it out */
6414 if (len >= sizeofcpy) {
6415 /* It all fits, copy it in */
6416 m_copydata(clonechain, 0, sizeofcpy, cp);
6417 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6419 /* fill up the end of the chain */
6421 m_copydata(clonechain, 0, len, cp);
6422 SCTP_BUF_LEN((*endofchain)) += len;
6423 /* now we need another one */
6426 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6431 SCTP_BUF_NEXT((*endofchain)) = m;
6433 cp = mtod((*endofchain), caddr_t);
6434 m_copydata(clonechain, len, sizeofcpy, cp);
6435 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6439 /* copy the old-fashioned way */
6440 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
6441 #ifdef SCTP_MBUF_LOGGING
6442 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6445 for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
6446 if (SCTP_BUF_IS_EXTENDED(mat)) {
6447 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6454 if (appendchain == NULL) {
6457 sctp_m_freem(outchain);
6461 /* tack on to the end */
6462 if (*endofchain != NULL) {
6463 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6467 if (SCTP_BUF_NEXT(m) == NULL) {
6468 SCTP_BUF_NEXT(m) = appendchain;
6471 m = SCTP_BUF_NEXT(m);
6475 * save off the end and update the end-chain position
6479 if (SCTP_BUF_NEXT(m) == NULL) {
6483 m = SCTP_BUF_NEXT(m);
6487 /* save off the end and update the end-chain position */
6490 if (SCTP_BUF_NEXT(m) == NULL) {
6494 m = SCTP_BUF_NEXT(m);
6496 return (appendchain);
6501 sctp_med_chunk_output(struct sctp_inpcb *inp,
6502 struct sctp_tcb *stcb,
6503 struct sctp_association *asoc,
6506 int control_only, int from_where,
6507 struct timeval *now, int *now_filled, int frag_point, int so_locked
6508 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6514 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6515 uint32_t val SCTP_UNUSED)
6517 struct sctp_copy_all *ca;
6520 int added_control = 0;
6521 int un_sent, do_chunk_output = 1;
6522 struct sctp_association *asoc;
6523 struct sctp_nets *net;
6525 ca = (struct sctp_copy_all *)ptr;
6526 if (ca->m == NULL) {
6529 if (ca->inp != inp) {
6533 if ((ca->m) && ca->sndlen) {
6534 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
6536 /* can't copy so we are done */
6540 #ifdef SCTP_MBUF_LOGGING
6541 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6544 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6545 if (SCTP_BUF_IS_EXTENDED(mat)) {
6546 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6554 SCTP_TCB_LOCK_ASSERT(stcb);
6555 if (stcb->asoc.alternate) {
6556 net = stcb->asoc.alternate;
6558 net = stcb->asoc.primary_destination;
6560 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6561 /* Abort this assoc with m as the user defined reason */
6563 struct sctp_paramhdr *ph;
6565 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
6567 ph = mtod(m, struct sctp_paramhdr *);
6568 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6569 ph->param_length = htons(ca->sndlen);
6572 * We add one here to keep the assoc from
6573 * disappearing on us.
6575 atomic_add_int(&stcb->asoc.refcnt, 1);
6576 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6578 * sctp_abort_an_association calls sctp_free_asoc();
6579 * free_asoc will NOT free the association since we
6580 * incremented the refcnt. We do this to prevent
6581 * it being freed and things getting tricky, since we
6582 * could end up (from free_asoc) calling inpcb_free,
6583 * which would get a recursive lock call to the
6584 * iterator lock. But as a consequence of that the
6585 * stcb will return to us un-locked. Since
6586 * free_asoc returns with either no TCB or the TCB
6587 * unlocked, we must relock to unlock in the
6588 * iterator timer :-0
6590 SCTP_TCB_LOCK(stcb);
6591 atomic_add_int(&stcb->asoc.refcnt, -1);
6592 goto no_chunk_output;
6596 ret = sctp_msg_append(stcb, net, m,
6600 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6601 /* shutdown this assoc */
6604 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
6606 if (TAILQ_EMPTY(&asoc->send_queue) &&
6607 TAILQ_EMPTY(&asoc->sent_queue) &&
6609 if (asoc->locked_on_sending) {
6613 * there is nothing queued to send, so I'm
6616 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6617 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6618 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6620 * only send SHUTDOWN the first time
6623 sctp_send_shutdown(stcb, net);
6624 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6625 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6627 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6628 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6629 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6631 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6632 asoc->primary_destination);
6634 do_chunk_output = 0;
6638 * we still got (or just got) data to send,
6639 * so set SHUTDOWN_PENDING
6642 * XXX sockets draft says that SCTP_EOF
6643 * should be sent with no data. Currently,
6644 * we will allow user data to be sent first
6645 * and move to SHUTDOWN-PENDING
6647 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6648 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6649 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6650 if (asoc->locked_on_sending) {
6652 * Locked to send out the
6655 struct sctp_stream_queue_pending *sp;
6657 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6659 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6660 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6663 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6664 if (TAILQ_EMPTY(&asoc->send_queue) &&
6665 TAILQ_EMPTY(&asoc->sent_queue) &&
6666 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6668 atomic_add_int(&stcb->asoc.refcnt, 1);
6669 sctp_abort_an_association(stcb->sctp_ep, stcb,
6670 NULL, SCTP_SO_NOT_LOCKED);
6671 atomic_add_int(&stcb->asoc.refcnt, -1);
6672 goto no_chunk_output;
6674 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6675 asoc->primary_destination);
6681 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6682 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6684 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6685 (stcb->asoc.total_flight > 0) &&
6686 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
6688 do_chunk_output = 0;
6690 if (do_chunk_output)
6691 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6692 else if (added_control) {
6693 int num_out = 0, reason = 0, now_filled = 0;
6697 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6698 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6699 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6710 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6712 struct sctp_copy_all *ca;
6714 ca = (struct sctp_copy_all *)ptr;
6716 * Do a notify here? Kacheong suggests that the notify be done at
6717 * the send time.. so you would push up a notification if any send
6718 * failed. Don't know if this is feasible since the only failures we
6719 * have are "memory" related and if you cannot get an mbuf to send
6720 * the data you surely can't get an mbuf to send up to notify the
6721 * user you can't send the data :->
6724 /* now free everything */
6725 sctp_m_freem(ca->m);
6726 SCTP_FREE(ca, SCTP_M_COPYAL);
6730 #define MC_ALIGN(m, len) do { \
6731 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
6736 static struct mbuf *
6737 sctp_copy_out_all(struct uio *uio, int len)
6739 struct mbuf *ret, *at;
6740 int left, willcpy, cancpy, error;
6742 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
6748 SCTP_BUF_LEN(ret) = 0;
6749 /* save space for the data chunk header */
6750 cancpy = M_TRAILINGSPACE(ret);
6751 willcpy = min(cancpy, left);
6754 /* Align data to the end */
6755 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6761 SCTP_BUF_LEN(at) = willcpy;
6762 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6765 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
6766 if (SCTP_BUF_NEXT(at) == NULL) {
6769 at = SCTP_BUF_NEXT(at);
6770 SCTP_BUF_LEN(at) = 0;
6771 cancpy = M_TRAILINGSPACE(at);
6772 willcpy = min(cancpy, left);
6779 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6780 struct sctp_sndrcvinfo *srcv)
6783 struct sctp_copy_all *ca;
6785 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6789 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6792 memset(ca, 0, sizeof(struct sctp_copy_all));
6796 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6799 * take off the sendall flag, it would be bad if we failed to do
6802 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6803 /* get length and mbuf chain */
6805 ca->sndlen = uio->uio_resid;
6806 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6807 if (ca->m == NULL) {
6808 SCTP_FREE(ca, SCTP_M_COPYAL);
6809 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6813 /* Gather the length of the send */
6819 ca->sndlen += SCTP_BUF_LEN(m);
6820 m = SCTP_BUF_NEXT(m);
6824 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6825 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6826 SCTP_ASOC_ANY_STATE,
6828 sctp_sendall_completes, inp, 1);
6830 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6831 SCTP_FREE(ca, SCTP_M_COPYAL);
6832 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6840 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6842 struct sctp_tmit_chunk *chk, *nchk;
6844 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6845 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6846 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6848 sctp_m_freem(chk->data);
6851 asoc->ctrl_queue_cnt--;
6852 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6858 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6860 struct sctp_association *asoc;
6861 struct sctp_tmit_chunk *chk, *nchk;
6862 struct sctp_asconf_chunk *acp;
6865 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6866 /* find SCTP_ASCONF chunk in queue */
6867 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6869 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6870 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6875 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6877 sctp_m_freem(chk->data);
6880 asoc->ctrl_queue_cnt--;
6881 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6888 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6889 struct sctp_association *asoc,
6890 struct sctp_tmit_chunk **data_list,
6892 struct sctp_nets *net)
6895 struct sctp_tmit_chunk *tp1;
6897 for (i = 0; i < bundle_at; i++) {
6898 /* off of the send queue */
6899 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6900 asoc->send_queue_cnt--;
6903 * For any chunk NOT 0 you zap the time; chunk 0 gets
6904 * zapped or set based on whether an RTO measurement is
6907 data_list[i]->do_rtt = 0;
6910 data_list[i]->sent_rcv_time = net->last_sent_time;
6911 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6912 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6913 if (data_list[i]->whoTo == NULL) {
6914 data_list[i]->whoTo = net;
6915 atomic_add_int(&net->ref_count, 1);
6917 /* on to the sent queue */
6918 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6919 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6920 struct sctp_tmit_chunk *tpp;
6922 /* need to move back */
6924 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6926 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6930 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6933 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6935 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6940 /* This does not lower until the cum-ack passes it */
6941 asoc->sent_queue_cnt++;
6942 if ((asoc->peers_rwnd <= 0) &&
6943 (asoc->total_flight == 0) &&
6945 /* Mark the chunk as being a window probe */
6946 SCTP_STAT_INCR(sctps_windowprobed);
6948 #ifdef SCTP_AUDITING_ENABLED
6949 sctp_audit_log(0xC2, 3);
6951 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6952 data_list[i]->snd_count = 1;
6953 data_list[i]->rec.data.chunk_was_revoked = 0;
6954 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6955 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6956 data_list[i]->whoTo->flight_size,
6957 data_list[i]->book_size,
6958 (uintptr_t) data_list[i]->whoTo,
6959 data_list[i]->rec.data.TSN_seq);
6961 sctp_flight_size_increase(data_list[i]);
6962 sctp_total_flight_increase(stcb, data_list[i]);
6963 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6964 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6965 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6967 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6968 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6969 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6970 /* SWS sender side engages */
6971 asoc->peers_rwnd = 0;
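/*
 * Illustrative note: the peer's rwnd is charged send_size plus the
 * per-chunk allowance from the sctp_peer_chunk_oh sysctl; once the
 * remainder falls below the sender-side SWS threshold it is clamped to
 * zero so we stop dribbling tiny packets into a nearly full window.
 */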
6974 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
6975 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
6980 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
6981 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6986 struct sctp_tmit_chunk *chk, *nchk;
6988 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6989 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6990 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
6991 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6992 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6993 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
6994 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6995 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6996 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6997 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6998 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
6999 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7000 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7001 /* Stray chunks must be cleaned up */
7003 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7005 sctp_m_freem(chk->data);
7008 asoc->ctrl_queue_cnt--;
7009 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
7010 asoc->fwd_tsn_cnt--;
7011 sctp_free_a_chunk(stcb, chk, so_locked);
7012 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7013 /* special handling, we must look into the param */
7014 if (chk != asoc->str_reset) {
7015 goto clean_up_anyway;
7023 sctp_can_we_split_this(struct sctp_tcb *stcb,
7025 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7028 * Make a decision on whether I should split a msg into multiple parts.
7029 * This is only asked of incomplete messages.
7033 * If we are doing EEOR we need to always send it if it's the
7034 * entire thing, since it might be all the guy is putting in
7037 if (goal_mtu >= length) {
7039 * If we have data outstanding,
7040 * we get another chance when the sack
7041 * arrives to transmit - wait for more data
7043 if (stcb->asoc.total_flight == 0) {
7045 * If nothing is in flight, we zero the
7053 /* You can fill the rest */
7058 * For those strange folk that make the send buffer
7059 * smaller than our fragmentation point, we can't
7060 * get a full msg in so we have to allow splitting.
7062 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7065 if ((length <= goal_mtu) ||
7066 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7067 /* Sub-optimal residual, don't split in non-eeor mode. */
7071 * If we reach here, length is larger than the goal_mtu. Do we wish
7072 * to split it for the sake of packing packets together?
7074 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7075 /* Its ok to split it */
7076 return (min(goal_mtu, frag_point));
7078 /* Nope, can't split */
7084 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7085 struct sctp_stream_out *strq,
7087 uint32_t frag_point,
7093 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7098 /* Move from the stream to the send_queue keeping track of the total */
7099 struct sctp_association *asoc;
7100 struct sctp_stream_queue_pending *sp;
7101 struct sctp_tmit_chunk *chk;
7102 struct sctp_data_chunk *dchkh;
7103 uint32_t to_move, length;
7104 uint8_t rcv_flags = 0;
7106 uint8_t send_lock_up = 0;
7108 SCTP_TCB_LOCK_ASSERT(stcb);
7111 /* sa_ignore FREED_MEMORY */
7112 sp = TAILQ_FIRST(&strq->outqueue);
7115 if (send_lock_up == 0) {
7116 SCTP_TCB_SEND_LOCK(stcb);
7119 sp = TAILQ_FIRST(&strq->outqueue);
7123 if (strq->last_msg_incomplete) {
7124 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7126 strq->last_msg_incomplete);
7127 strq->last_msg_incomplete = 0;
7131 SCTP_TCB_SEND_UNLOCK(stcb);
7136 if ((sp->msg_is_complete) && (sp->length == 0)) {
7137 if (sp->sender_all_done) {
7139 * We are doing deferred cleanup. Last time through
7140 * when we took all the data the sender_all_done was
7143 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7144 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7145 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7146 sp->sender_all_done,
7148 sp->msg_is_complete,
7152 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7153 SCTP_TCB_SEND_LOCK(stcb);
7156 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7157 TAILQ_REMOVE(&strq->outqueue, sp, next);
7158 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7160 sctp_free_remote_addr(sp->net);
7164 sctp_m_freem(sp->data);
7167 sctp_free_a_strmoq(stcb, sp, so_locked);
7168 /* we can't be locked to it */
7170 stcb->asoc.locked_on_sending = NULL;
7172 SCTP_TCB_SEND_UNLOCK(stcb);
7175 /* back to get the next msg */
7179 * sender just finished this but still holds a
7188 /* is there some to get */
7189 if (sp->length == 0) {
7195 } else if (sp->discard_rest) {
7196 if (send_lock_up == 0) {
7197 SCTP_TCB_SEND_LOCK(stcb);
7200 /* Whack down the size */
7201 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7202 if ((stcb->sctp_socket != NULL) && \
7203 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7204 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7205 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7208 sctp_m_freem(sp->data);
7210 sp->tail_mbuf = NULL;
7220 some_taken = sp->some_taken;
7221 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7222 sp->msg_is_complete = 1;
7225 length = sp->length;
7226 if (sp->msg_is_complete) {
7227 /* The message is complete */
7228 to_move = min(length, frag_point);
7229 if (to_move == length) {
7230 /* All of it fits in the MTU */
7231 if (sp->some_taken) {
7232 rcv_flags |= SCTP_DATA_LAST_FRAG;
7233 sp->put_last_out = 1;
7235 rcv_flags |= SCTP_DATA_NOT_FRAG;
7236 sp->put_last_out = 1;
7239 /* Not all of it fits, we fragment */
7240 if (sp->some_taken == 0) {
7241 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7246 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7249 * We use a snapshot of length in case it
7250 * is expanding during the compare.
7255 if (to_move >= llen) {
7257 if (send_lock_up == 0) {
7259 * We are taking all of an incomplete msg
7260 * thus we need a send lock.
7262 SCTP_TCB_SEND_LOCK(stcb);
7264 if (sp->msg_is_complete) {
7266 * the sender finished the
7273 if (sp->some_taken == 0) {
7274 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7278 /* Nothing to take. */
7279 if (sp->some_taken) {
7288 /* If we reach here, we can copy out a chunk */
7289 sctp_alloc_a_chunk(stcb, chk);
7291 /* No chunk memory */
7297 * Setup for unordered if needed by looking at the user sent info
7300 if (sp->sinfo_flags & SCTP_UNORDERED) {
7301 rcv_flags |= SCTP_DATA_UNORDERED;
7303 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7304 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7305 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7307 /* clear out the chunk before setting up */
7308 memset(chk, 0, sizeof(*chk));
7309 chk->rec.data.rcv_flags = rcv_flags;
7311 if (to_move >= length) {
7312 /* we think we can steal the whole thing */
7313 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7314 SCTP_TCB_SEND_LOCK(stcb);
7317 if (to_move < sp->length) {
7318 /* bail, it changed */
7321 chk->data = sp->data;
7322 chk->last_mbuf = sp->tail_mbuf;
7323 /* register the stealing */
7324 sp->data = sp->tail_mbuf = NULL;
7329 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
7330 chk->last_mbuf = NULL;
7331 if (chk->data == NULL) {
7332 sp->some_taken = some_taken;
7333 sctp_free_a_chunk(stcb, chk, so_locked);
7338 #ifdef SCTP_MBUF_LOGGING
7339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7342 for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
7343 if (SCTP_BUF_IS_EXTENDED(mat)) {
7344 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7349 /* Pull off the data */
7350 m_adj(sp->data, to_move);
7351 /* Now lets work our way down and compact it */
7353 while (m && (SCTP_BUF_LEN(m) == 0)) {
7354 sp->data = SCTP_BUF_NEXT(m);
7355 SCTP_BUF_NEXT(m) = NULL;
7356 if (sp->tail_mbuf == m) {
7358 * Freeing tail? TSNH since
7359 * we supposedly were taking less
7360 * than the sp->length.
7363 panic("Huh, freing tail? - TSNH");
7365 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7366 sp->tail_mbuf = sp->data = NULL;
7375 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7376 chk->copy_by_ref = 1;
7378 chk->copy_by_ref = 0;
7381 * get last_mbuf and counts of mb usage. This is ugly but hopefully
7382 * it's only one mbuf.
7384 if (chk->last_mbuf == NULL) {
7385 chk->last_mbuf = chk->data;
7386 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7387 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7390 if (to_move > length) {
7391 /*- This should not happen either
7392 * since we always lower to_move to the size
7393 * of sp->length if it's larger.
7396 panic("Huh, how can to_move be larger?");
7398 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7402 atomic_subtract_int(&sp->length, to_move);
7404 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7405 /* Not enough room for a chunk header, get some */
7408 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
7411 * we're in trouble here. _PREPEND below will free
7412 * all the data if there is no leading space, so we
7413 * must put the data back and restore.
7415 if (send_lock_up == 0) {
7416 SCTP_TCB_SEND_LOCK(stcb);
7419 if (chk->data == NULL) {
7420 /* unsteal the data */
7421 sp->data = chk->data;
7422 sp->tail_mbuf = chk->last_mbuf;
7426 /* reassemble the data */
7428 sp->data = chk->data;
7429 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7431 sp->some_taken = some_taken;
7432 atomic_add_int(&sp->length, to_move);
7435 sctp_free_a_chunk(stcb, chk, so_locked);
7439 SCTP_BUF_LEN(m) = 0;
7440 SCTP_BUF_NEXT(m) = chk->data;
7442 M_ALIGN(chk->data, 4);
7445 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
7446 if (chk->data == NULL) {
7447 /* HELP, TSNH since we assured it would not above? */
7449 panic("prepend failes HELP?");
7451 SCTP_PRINTF("prepend fails HELP?\n");
7452 sctp_free_a_chunk(stcb, chk, so_locked);
7458 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7459 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7460 chk->book_size_scale = 0;
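/*
 * Illustrative note: book_size/send_size count the payload plus the
 * 16-byte DATA chunk header, e.g. to_move = 100 gives send_size = 116.
 * The sctp_snd_sb_alloc() call above only charges the header bytes, since
 * the payload itself was charged when the message was queued on the
 * stream.
 */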
7461 chk->sent = SCTP_DATAGRAM_UNSENT;
7464 chk->asoc = &stcb->asoc;
7465 chk->pad_inplace = 0;
7466 chk->no_fr_allowed = 0;
7467 chk->rec.data.stream_seq = sp->strseq;
7468 chk->rec.data.stream_number = sp->stream;
7469 chk->rec.data.payloadtype = sp->ppid;
7470 chk->rec.data.context = sp->context;
7471 chk->rec.data.doing_fast_retransmit = 0;
7473 chk->rec.data.timetodrop = sp->ts;
7474 chk->flags = sp->act_flags;
7477 chk->whoTo = sp->net;
7478 atomic_add_int(&chk->whoTo->ref_count, 1);
7482 if (sp->holds_key_ref) {
7483 chk->auth_keyid = sp->auth_keyid;
7484 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7485 chk->holds_key_ref = 1;
7487 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7488 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7489 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7490 (uintptr_t) stcb, sp->length,
7491 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7492 chk->rec.data.TSN_seq);
7494 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7496 * Put the rest of the things in place now. Size was done earlier in
7497 * previous loop prior to padding.
7500 #ifdef SCTP_ASOCLOG_OF_TSNS
7501 SCTP_TCB_LOCK_ASSERT(stcb);
7502 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7503 asoc->tsn_out_at = 0;
7504 asoc->tsn_out_wrapped = 1;
7506 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7507 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7508 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7509 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7510 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7511 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7512 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7513 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7517 dchkh->ch.chunk_type = SCTP_DATA;
7518 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7519 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7520 dchkh->dp.stream_id = htons(strq->stream_no);
7521 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7522 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7523 dchkh->ch.chunk_length = htons(chk->send_size);
7524 /* Now advance the chk->send_size by the actual pad needed. */
7525 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7530 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7531 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7532 chk->pad_inplace = 1;
7534 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7535 /* pad added an mbuf */
7536 chk->last_mbuf = lm;
7538 chk->send_size += pads;
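/*
 * Worked example (illustrative): with to_move = 13 the chunk is
 * 13 + 16 = 29 bytes and SCTP_SIZE32(29) = 32, so pads = 3.  When
 * sctp_pad_lastmbuf() succeeds pad_inplace is set, and if the padding had
 * to go into a fresh mbuf last_mbuf is advanced to it; either way
 * send_size grows to the padded 32 bytes.
 */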
7540 /* We only re-set the policy if it is on */
7541 if (sp->pr_sctp_on) {
7542 sctp_set_prsctp_policy(sp);
7543 asoc->pr_sctp_cnt++;
7544 chk->pr_sctp_on = 1;
7546 chk->pr_sctp_on = 0;
7548 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7549 /* All done, pull and kill the message */
7550 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7551 if (sp->put_last_out == 0) {
7552 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7553 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7554 sp->sender_all_done,
7556 sp->msg_is_complete,
7560 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7561 SCTP_TCB_SEND_LOCK(stcb);
7564 TAILQ_REMOVE(&strq->outqueue, sp, next);
7565 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7567 sctp_free_remote_addr(sp->net);
7571 sctp_m_freem(sp->data);
7574 sctp_free_a_strmoq(stcb, sp, so_locked);
7576 /* we can't be locked to it */
7578 stcb->asoc.locked_on_sending = NULL;
7580 /* more to go, we are locked */
7583 asoc->chunks_on_out_queue++;
7584 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7585 asoc->send_queue_cnt++;
7588 SCTP_TCB_SEND_UNLOCK(stcb);
7595 sctp_fill_outqueue(struct sctp_tcb *stcb,
7596 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7597 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7602 struct sctp_association *asoc;
7603 struct sctp_stream_out *strq;
7604 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7607 SCTP_TCB_LOCK_ASSERT(stcb);
7609 switch (net->ro._l_addr.sa.sa_family) {
7612 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7617 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7622 goal_mtu = net->mtu;
7625 /* Need an allowance for the data chunk header too */
7626 goal_mtu -= sizeof(struct sctp_data_chunk);
7628 /* must make even word boundary */
7629 goal_mtu &= 0xfffffffc;
7630 if (asoc->locked_on_sending) {
7631 /* We are stuck on one stream until the message completes. */
7632 strq = asoc->locked_on_sending;
7635 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7638 while ((goal_mtu > 0) && strq) {
7641 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7642 &giveup, eeor_mode, &bail, so_locked);
7644 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7647 asoc->locked_on_sending = strq;
7648 if ((moved_how_much == 0) || (giveup) || bail)
7649 /* no more to move for now */
7652 asoc->locked_on_sending = NULL;
7653 if ((giveup) || bail) {
7656 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7661 total_moved += moved_how_much;
7662 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7663 goal_mtu &= 0xfffffffc;
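/*
 * Illustrative example of the loop bookkeeping: starting from
 * goal_mtu = 1440, moving a 997-byte chunk leaves 1440 - (997 + 16) = 427,
 * re-rounded down to 424, so the loop keeps pulling from streams until
 * the packet's worth of room is used up or the scheduler runs dry.
 */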
7668 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7670 if (total_moved == 0) {
7671 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7672 (net == stcb->asoc.primary_destination)) {
7673 /* ran dry for primary network net */
7674 SCTP_STAT_INCR(sctps_primary_randry);
7675 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7676 /* ran dry with CMT on */
7677 SCTP_STAT_INCR(sctps_cmt_randry);
7683 sctp_fix_ecn_echo(struct sctp_association *asoc)
7685 struct sctp_tmit_chunk *chk;
7687 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7688 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7689 chk->sent = SCTP_DATAGRAM_UNSENT;
7695 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7697 struct sctp_association *asoc;
7698 struct sctp_tmit_chunk *chk;
7699 struct sctp_stream_queue_pending *sp;
7706 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7707 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7708 if (sp->net == net) {
7709 sctp_free_remote_addr(sp->net);
7714 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7715 if (chk->whoTo == net) {
7716 sctp_free_remote_addr(chk->whoTo);
7723 sctp_med_chunk_output(struct sctp_inpcb *inp,
7724 struct sctp_tcb *stcb,
7725 struct sctp_association *asoc,
7728 int control_only, int from_where,
7729 struct timeval *now, int *now_filled, int frag_point, int so_locked
7730 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7736 * Ok this is the generic chunk service queue. We must do the
7737 * following: - Service the stream queue that is next, moving any
7738 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7739 * LAST to the out queue in one pass) and assigning TSN's - Check to
7740 * see if the cwnd/rwnd allows any output; if so we go ahead and
7741 * formulate and send the low level chunks, making sure to combine
7742 * any control in the control chunk queue also.
7744 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7745 struct mbuf *outchain, *endoutchain;
7746 struct sctp_tmit_chunk *chk, *nchk;
7748 /* temp arrays for unlinking */
7749 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7750 int no_fragmentflg, error;
7751 unsigned int max_rwnd_per_dest, max_send_per_dest;
7752 int one_chunk, hbflag, skip_data_for_this_net;
7753 int asconf, cookie, no_out_cnt;
7754 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7755 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7757 uint32_t auth_offset = 0;
7758 struct sctp_auth_chunk *auth = NULL;
7759 uint16_t auth_keyid;
7760 int override_ok = 1;
7761 int skip_fill_up = 0;
7762 int data_auth_reqd = 0;
7765 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7771 auth_keyid = stcb->asoc.authinfo.active_keyid;
7773 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7774 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7775 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7780 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7782 * First lets prime the pump. For each destination, if there is room
7783 * in the flight size, attempt to pull an MTU's worth out of the
7784 * stream queues into the general send_queue
7786 #ifdef SCTP_AUDITING_ENABLED
7787 sctp_audit_log(0xC2, 2);
7789 SCTP_TCB_LOCK_ASSERT(stcb);
7791 if ((control_only) || (asoc->stream_reset_outstanding))
7796 /* Nothing possible to send? */
7797 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7798 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7799 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7800 TAILQ_EMPTY(&asoc->send_queue) &&
7801 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
7806 if (asoc->peers_rwnd == 0) {
7807 /* No room in peer's rwnd */
7809 if (asoc->total_flight > 0) {
7810 /* we are allowed one chunk in flight */
7814 if (stcb->asoc.ecn_echo_cnt_onq) {
7815 /* Record where a sack goes, if any */
7816 if (no_data_chunks &&
7817 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7818 /* Nothing but ECNe to send - we don't do that */
7819 goto nothing_to_send;
7821 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7822 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7823 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7824 sack_goes_to = chk->whoTo;
7829 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7830 if (stcb->sctp_socket)
7831 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7833 max_send_per_dest = 0;
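/*
 * Illustrative example: these are per-destination fair shares used by the
 * CMT buffer-splitting checks further down.  With peers_rwnd = 48000,
 * total_flight = 16000 and four nets, max_rwnd_per_dest = 16000; with a
 * 64000-byte send buffer, max_send_per_dest = 16000.
 */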
7834 if (no_data_chunks == 0) {
7835 /* How many non-directed chunks are there? */
7836 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7837 if (chk->whoTo == NULL) {
7839 * We already have non-directed chunks on
7840 * the queue, no need to do a fill-up.
7848 if ((no_data_chunks == 0) &&
7849 (skip_fill_up == 0) &&
7850 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7851 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7853 * This for loop we are in takes in each net, if
7854 * it's got space in cwnd and has data sent to it
7855 * (when CMT is off) then it calls
7856 * sctp_fill_outqueue for the net. This gets data on
7857 * the send queue for that network.
7859 * In sctp_fill_outqueue TSN's are assigned and data is
7860 * copied out of the stream buffers. Note mostly
7861 * copy by reference (we hope).
7863 net->window_probe = 0;
7864 if ((net != stcb->asoc.alternate) &&
7865 ((net->dest_state & SCTP_ADDR_PF) ||
7866 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7867 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7868 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7869 sctp_log_cwnd(stcb, net, 1,
7870 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7874 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7875 (net->flight_size == 0)) {
7876 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7878 if (net->flight_size >= net->cwnd) {
7879 /* skip this network, no room - can't fill */
7880 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7881 sctp_log_cwnd(stcb, net, 3,
7882 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7886 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7887 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7889 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7891 /* memory alloc failure */
7897 /* now service each destination and send out what we can for it */
7898 /* Nothing to send? */
7899 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7900 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7901 TAILQ_EMPTY(&asoc->send_queue)) {
7905 if (asoc->sctp_cmt_on_off > 0) {
7906 /* get the last start point */
7907 start_at = asoc->last_net_cmt_send_started;
7908 if (start_at == NULL) {
7909 /* NULL, so start at the beginning */
7910 start_at = TAILQ_FIRST(&asoc->nets);
7912 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7913 if (start_at == NULL) {
7914 start_at = TAILQ_FIRST(&asoc->nets);
7917 asoc->last_net_cmt_send_started = start_at;
7919 start_at = TAILQ_FIRST(&asoc->nets);
7921 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7922 if (chk->whoTo == NULL) {
7923 if (asoc->alternate) {
7924 chk->whoTo = asoc->alternate;
7926 chk->whoTo = asoc->primary_destination;
7928 atomic_add_int(&chk->whoTo->ref_count, 1);
7931 old_start_at = NULL;
7932 again_one_more_time:
7933 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7934 /* how much can we send? */
7935 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7936 if (old_start_at && (old_start_at == net)) {
7937 /* through list completely. */
7941 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7942 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7943 (net->flight_size >= net->cwnd)) {
7945 * Nothing on control or asconf and flight is full,
7946 * we can skip even in the CMT case.
7951 endoutchain = outchain = NULL;
7954 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7955 skip_data_for_this_net = 1;
7957 skip_data_for_this_net = 0;
7959 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7961 * if we have a route and an ifp, check to see if we
7962 * have room to send to this guy
7966 ifp = net->ro.ro_rt->rt_ifp;
7967 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7968 SCTP_STAT_INCR(sctps_ifnomemqueued);
7969 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7970 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7975 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7978 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7983 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7993 if (mtu > asoc->peers_rwnd) {
7994 if (asoc->total_flight > 0) {
7995 /* We have a packet in flight somewhere */
7996 r_mtu = asoc->peers_rwnd;
7998 /* We are always allowed to send one MTU out */
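/*
 * Illustrative note: this is the window-probe rule - even when the peer's
 * rwnd cannot cover a full MTU, a sender with nothing in flight may still
 * push one MTU's worth so the association can probe the window back open
 * (such chunks get marked as window probes when they are cleaned off the
 * send queue).
 */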
8005 /************************/
8006 /* ASCONF transmission */
8007 /************************/
8008 /* Now first lets go through the asconf queue */
8009 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8010 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8013 if (chk->whoTo == NULL) {
8014 if (asoc->alternate == NULL) {
8015 if (asoc->primary_destination != net) {
8019 if (asoc->alternate != net) {
8024 if (chk->whoTo != net) {
8028 if (chk->data == NULL) {
8031 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8032 chk->sent != SCTP_DATAGRAM_RESEND) {
8036 * if no AUTH is yet included and this chunk
8037 * requires it, make sure to account for it. We
8038 * don't apply the size until the AUTH chunk is
8039 * actually added below in case there is no room for
8040 * this chunk. NOTE: we overload the use of "omtu"
8043 if ((auth == NULL) &&
8044 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8045 stcb->asoc.peer_auth_chunks)) {
8046 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8049 /* Here we do NOT factor the r_mtu */
8050 if ((chk->send_size < (int)(mtu - omtu)) ||
8051 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8053 * We probably should glom the mbuf chain
8054 * from the chk->data for control but the
8055 * problem is it becomes yet one more level
8056 * of tracking to do if for some reason
8057 * output fails. Then I have got to
8058 * reconstruct the merged control chain.. el
8059 * yucko.. for now we take the easy way and
8063 * Add an AUTH chunk, if chunk requires it
8064 * save the offset into the chain for AUTH
8066 if ((auth == NULL) &&
8067 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8068 stcb->asoc.peer_auth_chunks))) {
8069 outchain = sctp_add_auth_chunk(outchain,
8074 chk->rec.chunk_id.id);
8075 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8077 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8078 (int)chk->rec.chunk_id.can_take_data,
8079 chk->send_size, chk->copy_by_ref);
8080 if (outchain == NULL) {
8082 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8085 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8086 /* update our MTU size */
8087 if (mtu > (chk->send_size + omtu))
8088 mtu -= (chk->send_size + omtu);
8091 to_out += (chk->send_size + omtu);
8092 /* Do clear IP_DF ? */
8093 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8096 if (chk->rec.chunk_id.can_take_data)
8099 * set hb flag since we can use these for
8105 * should sysctl this: don't bundle data
8106 * with ASCONF since it requires AUTH
8109 chk->sent = SCTP_DATAGRAM_SENT;
8110 if (chk->whoTo == NULL) {
8112 atomic_add_int(&net->ref_count, 1);
8117 * Ok we are out of room but we can
8118 * output without affecting the
8119 * flight size since this little guy
8120 * is a control only packet.
8122 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8124 * do NOT clear the asconf flag as
8125 * it is used to do appropriate
8126 * source address selection.
8128 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8129 (struct sockaddr *)&net->ro._l_addr,
8130 outchain, auth_offset, auth,
8131 stcb->asoc.authinfo.active_keyid,
8132 no_fragmentflg, 0, asconf,
8133 inp->sctp_lport, stcb->rport,
8134 htonl(stcb->asoc.peer_vtag),
8135 net->port, so_locked, NULL, NULL))) {
8136 if (error == ENOBUFS) {
8137 asoc->ifp_had_enobuf = 1;
8138 SCTP_STAT_INCR(sctps_lowlevelerr);
8140 if (from_where == 0) {
8141 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8143 if (*now_filled == 0) {
8144 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8146 *now = net->last_sent_time;
8148 net->last_sent_time = *now;
8151 /* error, could not output */
8152 if (error == EHOSTUNREACH) {
8158 sctp_move_chunks_from_net(stcb, net);
8163 asoc->ifp_had_enobuf = 0;
8164 if (*now_filled == 0) {
8165 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8167 *now = net->last_sent_time;
8169 net->last_sent_time = *now;
8173 * increase the number we sent, if a
8174 * cookie is sent we don't tell them
8177 outchain = endoutchain = NULL;
8181 *num_out += ctl_cnt;
8182 /* recalc a clean slate and setup */
8183 switch (net->ro._l_addr.sa.sa_family) {
8186 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8191 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8204 /************************/
8205 /* Control transmission */
8206 /************************/
8207 /* Now first lets go through the control queue */
8208 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8209 if ((sack_goes_to) &&
8210 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8211 (chk->whoTo != sack_goes_to)) {
8213 * if we have a sack in queue, and we are
8214 * looking at an ecn echo that is NOT queued
8215 * to where the sack is going..
8217 if (chk->whoTo == net) {
8219 * Don't transmit it to where it's
8220 * going (current net)
8223 } else if (sack_goes_to == net) {
8225 * But do transmit it to this
8228 goto skip_net_check;
8231 if (chk->whoTo == NULL) {
8232 if (asoc->alternate == NULL) {
8233 if (asoc->primary_destination != net) {
8237 if (asoc->alternate != net) {
8242 if (chk->whoTo != net) {
8247 if (chk->data == NULL) {
8250 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8252 * It must be unsent. Cookies and ASCONF's
8253 * hang around but their timers will force
8254 * when marked for resend.
8259 * if no AUTH is yet included and this chunk
8260 * requires it, make sure to account for it. We
8261 * don't apply the size until the AUTH chunk is
8262 * actually added below in case there is no room for
8263 * this chunk. NOTE: we overload the use of "omtu"
8266 if ((auth == NULL) &&
8267 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8268 stcb->asoc.peer_auth_chunks)) {
8269 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8272 /* Here we do NOT factor the r_mtu */
8273 if ((chk->send_size <= (int)(mtu - omtu)) ||
8274 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8276 * We probably should glom the mbuf chain
8277 * from the chk->data for control but the
8278 * problem is it becomes yet one more level
8279 * of tracking to do if for some reason
8280 * output fails. Then I have got to
8281 * reconstruct the merged control chain.. el
8282 * yucko.. for now we take the easy way and
8286 * Add an AUTH chunk, if chunk requires it
8287 * save the offset into the chain for AUTH
8289 if ((auth == NULL) &&
8290 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8291 stcb->asoc.peer_auth_chunks))) {
8292 outchain = sctp_add_auth_chunk(outchain,
8297 chk->rec.chunk_id.id);
8298 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8300 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8301 (int)chk->rec.chunk_id.can_take_data,
8302 chk->send_size, chk->copy_by_ref);
8303 if (outchain == NULL) {
8305 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8308 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8309 /* update our MTU size */
8310 if (mtu > (chk->send_size + omtu))
8311 mtu -= (chk->send_size + omtu);
8314 to_out += (chk->send_size + omtu);
8315 /* Do clear IP_DF ? */
8316 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8319 if (chk->rec.chunk_id.can_take_data)
8321 /* Mark things to be removed, if needed */
8322 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8323 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8324 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8325 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8326 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8327 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8328 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8329 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8330 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8331 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8332 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8333 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8336 /* remove these chunks at the end */
8337 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8338 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8339 /* turn off the timer */
8340 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8341 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8342 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8348 * Other chunks, since they have
8349 * timers running (i.e. COOKIE) we
8350 * just "trust" that it gets sent or retransmitted.
8354 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8357 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8359 * Increment ecne send count
8360 * here this means we may be
8361 * over-zealous in our
8362 * counting if the send
8363 * fails, but it's the best
8364 * place to do it (we used
8365 * to do it in the queue of
8366 * the chunk, but that did
8367 * not tell how many times it was sent.
8370 SCTP_STAT_INCR(sctps_sendecne);
8372 chk->sent = SCTP_DATAGRAM_SENT;
8373 if (chk->whoTo == NULL) {
8375 atomic_add_int(&net->ref_count, 1);
8381 * Ok we are out of room but we can
8382 * output without affecting the
8383 * flight size since this little guy
8384 * is a control only packet.
8387 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8389 * do NOT clear the asconf
8390 * flag as it is used to do
8391 * appropriate source
8392 * address selection.
8396 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8399 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8400 (struct sockaddr *)&net->ro._l_addr,
8403 stcb->asoc.authinfo.active_keyid,
8404 no_fragmentflg, 0, asconf,
8405 inp->sctp_lport, stcb->rport,
8406 htonl(stcb->asoc.peer_vtag),
8407 net->port, so_locked, NULL, NULL))) {
8408 if (error == ENOBUFS) {
8409 asoc->ifp_had_enobuf = 1;
8410 SCTP_STAT_INCR(sctps_lowlevelerr);
8412 if (from_where == 0) {
8413 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8415 /* error, could not output */
8417 if (*now_filled == 0) {
8418 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8420 *now = net->last_sent_time;
8422 net->last_sent_time = *now;
8426 if (error == EHOSTUNREACH) {
8432 sctp_move_chunks_from_net(stcb, net);
8437 asoc->ifp_had_enobuf = 0;
8438 /* Only HB or ASCONF advances time */
8440 if (*now_filled == 0) {
8441 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8443 *now = net->last_sent_time;
8445 net->last_sent_time = *now;
8450 * increase the number we sent, if a
8451 * cookie is sent we don't tell them any was sent out.
8454 outchain = endoutchain = NULL;
8458 *num_out += ctl_cnt;
8459 /* recalc a clean slate and setup */
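/*
 * Recalculate the space available for the next packet to this
 * destination: start again from the path MTU and subtract the minimum
 * IP (v4 or v6) plus SCTP common header overhead.
 */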
8460 switch (net->ro._l_addr.sa.sa_family) {
8463 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8468 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8481 /* JRI: if dest is in PF state, do not send data to it */
8482 if ((asoc->sctp_cmt_on_off > 0) &&
8483 (net != stcb->asoc.alternate) &&
8484 (net->dest_state & SCTP_ADDR_PF)) {
8487 if (net->flight_size >= net->cwnd) {
8490 if ((asoc->sctp_cmt_on_off > 0) &&
8491 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8492 (net->flight_size > max_rwnd_per_dest)) {
8496 * We need a specific accounting for the usage of the send
8497 * buffer. We also need to check the number of messages per
8498 * net. For now, this is better than nothing and it is disabled by default.
8501 if ((asoc->sctp_cmt_on_off > 0) &&
8502 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8503 (max_send_per_dest > 0) &&
8504 (net->flight_size > max_send_per_dest)) {
8507 /*********************/
8508 /* Data transmission */
8509 /*********************/
8511 * if AUTH for DATA is required and no AUTH has been added
8512 * yet, account for this in the mtu now... if no data can be
8513 * bundled, this adjustment won't matter anyway since the
8514 * packet will be going out...
8516 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8517 stcb->asoc.peer_auth_chunks);
8518 if (data_auth_reqd && (auth == NULL)) {
8519 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8521 /* now let's add any data within the MTU constraints */
8522 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8524 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8525 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8531 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8532 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8542 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8543 (skip_data_for_this_net == 0)) ||
8545 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8546 if (no_data_chunks) {
8547 /* let only control go out */
8551 if (net->flight_size >= net->cwnd) {
8552 /* skip this net, no room for data */
8556 if ((chk->whoTo != NULL) &&
8557 (chk->whoTo != net)) {
8558 /* Don't send the chunk on this net */
8561 if (asoc->sctp_cmt_on_off == 0) {
8562 if ((asoc->alternate) &&
8563 (asoc->alternate != net) &&
8564 (chk->whoTo == NULL)) {
8566 } else if ((net != asoc->primary_destination) &&
8567 (asoc->alternate == NULL) &&
8568 (chk->whoTo == NULL)) {
8572 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8574 * strange, we have a chunk that is
8575 * too big for its destination and
8576 * yet no fragment ok flag.
8577 * Something went wrong when the
8578 * PMTU changed...we did not mark
8579 * this chunk for some reason?? I
8580 * will fix it here by letting IP
8581 * fragment it for now and printing
8582 * a warning. This really should not happen.
8585 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8586 chk->send_size, mtu);
8587 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8589 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8590 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8591 struct sctp_data_chunk *dchkh;
8593 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8594 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8596 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8597 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8598 /* ok we will add this one */
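/*
 * A DATA chunk qualifies for bundling either because it fits in both
 * the remaining local space (mtu) and the remaining per-destination
 * space (r_mtu), or because it is marked fragment-OK and still fits
 * within the peer's receive window.
 */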
8601 * Add an AUTH chunk, if chunk
8602 * requires it, save the offset into
8603 * the chain for AUTH
8605 if (data_auth_reqd) {
8607 outchain = sctp_add_auth_chunk(outchain,
8613 auth_keyid = chk->auth_keyid;
8615 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8616 } else if (override_ok) {
8621 auth_keyid = chk->auth_keyid;
8623 } else if (auth_keyid != chk->auth_keyid) {
8631 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8632 chk->send_size, chk->copy_by_ref);
8633 if (outchain == NULL) {
8634 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8635 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8636 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8639 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8642 /* update our MTU size */
8643 /* Do clear IP_DF ? */
8644 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8647 /* unsigned subtraction of mtu */
8648 if (mtu > chk->send_size)
8649 mtu -= chk->send_size;
8652 /* unsigned subtraction of r_mtu */
8653 if (r_mtu > chk->send_size)
8654 r_mtu -= chk->send_size;
8658 to_out += chk->send_size;
8659 if ((to_out > mx_mtu) && no_fragmentflg) {
8661 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8663 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8667 chk->window_probe = 0;
8668 data_list[bundle_at++] = chk;
8669 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8672 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8673 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8674 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8676 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8678 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8679 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8689 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8691 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8692 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8693 data_list[0]->window_probe = 1;
8694 net->window_probe = 1;
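/*
 * Nothing is in flight and only a single chunk may be sent: flag this
 * transmission as a window probe, presumably so that the later
 * accounting treats it as a zero-window probe rather than a normal
 * data transmission.
 */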
8700 * Must be sent in order of the
8701 * TSN's (on a network)
8705 } /* for (chunk gather loop for this net) */
8706 } /* if asoc.state OPEN */
8708 /* Is there something to send for this destination? */
8710 /* We may need to start a control timer or two */
8712 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8715 * do NOT clear the asconf flag as it is
8716 * used to do appropriate source address
8721 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8724 /* must start a send timer if data is being sent */
8725 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8727 * no timer running on this destination, restart it.
8730 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8732 /* Now send it, if there is anything to send :> */
8733 if ((error = sctp_lowlevel_chunk_output(inp,
8736 (struct sockaddr *)&net->ro._l_addr,
8744 inp->sctp_lport, stcb->rport,
8745 htonl(stcb->asoc.peer_vtag),
8746 net->port, so_locked, NULL, NULL))) {
8747 /* error, we could not output */
8748 if (error == ENOBUFS) {
8749 SCTP_STAT_INCR(sctps_lowlevelerr);
8750 asoc->ifp_had_enobuf = 1;
8752 if (from_where == 0) {
8753 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8755 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8757 if (*now_filled == 0) {
8758 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8760 *now = net->last_sent_time;
8762 net->last_sent_time = *now;
8766 if (error == EHOSTUNREACH) {
8768 * Destination went unreachable during this send.
8771 sctp_move_chunks_from_net(stcb, net);
8775 * I add this line to be paranoid. As far as
8776 * I can tell the continue takes us back to
8777 * the top of the for, but just to make sure
8778 * I will reset these again here.
8780 ctl_cnt = bundle_at = 0;
8781 continue; /* This takes us back to the
8782 * for() for the nets. */
8784 asoc->ifp_had_enobuf = 0;
8789 if (bundle_at || hbflag) {
8790 /* For data/asconf and hb set time */
8791 if (*now_filled == 0) {
8792 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8794 *now = net->last_sent_time;
8796 net->last_sent_time = *now;
8800 *num_out += (ctl_cnt + bundle_at);
8803 /* setup for a RTO measurement */
8804 tsns_sent = data_list[0]->rec.data.TSN_seq;
8805 /* fill time if not already filled */
8806 if (*now_filled == 0) {
8807 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8809 *now = asoc->time_last_sent;
8811 asoc->time_last_sent = *now;
8813 if (net->rto_needed) {
8814 data_list[0]->do_rtt = 1;
8815 net->rto_needed = 0;
8817 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8818 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
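/*
 * The helper above takes the chunks just handed to the wire off the
 * send queue and updates the sent-queue bookkeeping for them (see its
 * definition for the exact details).
 */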
8824 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8825 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8828 if (old_start_at == NULL) {
8829 old_start_at = start_at;
8830 start_at = TAILQ_FIRST(&asoc->nets);
8832 goto again_one_more_time;
8835 * At the end there should be no NON timed chunks hanging on this queue.
8838 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8839 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8841 if ((*num_out == 0) && (*reason_code == 0)) {
8846 sctp_clean_up_ctl(stcb, asoc, so_locked);
8851 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8854 * Prepend an OPERATIONAL_ERROR chunk header and put on the end of
8855 * the control chunk queue.
8857 struct sctp_chunkhdr *hdr;
8858 struct sctp_tmit_chunk *chk;
8861 SCTP_TCB_LOCK_ASSERT(stcb);
8862 sctp_alloc_a_chunk(stcb, chk);
8865 sctp_m_freem(op_err);
8868 chk->copy_by_ref = 0;
8869 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
8870 if (op_err == NULL) {
8871 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
8876 while (mat != NULL) {
8877 chk->send_size += SCTP_BUF_LEN(mat);
8878 mat = SCTP_BUF_NEXT(mat);
8880 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8881 chk->rec.chunk_id.can_take_data = 1;
8882 chk->sent = SCTP_DATAGRAM_UNSENT;
8885 chk->asoc = &stcb->asoc;
8888 hdr = mtod(op_err, struct sctp_chunkhdr *);
8889 hdr->chunk_type = SCTP_OPERATION_ERROR;
8890 hdr->chunk_flags = 0;
8891 hdr->chunk_length = htons(chk->send_size);
8892 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8895 chk->asoc->ctrl_queue_cnt++;
8899 sctp_send_cookie_echo(struct mbuf *m,
8901 struct sctp_tcb *stcb,
8902 struct sctp_nets *net)
8905 * pull out the cookie and put it at the front of the control chunk queue.
8909 struct mbuf *cookie;
8910 struct sctp_paramhdr parm, *phdr;
8911 struct sctp_chunkhdr *hdr;
8912 struct sctp_tmit_chunk *chk;
8913 uint16_t ptype, plen;
8915 /* First find the cookie in the param area */
8917 at = offset + sizeof(struct sctp_init_chunk);
8919 SCTP_TCB_LOCK_ASSERT(stcb);
8921 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8925 ptype = ntohs(phdr->param_type);
8926 plen = ntohs(phdr->param_length);
8927 if (ptype == SCTP_STATE_COOKIE) {
8930 /* found the cookie */
8931 if ((pad = (plen % 4))) {
8934 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
8935 if (cookie == NULL) {
8939 #ifdef SCTP_MBUF_LOGGING
8940 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8943 for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
8944 if (SCTP_BUF_IS_EXTENDED(mat)) {
8945 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8952 at += SCTP_SIZE32(plen);
8954 if (cookie == NULL) {
8955 /* Did not find the cookie */
8958 /* ok, we got the cookie, let's change it into a cookie echo chunk */
8960 /* first the change from param to cookie */
8961 hdr = mtod(cookie, struct sctp_chunkhdr *);
8962 hdr->chunk_type = SCTP_COOKIE_ECHO;
8963 hdr->chunk_flags = 0;
8964 /* get the chunk stuff now and place it in the FRONT of the queue */
8965 sctp_alloc_a_chunk(stcb, chk);
8968 sctp_m_freem(cookie);
8971 chk->copy_by_ref = 0;
8972 chk->send_size = plen;
8973 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8974 chk->rec.chunk_id.can_take_data = 0;
8975 chk->sent = SCTP_DATAGRAM_UNSENT;
8977 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8978 chk->asoc = &stcb->asoc;
8981 atomic_add_int(&chk->whoTo->ref_count, 1);
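/*
 * Insert the COOKIE ECHO at the head of the control queue so it is
 * transmitted before any other pending control chunks.
 */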
8982 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
8983 chk->asoc->ctrl_queue_cnt++;
8988 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
8992 struct sctp_nets *net)
8995 * take a HB request and make it into a HB ack and send it.
8997 struct mbuf *outchain;
8998 struct sctp_chunkhdr *chdr;
8999 struct sctp_tmit_chunk *chk;
9003 /* must have a net pointer */
9006 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
9007 if (outchain == NULL) {
9008 /* gak out of memory */
9011 #ifdef SCTP_MBUF_LOGGING
9012 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9015 for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
9016 if (SCTP_BUF_IS_EXTENDED(mat)) {
9017 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9022 chdr = mtod(outchain, struct sctp_chunkhdr *);
9023 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9024 chdr->chunk_flags = 0;
9025 if (chk_length % 4) {
9027 uint32_t cpthis = 0;
9030 padlen = 4 - (chk_length % 4);
9031 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
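/*
 * The m_copyback() call above appends padlen zero bytes (cpthis is 0),
 * keeping the HEARTBEAT-ACK padded to a 32-bit boundary as SCTP chunks
 * must be.
 */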
9033 sctp_alloc_a_chunk(stcb, chk);
9036 sctp_m_freem(outchain);
9039 chk->copy_by_ref = 0;
9040 chk->send_size = chk_length;
9041 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9042 chk->rec.chunk_id.can_take_data = 1;
9043 chk->sent = SCTP_DATAGRAM_UNSENT;
9046 chk->asoc = &stcb->asoc;
9047 chk->data = outchain;
9049 atomic_add_int(&chk->whoTo->ref_count, 1);
9050 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9051 chk->asoc->ctrl_queue_cnt++;
9055 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9057 /* formulate and queue a cookie-ack back to sender */
9058 struct mbuf *cookie_ack;
9059 struct sctp_chunkhdr *hdr;
9060 struct sctp_tmit_chunk *chk;
9063 SCTP_TCB_LOCK_ASSERT(stcb);
9065 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
9066 if (cookie_ack == NULL) {
9070 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9071 sctp_alloc_a_chunk(stcb, chk);
9074 sctp_m_freem(cookie_ack);
9077 chk->copy_by_ref = 0;
9078 chk->send_size = sizeof(struct sctp_chunkhdr);
9079 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9080 chk->rec.chunk_id.can_take_data = 1;
9081 chk->sent = SCTP_DATAGRAM_UNSENT;
9084 chk->asoc = &stcb->asoc;
9085 chk->data = cookie_ack;
9086 if (chk->asoc->last_control_chunk_from != NULL) {
9087 chk->whoTo = chk->asoc->last_control_chunk_from;
9088 atomic_add_int(&chk->whoTo->ref_count, 1);
9092 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9093 hdr->chunk_type = SCTP_COOKIE_ACK;
9094 hdr->chunk_flags = 0;
9095 hdr->chunk_length = htons(chk->send_size);
9096 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9097 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9098 chk->asoc->ctrl_queue_cnt++;
9104 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9106 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9107 struct mbuf *m_shutdown_ack;
9108 struct sctp_shutdown_ack_chunk *ack_cp;
9109 struct sctp_tmit_chunk *chk;
9111 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9112 if (m_shutdown_ack == NULL) {
9116 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9117 sctp_alloc_a_chunk(stcb, chk);
9120 sctp_m_freem(m_shutdown_ack);
9123 chk->copy_by_ref = 0;
9124 chk->send_size = sizeof(struct sctp_chunkhdr);
9125 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9126 chk->rec.chunk_id.can_take_data = 1;
9127 chk->sent = SCTP_DATAGRAM_UNSENT;
9130 chk->asoc = &stcb->asoc;
9131 chk->data = m_shutdown_ack;
9134 atomic_add_int(&chk->whoTo->ref_count, 1);
9136 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9137 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9138 ack_cp->ch.chunk_flags = 0;
9139 ack_cp->ch.chunk_length = htons(chk->send_size);
9140 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9141 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9142 chk->asoc->ctrl_queue_cnt++;
9147 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9149 /* formulate and queue a SHUTDOWN to the sender */
9150 struct mbuf *m_shutdown;
9151 struct sctp_shutdown_chunk *shutdown_cp;
9152 struct sctp_tmit_chunk *chk;
9154 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9155 if (m_shutdown == NULL) {
9159 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9160 sctp_alloc_a_chunk(stcb, chk);
9163 sctp_m_freem(m_shutdown);
9166 chk->copy_by_ref = 0;
9167 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9168 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9169 chk->rec.chunk_id.can_take_data = 1;
9170 chk->sent = SCTP_DATAGRAM_UNSENT;
9173 chk->asoc = &stcb->asoc;
9174 chk->data = m_shutdown;
9177 atomic_add_int(&chk->whoTo->ref_count, 1);
9179 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9180 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9181 shutdown_cp->ch.chunk_flags = 0;
9182 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9183 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9184 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9185 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9186 chk->asoc->ctrl_queue_cnt++;
9191 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9194 * formulate and queue an ASCONF to the peer. ASCONF parameters
9195 * should be queued on the assoc queue.
9197 struct sctp_tmit_chunk *chk;
9198 struct mbuf *m_asconf;
9201 SCTP_TCB_LOCK_ASSERT(stcb);
9203 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9204 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9205 /* can't send a new one if there is one in flight already */
9208 /* compose an ASCONF chunk, maximum length is PMTU */
9209 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9210 if (m_asconf == NULL) {
9213 sctp_alloc_a_chunk(stcb, chk);
9216 sctp_m_freem(m_asconf);
9219 chk->copy_by_ref = 0;
9220 chk->data = m_asconf;
9221 chk->send_size = len;
9222 chk->rec.chunk_id.id = SCTP_ASCONF;
9223 chk->rec.chunk_id.can_take_data = 0;
9224 chk->sent = SCTP_DATAGRAM_UNSENT;
9226 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9227 chk->asoc = &stcb->asoc;
9230 atomic_add_int(&chk->whoTo->ref_count, 1);
9232 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9233 chk->asoc->ctrl_queue_cnt++;
9238 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9241 * formulate and queue an asconf-ack back to the sender. the asconf-ack
9242 * must be stored in the tcb.
9244 struct sctp_tmit_chunk *chk;
9245 struct sctp_asconf_ack *ack, *latest_ack;
9247 struct sctp_nets *net = NULL;
9249 SCTP_TCB_LOCK_ASSERT(stcb);
9250 /* Get the latest ASCONF-ACK */
9251 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9252 if (latest_ack == NULL) {
9255 if (latest_ack->last_sent_to != NULL &&
9256 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9257 /* we're doing a retransmission */
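/*
 * The previous ASCONF-ACK already went out to the address this request
 * arrived from, so this is a retransmission; pick an alternate
 * destination for it this time.
 */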
9258 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9261 if (stcb->asoc.last_control_chunk_from == NULL) {
9262 if (stcb->asoc.alternate) {
9263 net = stcb->asoc.alternate;
9265 net = stcb->asoc.primary_destination;
9268 net = stcb->asoc.last_control_chunk_from;
9273 if (stcb->asoc.last_control_chunk_from == NULL) {
9274 if (stcb->asoc.alternate) {
9275 net = stcb->asoc.alternate;
9277 net = stcb->asoc.primary_destination;
9280 net = stcb->asoc.last_control_chunk_from;
9283 latest_ack->last_sent_to = net;
9285 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9286 if (ack->data == NULL) {
9289 /* copy the asconf_ack */
9290 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
9291 if (m_ack == NULL) {
9292 /* couldn't copy it */
9295 #ifdef SCTP_MBUF_LOGGING
9296 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9299 for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) {
9300 if (SCTP_BUF_IS_EXTENDED(mat)) {
9301 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9307 sctp_alloc_a_chunk(stcb, chk);
9311 sctp_m_freem(m_ack);
9314 chk->copy_by_ref = 0;
9318 atomic_add_int(&chk->whoTo->ref_count, 1);
9323 chk->send_size = ack->len;
9324 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9325 chk->rec.chunk_id.can_take_data = 1;
9326 chk->sent = SCTP_DATAGRAM_UNSENT;
9328 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
9329 chk->asoc = &stcb->asoc;
9331 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9332 chk->asoc->ctrl_queue_cnt++;
9339 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9340 struct sctp_tcb *stcb,
9341 struct sctp_association *asoc,
9342 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9343 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9349 * send out one MTU of retransmission. If fast_retransmit is
9350 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9351 * rwnd. For a Cookie or Asconf in the control chunk queue we
9352 * retransmit them by themselves.
9354 * For data chunks we will pick out the lowest TSN's in the sent_queue
9355 * marked for resend and bundle them all together (up to an MTU of
9356 * destination). The address to send to should have been
9357 * selected/changed where the retransmission was marked (i.e. in FR
9358 * or t3-timeout routines).
9360 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9361 struct sctp_tmit_chunk *chk, *fwd;
9362 struct mbuf *m, *endofchain;
9363 struct sctp_nets *net = NULL;
9364 uint32_t tsns_sent = 0;
9365 int no_fragmentflg, bundle_at, cnt_thru;
9367 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9368 struct sctp_auth_chunk *auth = NULL;
9369 uint32_t auth_offset = 0;
9370 uint16_t auth_keyid;
9371 int override_ok = 1;
9372 int data_auth_reqd = 0;
9375 SCTP_TCB_LOCK_ASSERT(stcb);
9376 tmr_started = ctl_cnt = bundle_at = error = 0;
9381 endofchain = m = NULL;
9382 auth_keyid = stcb->asoc.authinfo.active_keyid;
9383 #ifdef SCTP_AUDITING_ENABLED
9384 sctp_audit_log(0xC3, 1);
9386 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9387 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9388 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9389 asoc->sent_queue_retran_cnt);
9390 asoc->sent_queue_cnt = 0;
9391 asoc->sent_queue_cnt_removeable = 0;
9392 /* send back 0/0 so we enter normal transmission */
9396 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9397 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9398 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9399 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9400 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9403 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9404 if (chk != asoc->str_reset) {
9406 * not eligible for retran if it's not ours.
9413 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9417 * Add an AUTH chunk, if chunk requires it save the
9418 * offset into the chain for AUTH
9420 if ((auth == NULL) &&
9421 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9422 stcb->asoc.peer_auth_chunks))) {
9423 m = sctp_add_auth_chunk(m, &endofchain,
9424 &auth, &auth_offset,
9426 chk->rec.chunk_id.id);
9427 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9429 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9435 /* do we have control chunks to retransmit? */
9437 /* Start a timer no matter if we succeed or fail */
9438 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9439 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9440 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9441 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9442 chk->snd_count++; /* update our count */
9443 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9444 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9445 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9446 no_fragmentflg, 0, 0,
9447 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9448 chk->whoTo->port, so_locked, NULL, NULL))) {
9449 SCTP_STAT_INCR(sctps_lowlevelerr);
9456 * We don't want to mark the net->sent time here, since
9457 * we use this for HB, and retransmissions cannot measure RTT
9459 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9461 chk->sent = SCTP_DATAGRAM_SENT;
9462 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9466 /* Clean up the fwd-tsn list */
9467 sctp_clean_up_ctl(stcb, asoc, so_locked);
9472 * Ok, it is just data retransmission we need to do or that and a
9473 * fwd-tsn with it all.
9475 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9476 return (SCTP_RETRAN_DONE);
9478 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9479 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9480 /* not yet open, resend the cookie and that is it */
9483 #ifdef SCTP_AUDITING_ENABLED
9484 sctp_auditing(20, inp, stcb, NULL);
9486 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9487 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9488 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9489 /* No, not sent to this net or not ready for rtx */
9492 if (chk->data == NULL) {
9493 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9494 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9497 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9498 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9499 /* Gak, we have exceeded max unlucky retran, abort! */
9500 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9502 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9503 atomic_add_int(&stcb->asoc.refcnt, 1);
9504 sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
9505 SCTP_TCB_LOCK(stcb);
9506 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9507 return (SCTP_RETRAN_EXIT);
9509 /* pick up the net */
9511 switch (net->ro._l_addr.sa.sa_family) {
9514 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9519 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9528 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9529 /* No room in peers rwnd */
9532 tsn = asoc->last_acked_seq + 1;
9533 if (tsn == chk->rec.data.TSN_seq) {
9535 * we make a special exception for this
9536 * case. The peer has no rwnd but is missing
9537 * the lowest chunk.. which is probably what
9538 * is holding up the rwnd.
9540 goto one_chunk_around;
9545 if (asoc->peers_rwnd < mtu) {
9547 if ((asoc->peers_rwnd == 0) &&
9548 (asoc->total_flight == 0)) {
9549 chk->window_probe = 1;
9550 chk->whoTo->window_probe = 1;
9553 #ifdef SCTP_AUDITING_ENABLED
9554 sctp_audit_log(0xC3, 2);
9558 net->fast_retran_ip = 0;
9559 if (chk->rec.data.doing_fast_retransmit == 0) {
9561 * if no FR in progress skip destination that have
9562 * flight_size > cwnd.
9564 if (net->flight_size >= net->cwnd) {
9569 * Mark the destination net to have FR recovery
9573 net->fast_retran_ip = 1;
9577 * if no AUTH is yet included and this chunk requires it,
9578 * make sure to account for it. We don't apply the size
9579 * until the AUTH chunk is actually added below in case
9580 * there is no room for this chunk.
9582 if (data_auth_reqd && (auth == NULL)) {
9583 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9587 if ((chk->send_size <= (mtu - dmtu)) ||
9588 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9589 /* ok we will add this one */
9590 if (data_auth_reqd) {
9592 m = sctp_add_auth_chunk(m,
9598 auth_keyid = chk->auth_keyid;
9600 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9601 } else if (override_ok) {
9602 auth_keyid = chk->auth_keyid;
9604 } else if (chk->auth_keyid != auth_keyid) {
9605 /* different keyid, so done bundling */
9609 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9611 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9614 /* Do clear IP_DF ? */
9615 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9618 /* update our MTU size */
9619 if (mtu > (chk->send_size + dmtu))
9620 mtu -= (chk->send_size + dmtu);
9623 data_list[bundle_at++] = chk;
9624 if (one_chunk && (asoc->total_flight <= 0)) {
9625 SCTP_STAT_INCR(sctps_windowprobed);
9628 if (one_chunk == 0) {
9630 * now are there any more forward from chk to pick up?
9633 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9634 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9635 /* Nope, not for retran */
9638 if (fwd->whoTo != net) {
9639 /* Nope, not the net in question */
9642 if (data_auth_reqd && (auth == NULL)) {
9643 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9646 if (fwd->send_size <= (mtu - dmtu)) {
9647 if (data_auth_reqd) {
9649 m = sctp_add_auth_chunk(m,
9655 auth_keyid = fwd->auth_keyid;
9657 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9658 } else if (override_ok) {
9659 auth_keyid = fwd->auth_keyid;
9661 } else if (fwd->auth_keyid != auth_keyid) {
9669 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9671 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9674 /* Do clear IP_DF ? */
9675 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9678 /* update our MTU size */
9679 if (mtu > (fwd->send_size + dmtu))
9680 mtu -= (fwd->send_size + dmtu);
9683 data_list[bundle_at++] = fwd;
9684 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9688 /* can't fit so we are done */
9693 /* Is there something to send for this destination? */
9696 * No matter if we fail or succeed we should start a
9697 * timer. A failure is like a lost IP packet :-)
9699 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9701 * no timer running on this destination
9704 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9707 /* Now let's send it, if there is anything to send :> */
9708 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9709 (struct sockaddr *)&net->ro._l_addr, m,
9710 auth_offset, auth, auth_keyid,
9711 no_fragmentflg, 0, 0,
9712 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9713 net->port, so_locked, NULL, NULL))) {
9714 /* error, we could not output */
9715 SCTP_STAT_INCR(sctps_lowlevelerr);
9723 * We don't want to mark the net->sent time here
9724 * since we use this for HB, and retransmissions cannot measure RTT
9727 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9729 /* For auto-close */
9731 if (*now_filled == 0) {
9732 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9733 *now = asoc->time_last_sent;
9736 asoc->time_last_sent = *now;
9738 *cnt_out += bundle_at;
9739 #ifdef SCTP_AUDITING_ENABLED
9740 sctp_audit_log(0xC4, bundle_at);
9743 tsns_sent = data_list[0]->rec.data.TSN_seq;
9745 for (i = 0; i < bundle_at; i++) {
9746 SCTP_STAT_INCR(sctps_sendretransdata);
9747 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9749 * When we have revoked data, and we
9750 * retransmit it, then we clear the revoked
9751 * flag since this flag dictates if we
9752 * subtracted from the fs
9754 if (data_list[i]->rec.data.chunk_was_revoked) {
9755 /* Deflate the cwnd */
9756 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9757 data_list[i]->rec.data.chunk_was_revoked = 0;
9759 data_list[i]->snd_count++;
9760 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9761 /* record the time */
9762 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9763 if (data_list[i]->book_size_scale) {
9765 * need to double the book size on this one.
9768 data_list[i]->book_size_scale = 0;
9770 * Since we double the booksize, we
9771 * must also double the output queue
9772 * size, since this gets shrunk when
9773 * we free by this amount.
9775 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9776 data_list[i]->book_size *= 2;
9780 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9781 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9782 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9784 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9785 (uint32_t) (data_list[i]->send_size +
9786 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
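/*
 * Charge the peer's advertised rwnd for the retransmitted payload plus
 * the configurable per-chunk overhead (sctp_peer_chunk_oh), which
 * approximates the receiver-side buffering cost of each chunk.
 */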
9788 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9789 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9790 data_list[i]->whoTo->flight_size,
9791 data_list[i]->book_size,
9792 (uintptr_t) data_list[i]->whoTo,
9793 data_list[i]->rec.data.TSN_seq);
9795 sctp_flight_size_increase(data_list[i]);
9796 sctp_total_flight_increase(stcb, data_list[i]);
9797 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9798 /* SWS sender side engages */
9799 asoc->peers_rwnd = 0;
9802 (data_list[i]->rec.data.doing_fast_retransmit)) {
9803 SCTP_STAT_INCR(sctps_sendfastretrans);
9804 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9805 (tmr_started == 0)) {
9807 * ok we just fast-retrans'd
9808 * the lowest TSN, i.e. the
9809 * first on the list. In
9810 * this case we want to give
9811 * some more time to get a
9812 * SACK back without a t3-expiring.
9815 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9816 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9817 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9821 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9822 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9824 #ifdef SCTP_AUDITING_ENABLED
9825 sctp_auditing(21, inp, stcb, NULL);
9831 if (asoc->sent_queue_retran_cnt <= 0) {
9832 /* all done we have no more to retran */
9833 asoc->sent_queue_retran_cnt = 0;
9837 /* No more room in rwnd */
9840 /* stop the for loop here. we sent out a packet */
9847 sctp_timer_validation(struct sctp_inpcb *inp,
9848 struct sctp_tcb *stcb,
9849 struct sctp_association *asoc)
9851 struct sctp_nets *net;
9853 /* Validate that a timer is running somewhere */
9854 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9855 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9856 /* Here is a timer */
9860 SCTP_TCB_LOCK_ASSERT(stcb);
9861 /* Gak, we did not have a timer somewhere */
9862 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9863 if (asoc->alternate) {
9864 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9866 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9872 sctp_chunk_output(struct sctp_inpcb *inp,
9873 struct sctp_tcb *stcb,
9876 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9882 * Ok this is the generic chunk service queue. we must do the following:
9884 * - See if there are retransmits pending, if so we must do these first.
9886 * - Service the stream queue that is next, moving any
9887 * message (note I must get a complete message i.e.
9888 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning TSN's.
9890 * - Check to see if the cwnd/rwnd allows any output, if so we
9891 * go ahead and formulate and send the low level chunks. Making sure
9892 * to combine any control in the control chunk queue also.
9894 struct sctp_association *asoc;
9895 struct sctp_nets *net;
9896 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
9897 unsigned int burst_cnt = 0;
9901 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9904 unsigned int tot_frs = 0;
9907 /* The Nagle algorithm is only applied when handling a send call. */
9908 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9909 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9917 SCTP_TCB_LOCK_ASSERT(stcb);
9919 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9921 if ((un_sent <= 0) &&
9922 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9923 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9924 (asoc->sent_queue_retran_cnt == 0)) {
9925 /* Nothing to do unless there is something left to be sent */
9929 * Do we have something to send (data or control) AND a sack timer
9930 * running? If so, piggy-back the sack.
9932 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9933 sctp_send_sack(stcb, so_locked);
9934 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
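/*
 * Queuing the SACK now lets it be bundled with whatever goes out below,
 * and the delayed-ack timer is stopped since the acknowledgement is no
 * longer pending.
 */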
9936 while (asoc->sent_queue_retran_cnt) {
9938 * Ok, it is retransmission time only, we send out only ONE
9939 * packet with a single call off to the retran code.
9941 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9943 * Special hook for handling cookies discarded
9944 * by the peer that carried data. Send cookie-ack only
9945 * and then the next call will get the retran's.
9947 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9949 &now, &now_filled, frag_point, so_locked);
9951 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9952 /* if it's not from a HB then do it */
9954 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9960 * it's from any other place, we don't allow retran
9961 * output (only control)
9966 /* Can't send anymore */
9968 * now let's push out control by calling med-level
9969 * output once. this assures that we WILL send HB's if queued to it.
9972 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9974 &now, &now_filled, frag_point, so_locked);
9975 #ifdef SCTP_AUDITING_ENABLED
9976 sctp_auditing(8, inp, stcb, NULL);
9978 sctp_timer_validation(inp, stcb, asoc);
9983 * The count was off.. retran is not happening so do
9984 * the normal retransmission.
9986 #ifdef SCTP_AUDITING_ENABLED
9987 sctp_auditing(9, inp, stcb, NULL);
9989 if (ret == SCTP_RETRAN_EXIT) {
9994 if (from_where == SCTP_OUTPUT_FROM_T3) {
9995 /* Only one transmission allowed out of a timeout */
9996 #ifdef SCTP_AUDITING_ENABLED
9997 sctp_auditing(10, inp, stcb, NULL);
9999 /* Push out any control */
10000 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10001 &now, &now_filled, frag_point, so_locked);
10004 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10005 /* Hit FR burst limit */
10008 if ((num_out == 0) && (ret == 0)) {
10009 /* No more retrans to send */
10013 #ifdef SCTP_AUDITING_ENABLED
10014 sctp_auditing(12, inp, stcb, NULL);
10016 /* Check for bad destinations, if they exist move chunks around. */
10017 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10018 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10020 * if possible move things off of this address. we
10021 * still may send below due to the dormant state, but
10022 * we try to find an alternate address to send to,
10023 * and if we have one we move all queued data on the
10024 * out wheel to this alternate address.
10026 if (net->ref_count > 1)
10027 sctp_move_chunks_from_net(stcb, net);
10030 * if ((asoc->sat_network) || (net->addr_is_local))
10031 * { burst_limit = asoc->max_burst *
10032 * SCTP_SAT_NETWORK_BURST_INCR; }
10034 if (asoc->max_burst > 0) {
10035 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10036 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10038 * JRS - Use the congestion
10039 * control given in the
10040 * congestion control module
10042 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10043 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10044 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10046 SCTP_STAT_INCR(sctps_maxburstqueued);
10048 net->fast_retran_ip = 0;
10050 if (net->flight_size == 0) {
10052 * Should be decaying the cwnd here.
10064 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10065 &reason_code, 0, from_where,
10066 &now, &now_filled, frag_point, so_locked);
10068 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10069 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10070 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10072 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10073 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10074 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10078 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10080 tot_out += num_out;
10082 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10083 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10084 if (num_out == 0) {
10085 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10090 * When the Nagle algorithm is used, look at how
10091 * much is unsent, then if it's smaller than an MTU
10092 * and we have data in flight we stop, except if we
10093 * are handling a fragmented user message.
10095 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10096 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
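/*
 * un_sent estimates what is still to be sent: queued-but-unsent payload
 * bytes plus one DATA chunk header per message still sitting on a
 * stream queue.
 */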
10097 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10098 (stcb->asoc.total_flight > 0) &&
10099 ((stcb->asoc.locked_on_sending == NULL) ||
10100 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
10104 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10105 TAILQ_EMPTY(&asoc->send_queue) &&
10106 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
10107 /* Nothing left to send */
10110 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10111 /* Nothing left to send */
10114 } while (num_out &&
10115 ((asoc->max_burst == 0) ||
10116 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10117 (burst_cnt < asoc->max_burst)));
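/*
 * Keep looping while the medium-level output produced something, until
 * nothing more goes out or (when the packet-count max-burst is in
 * effect rather than the cwnd-based variant) the burst limit is
 * reached.
 */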
10119 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10120 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10121 SCTP_STAT_INCR(sctps_maxburstqueued);
10122 asoc->burst_limit_applied = 1;
10123 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10124 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10127 asoc->burst_limit_applied = 0;
10130 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10131 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10133 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10137 * Now we need to clean up the control chunk chain if an ECNE is on
10138 * it. It must be marked as UNSENT again so next call will continue
10139 * to send it until such time that we get a CWR, to remove it.
10141 if (stcb->asoc.ecn_echo_cnt_onq)
10142 sctp_fix_ecn_echo(asoc);
10149 struct sctp_inpcb *inp,
10151 struct sockaddr *addr,
10152 struct mbuf *control,
10157 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10160 if (inp->sctp_socket == NULL) {
10161 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10164 return (sctp_sosend(inp->sctp_socket,
10166 (struct uio *)NULL,
10174 send_forward_tsn(struct sctp_tcb *stcb,
10175 struct sctp_association *asoc)
10177 struct sctp_tmit_chunk *chk;
10178 struct sctp_forward_tsn_chunk *fwdtsn;
10179 uint32_t advance_peer_ack_point;
10181 SCTP_TCB_LOCK_ASSERT(stcb);
10182 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10183 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10184 /* mark it as unsent */
10185 chk->sent = SCTP_DATAGRAM_UNSENT;
10186 chk->snd_count = 0;
10187 /* Do we correct its output location? */
10189 sctp_free_remote_addr(chk->whoTo);
10192 goto sctp_fill_in_rest;
10195 /* Ok if we reach here we must build one */
10196 sctp_alloc_a_chunk(stcb, chk);
10200 asoc->fwd_tsn_cnt++;
10201 chk->copy_by_ref = 0;
10202 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10203 chk->rec.chunk_id.can_take_data = 0;
10206 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
10207 if (chk->data == NULL) {
10208 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10211 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10212 chk->sent = SCTP_DATAGRAM_UNSENT;
10213 chk->snd_count = 0;
10214 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10215 asoc->ctrl_queue_cnt++;
10218 * Here we go through and fill out the part that deals with
10219 * stream/seq of the ones we skip.
10221 SCTP_BUF_LEN(chk->data) = 0;
10223 struct sctp_tmit_chunk *at, *tp1, *last;
10224 struct sctp_strseq *strseq;
10225 unsigned int cnt_of_space, i, ovh;
10226 unsigned int space_needed;
10227 unsigned int cnt_of_skipped = 0;
10229 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10230 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
10231 /* no more to look at */
10234 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10235 /* We don't report these */
10240 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10241 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10243 cnt_of_space = M_TRAILINGSPACE(chk->data);
10245 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10246 ovh = SCTP_MIN_OVERHEAD;
10248 ovh = SCTP_MIN_V4_OVERHEAD;
10250 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10251 /* trim to an MTU size */
10252 cnt_of_space = asoc->smallest_mtu - ovh;
10254 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10255 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10256 0xff, 0, cnt_of_skipped,
10257 asoc->advanced_peer_ack_point);
10260 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10261 if (cnt_of_space < space_needed) {
10263 * ok we must trim down the chunk by lowering the
10264 * advance peer ack point.
10266 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10267 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10268 0xff, 0xff, cnt_of_space,
10271 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10272 cnt_of_skipped /= sizeof(struct sctp_strseq);
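/*
 * Only this many stream/sequence entries fit in the space we have; the
 * walk below finds the highest TSN that can still be reported and
 * lowers the advance peer ack point accordingly.
 */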
10274 * Go through and find the TSN that will be the one we report.
10277 at = TAILQ_FIRST(&asoc->sent_queue);
10279 for (i = 0; i < cnt_of_skipped; i++) {
10280 tp1 = TAILQ_NEXT(at, sctp_next);
10287 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10288 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10289 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10290 asoc->advanced_peer_ack_point);
10294 * last now points to last one I can report, update the advance peer ack point.
10298 advance_peer_ack_point = last->rec.data.TSN_seq;
10299 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10300 cnt_of_skipped * sizeof(struct sctp_strseq);
10302 chk->send_size = space_needed;
10303 /* Setup the chunk */
10304 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10305 fwdtsn->ch.chunk_length = htons(chk->send_size);
10306 fwdtsn->ch.chunk_flags = 0;
10307 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10308 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10309 SCTP_BUF_LEN(chk->data) = chk->send_size;
10312 * Move pointer to after the fwdtsn and transfer to the
10315 strseq = (struct sctp_strseq *)fwdtsn;
10317 * Now populate the strseq list. This is done blindly
10318 * without pulling out duplicate stream info. This is
10319 * inefficient but won't harm the process since the peer will
10320 * look at these in sequence and will thus release anything.
10321 * It could mean we exceed the PMTU and chop off some that
10322 * we could have included.. but this is unlikely (aka 1432/4
10323 * would mean 300+ stream seq's would have to be reported in
10324 * one FWD-TSN). With a bit of work we can later FIX this to
10325 * optimize and pull out duplicates.. but it does add more
10326 * overhead. So for now... not!
10328 at = TAILQ_FIRST(&asoc->sent_queue);
10329 for (i = 0; i < cnt_of_skipped; i++) {
10330 tp1 = TAILQ_NEXT(at, sctp_next);
10333 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10334 /* We don't report these */
10339 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10340 at->rec.data.fwd_tsn_cnt = 0;
10342 strseq->stream = ntohs(at->rec.data.stream_number);
10343 strseq->sequence = ntohs(at->rec.data.stream_seq);
10352 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10353 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10359 * Queue up a SACK or NR-SACK in the control queue.
10360 * We must first check to see if a SACK or NR-SACK is
10361 * somehow on the control queue.
10362 * If so, we will take it and remove the old one.
10364 struct sctp_association *asoc;
10365 struct sctp_tmit_chunk *chk, *a_chk;
10366 struct sctp_sack_chunk *sack;
10367 struct sctp_nr_sack_chunk *nr_sack;
10368 struct sctp_gap_ack_block *gap_descriptor;
10369 struct sack_track *selector;
10374 int limit_reached = 0;
10375 unsigned int i, siz, j;
10376 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10379 uint32_t highest_tsn;
10384 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
10385 (stcb->asoc.peer_supports_nr_sack == 1)) {
10386 type = SCTP_NR_SELECTIVE_ACK;
10388 type = SCTP_SELECTIVE_ACK;
10391 asoc = &stcb->asoc;
10392 SCTP_TCB_LOCK_ASSERT(stcb);
10393 if (asoc->last_data_chunk_from == NULL) {
10394 /* Hmm we never received anything */
10397 sctp_slide_mapping_arrays(stcb);
10398 sctp_set_rwnd(stcb, asoc);
10399 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10400 if (chk->rec.chunk_id.id == type) {
10401 /* Hmm, found a sack already on queue, remove it */
10402 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10403 asoc->ctrl_queue_cnt--;
10406 sctp_m_freem(a_chk->data);
10407 a_chk->data = NULL;
10409 if (a_chk->whoTo) {
10410 sctp_free_remote_addr(a_chk->whoTo);
10411 a_chk->whoTo = NULL;
10416 if (a_chk == NULL) {
10417 sctp_alloc_a_chunk(stcb, a_chk);
10418 if (a_chk == NULL) {
10419 /* No memory so we drop the idea, and set a timer */
10420 if (stcb->asoc.delayed_ack) {
10421 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10422 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10423 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10424 stcb->sctp_ep, stcb, NULL);
10426 stcb->asoc.send_sack = 1;
10430 a_chk->copy_by_ref = 0;
10431 a_chk->rec.chunk_id.id = type;
10432 a_chk->rec.chunk_id.can_take_data = 1;
10434 /* Clear our pkt counts */
10435 asoc->data_pkts_seen = 0;
10437 a_chk->asoc = asoc;
10438 a_chk->snd_count = 0;
10439 a_chk->send_size = 0; /* fill in later */
10440 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10441 a_chk->whoTo = NULL;
10443 if ((asoc->numduptsns) ||
10444 (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
10446 * Ok, we have some duplicates or the destination for the
10447 * sack is unreachable, let's see if we can select an
10448 * alternate to asoc->last_data_chunk_from
10450 if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
10451 (asoc->used_alt_onsack > asoc->numnets)) {
10452 /* We used an alt last time, don't this time */
10453 a_chk->whoTo = NULL;
10455 asoc->used_alt_onsack++;
10456 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10458 if (a_chk->whoTo == NULL) {
10459 /* Nope, no alternate */
10460 a_chk->whoTo = asoc->last_data_chunk_from;
10461 asoc->used_alt_onsack = 0;
10465 * No duplicates so we use the last place we received data from.
10468 asoc->used_alt_onsack = 0;
10469 a_chk->whoTo = asoc->last_data_chunk_from;
10471 if (a_chk->whoTo) {
10472 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10474 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10475 highest_tsn = asoc->highest_tsn_inside_map;
10477 highest_tsn = asoc->highest_tsn_inside_nr_map;
10479 if (highest_tsn == asoc->cumulative_tsn) {
10481 if (type == SCTP_SELECTIVE_ACK) {
10482 space_req = sizeof(struct sctp_sack_chunk);
10484 space_req = sizeof(struct sctp_nr_sack_chunk);
10487 /* gaps get a cluster */
10488 space_req = MCLBYTES;
10490 /* Ok now let's formulate an MBUF with our sack */
10491 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
10492 if ((a_chk->data == NULL) ||
10493 (a_chk->whoTo == NULL)) {
10494 /* rats, no mbuf memory */
10496 /* was a problem with the destination */
10497 sctp_m_freem(a_chk->data);
10498 a_chk->data = NULL;
10500 sctp_free_a_chunk(stcb, a_chk, so_locked);
10501 /* sa_ignore NO_NULL_CHK */
10502 if (stcb->asoc.delayed_ack) {
10503 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10504 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10505 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10506 stcb->sctp_ep, stcb, NULL);
10508 stcb->asoc.send_sack = 1;
10512 /* ok, let's go through and fill it in */
10513 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10514 space = M_TRAILINGSPACE(a_chk->data);
10515 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10516 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
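/*
 * Cap the SACK at one path MTU worth of space, presumably so the
 * acknowledgement itself never needs fragmentation.
 */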
10518 limit = mtod(a_chk->data, caddr_t);
10523 if ((asoc->sctp_cmt_on_off > 0) &&
10524 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10526 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
10527 * received, then set high bit to 1, else 0. Reset pkts_rcvd.
10530 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10531 asoc->cmt_dac_pkts_rcvd = 0;
10533 #ifdef SCTP_ASOCLOG_OF_TSNS
10534 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10535 stcb->asoc.cumack_log_atsnt++;
10536 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10537 stcb->asoc.cumack_log_atsnt = 0;
10540 /* reset the reader's interpretation */
10541 stcb->freed_by_sorcv_sincelast = 0;
10543 if (type == SCTP_SELECTIVE_ACK) {
10544 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10546 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10547 if (highest_tsn > asoc->mapping_array_base_tsn) {
10548 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10550 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10554 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10555 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10556 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10557 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10559 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10563 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10566 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10568 if (((type == SCTP_SELECTIVE_ACK) &&
10569 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10570 ((type == SCTP_NR_SELECTIVE_ACK) &&
10571 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10572 /* we have a gap .. maybe */
10573 for (i = 0; i < siz; i++) {
10574 tsn_map = asoc->mapping_array[i];
10575 if (type == SCTP_SELECTIVE_ACK) {
10576 tsn_map |= asoc->nr_mapping_array[i];
10580 * Clear all bits corresponding to TSNs
10581 * smaller or equal to the cumulative TSN.
10583 tsn_map &= (~0 << (1 - offset));
10585 selector = &sack_array[tsn_map];
10586 if (mergeable && selector->right_edge) {
10588 * Backup, left and right edges were ok to merge.
10594 if (selector->num_entries == 0)
10597 for (j = 0; j < selector->num_entries; j++) {
10598 if (mergeable && selector->right_edge) {
10600 * do a merge by NOT setting the start (left edge).
10606 * no merge, set the left edge (start).
10610 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10612 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10615 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10621 if (selector->left_edge) {
10625 if (limit_reached) {
10626 /* Reached the limit, stop */
10632 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10633 (limit_reached == 0)) {
10637 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10638 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10640 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10643 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10646 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10648 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10649 /* we have a gap .. maybe */
10650 for (i = 0; i < siz; i++) {
10651 tsn_map = asoc->nr_mapping_array[i];
10654 * Clear all bits corresponding to
10655  * TSNs smaller than or equal to the
10658 tsn_map &= (~0 << (1 - offset));
10660 selector = &sack_array[tsn_map];
10661 if (mergeable && selector->right_edge) {
10663 * Backup, left and right edges were
10666 num_nr_gap_blocks--;
10669 if (selector->num_entries == 0)
10672 for (j = 0; j < selector->num_entries; j++) {
10673 if (mergeable && selector->right_edge) {
10675 * do a merge by NOT
10682 * no merge, set the
10686 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10688 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10689 num_nr_gap_blocks++;
10691 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10697 if (selector->left_edge) {
10701 if (limit_reached) {
10702 /* Reached the limit stop */
10709 /* now we must add any dups we are going to report. */
10710 if ((limit_reached == 0) && (asoc->numduptsns)) {
10711 dup = (uint32_t *) gap_descriptor;
10712 for (i = 0; i < asoc->numduptsns; i++) {
10713 *dup = htonl(asoc->dup_tsns[i]);
10716 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10721 asoc->numduptsns = 0;
10724 * now that the chunk is prepared queue it to the control chunk
10727 if (type == SCTP_SELECTIVE_ACK) {
10728 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
10729 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10730 num_dups * sizeof(int32_t);
10731 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10732 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10733 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10734 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10735 sack->sack.num_dup_tsns = htons(num_dups);
10736 sack->ch.chunk_type = type;
10737 sack->ch.chunk_flags = flags;
10738 sack->ch.chunk_length = htons(a_chk->send_size);
10740 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
10741 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10742 num_dups * sizeof(int32_t);
10743 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10744 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10745 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10746 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10747 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10748 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10749 nr_sack->nr_sack.reserved = 0;
10750 nr_sack->ch.chunk_type = type;
10751 nr_sack->ch.chunk_flags = flags;
10752 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10754 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10755 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10756 asoc->ctrl_queue_cnt++;
10757 asoc->send_sack = 0;
10758 SCTP_STAT_INCR(sctps_sendsacks);
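/*
 * Editor's sketch (simplified, illustrative only): how one byte of the
 * mapping array becomes on-the-wire gap-ack blocks in the loops above.
 * The byte value indexes the precomputed sack_array[]; each table entry
 * lists the gap start/end offsets within that byte, which are shifted
 * by 'offset' so they are relative to the cumulative TSN and converted
 * to network byte order:
 *
 *	uint8_t byte = asoc->mapping_array[i];
 *	struct sack_track *sel = &sack_array[byte];
 *	int j;
 *
 *	for (j = 0; j < sel->num_entries; j++) {
 *		gap_descriptor->start = htons(sel->gaps[j].start + offset);
 *		gap_descriptor->end = htons(sel->gaps[j].end + offset);
 *		gap_descriptor++;
 *	}
 *
 * The right_edge/left_edge flags let a run of received TSNs that spans
 * a byte boundary be merged into one block instead of emitting two
 * adjacent blocks.
 */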
10763 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10764 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10769 struct mbuf *m_abort;
10770 struct mbuf *m_out = NULL, *m_end = NULL;
10771 struct sctp_abort_chunk *abort = NULL;
10773 uint32_t auth_offset = 0;
10774 struct sctp_auth_chunk *auth = NULL;
10775 struct sctp_nets *net;
10778  * Add an AUTH chunk, if the chunk requires it, and save the offset
10779  * into the chain for AUTH.
10781 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10782 stcb->asoc.peer_auth_chunks)) {
10783 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
10784 stcb, SCTP_ABORT_ASSOCIATION);
10785 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10787 SCTP_TCB_LOCK_ASSERT(stcb);
10788 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
10789 if (m_abort == NULL) {
10792 sctp_m_freem(m_out);
10795 /* link in any error */
10796 SCTP_BUF_NEXT(m_abort) = operr;
10803 sz += SCTP_BUF_LEN(n);
10804 n = SCTP_BUF_NEXT(n);
10807 SCTP_BUF_LEN(m_abort) = sizeof(*abort);
10808 if (m_out == NULL) {
10809 /* NO Auth chunk prepended, so reserve space in front */
10810 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10813 /* Put AUTH chunk at the front of the chain */
10814 SCTP_BUF_NEXT(m_end) = m_abort;
10816 if (stcb->asoc.alternate) {
10817 net = stcb->asoc.alternate;
10819 net = stcb->asoc.primary_destination;
10821 /* fill in the ABORT chunk */
10822 abort = mtod(m_abort, struct sctp_abort_chunk *);
10823 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10824 abort->ch.chunk_flags = 0;
10825 abort->ch.chunk_length = htons(sizeof(*abort) + sz);
10827 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10828 (struct sockaddr *)&net->ro._l_addr,
10829 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10830 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10831 stcb->asoc.primary_destination->port, so_locked, NULL, NULL);
10832 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10836 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10837 struct sctp_nets *net,
10840 /* formulate and SEND a SHUTDOWN-COMPLETE */
10841 struct mbuf *m_shutdown_comp;
10842 struct sctp_shutdown_complete_chunk *shutdown_complete;
10846 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
10847 if (m_shutdown_comp == NULL) {
10851 if (reflect_vtag) {
10852 flags = SCTP_HAD_NO_TCB;
10853 vtag = stcb->asoc.my_vtag;
10856 vtag = stcb->asoc.peer_vtag;
10858 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10859 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10860 shutdown_complete->ch.chunk_flags = flags;
10861 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10862 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10863 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10864 (struct sockaddr *)&net->ro._l_addr,
10865 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
10866 stcb->sctp_ep->sctp_lport, stcb->rport,
10868 net->port, SCTP_SO_NOT_LOCKED, NULL, NULL);
10869 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
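/*
 * Editor's note (hedged): the reflect_vtag handling above maps onto the
 * two ways a SHUTDOWN-COMPLETE can be tagged:
 *
 *	reflect_vtag != 0:  flags = SCTP_HAD_NO_TCB, vtag = asoc.my_vtag
 *	reflect_vtag == 0:  flags = 0,               vtag = asoc.peer_vtag
 *
 * In RFC 4960 terms SCTP_HAD_NO_TCB corresponds to the T bit, which
 * tells the peer that the verification tag was reflected rather than
 * looked up from an association.
 */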
10874 sctp_send_shutdown_complete2(struct mbuf *m, struct sctphdr *sh,
10875 uint32_t vrf_id, uint16_t port)
10877 /* formulate and SEND a SHUTDOWN-COMPLETE */
10878 struct mbuf *o_pak;
10881 struct udphdr *udp = NULL;
10882 int offset_out, len, mlen;
10883 struct sctp_shutdown_complete_msg *comp_cp;
10886 struct ip *iph_out;
10890 struct ip6_hdr *ip6, *ip6_out;
10894 iph = mtod(m, struct ip *);
10895 switch (iph->ip_v) {
10898 len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
10902 case IPV6_VERSION >> 4:
10903 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
10910 len += sizeof(struct udphdr);
10912 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
10913 if (mout == NULL) {
10916 SCTP_BUF_RESV_UF(mout, max_linkhdr);
10917 SCTP_BUF_LEN(mout) = len;
10918 SCTP_BUF_NEXT(mout) = NULL;
10919 if (m->m_flags & M_FLOWID) {
10920 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
10921 mout->m_flags |= M_FLOWID;
10931 switch (iph->ip_v) {
10934 iph_out = mtod(mout, struct ip *);
10936 /* Fill in the IP header for the SHUTDOWN-COMPLETE */
10937 iph_out->ip_v = IPVERSION;
10938 iph_out->ip_hl = (sizeof(struct ip) / 4);
10939 iph_out->ip_tos = (u_char)0;
10940 iph_out->ip_id = 0;
10941 iph_out->ip_off = 0;
10942 iph_out->ip_ttl = MAXTTL;
10944 iph_out->ip_p = IPPROTO_UDP;
10946 iph_out->ip_p = IPPROTO_SCTP;
10948 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
10949 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
10951 /* let IP layer calculate this */
10952 iph_out->ip_sum = 0;
10953 offset_out += sizeof(*iph_out);
10954 comp_cp = (struct sctp_shutdown_complete_msg *)(
10955 (caddr_t)iph_out + offset_out);
10959 case IPV6_VERSION >> 4:
10960 ip6 = (struct ip6_hdr *)iph;
10961 ip6_out = mtod(mout, struct ip6_hdr *);
10963 /* Fill in the IPv6 header for the SHUTDOWN-COMPLETE */
10964 ip6_out->ip6_flow = ip6->ip6_flow;
10965 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
10967 ip6_out->ip6_nxt = IPPROTO_UDP;
10969 ip6_out->ip6_nxt = IPPROTO_SCTP;
10971 ip6_out->ip6_src = ip6->ip6_dst;
10972 ip6_out->ip6_dst = ip6->ip6_src;
10974  * The old code set this to the IPv6 header length plus the payload;
10975  * that appears wrong and would never have worked.
10977 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
10978 offset_out += sizeof(*ip6_out);
10979 comp_cp = (struct sctp_shutdown_complete_msg *)(
10980 (caddr_t)ip6_out + offset_out);
10984 /* Currently not supported. */
10985 sctp_m_freem(mout);
10989 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
10990 sctp_m_freem(mout);
10993 udp = (struct udphdr *)comp_cp;
10994 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
10995 udp->uh_dport = port;
10996 udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
11000 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11006 offset_out += sizeof(struct udphdr);
11007 comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
11009 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11011 sctp_m_freem(mout);
11014 /* Now copy in and fill in the SHUTDOWN-COMPLETE tags etc. */
11015 comp_cp->sh.src_port = sh->dest_port;
11016 comp_cp->sh.dest_port = sh->src_port;
11017 comp_cp->sh.checksum = 0;
11018 comp_cp->sh.v_tag = sh->v_tag;
11019 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
11020 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11021 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11024 if (iph_out != NULL) {
11028 mlen = SCTP_BUF_LEN(mout);
11029 bzero(&ro, sizeof ro);
11030 /* set IPv4 length */
11031 iph_out->ip_len = mlen;
11032 #ifdef SCTP_PACKET_LOGGING
11033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11034 sctp_packet_log(mout, mlen);
11037 #if defined(SCTP_WITH_NO_CSUM)
11038 SCTP_STAT_INCR(sctps_sendnocrc);
11040 comp_cp->sh.checksum = sctp_calculate_cksum(mout, offset_out);
11041 SCTP_STAT_INCR(sctps_sendswcrc);
11044 SCTP_ENABLE_UDP_CSUM(mout);
11047 #if defined(SCTP_WITH_NO_CSUM)
11048 SCTP_STAT_INCR(sctps_sendnocrc);
11050 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11051 mout->m_pkthdr.csum_data = 0;
11052 SCTP_STAT_INCR(sctps_sendhwcrc);
11055 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
11057 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11059 /* Free the route if we got one back */
11065 if (ip6_out != NULL) {
11066 struct route_in6 ro;
11068 struct ifnet *ifp = NULL;
11070 bzero(&ro, sizeof(ro));
11071 mlen = SCTP_BUF_LEN(mout);
11072 #ifdef SCTP_PACKET_LOGGING
11073 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11074 sctp_packet_log(mout, mlen);
11076 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
11078 #if defined(SCTP_WITH_NO_CSUM)
11079 SCTP_STAT_INCR(sctps_sendnocrc);
11081 comp_cp->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11082 SCTP_STAT_INCR(sctps_sendswcrc);
11084 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), mlen - sizeof(struct ip6_hdr))) == 0) {
11085 udp->uh_sum = 0xffff;
11088 #if defined(SCTP_WITH_NO_CSUM)
11089 SCTP_STAT_INCR(sctps_sendnocrc);
11091 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11092 mout->m_pkthdr.csum_data = 0;
11093 SCTP_STAT_INCR(sctps_sendhwcrc);
11096 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
11098 /* Free the route if we got one back */
11103 SCTP_STAT_INCR(sctps_sendpackets);
11104 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11105 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
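/*
 * Editor's note (hedged): for the UDP-encapsulated IPv6 case above, a
 * computed UDP checksum of 0 is rewritten as 0xffff.  UDP over IPv6
 * requires a non-zero checksum, and 0x0000 and 0xffff are equivalent in
 * one's-complement arithmetic, so the substitution keeps the checksum
 * valid while satisfying that rule.
 */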
11111 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11112 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11117 struct sctp_tmit_chunk *chk;
11118 struct sctp_heartbeat_chunk *hb;
11119 struct timeval now;
11121 SCTP_TCB_LOCK_ASSERT(stcb);
11125 (void)SCTP_GETTIME_TIMEVAL(&now);
11126 switch (net->ro._l_addr.sa.sa_family) {
11138 sctp_alloc_a_chunk(stcb, chk);
11140 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11143 chk->copy_by_ref = 0;
11144 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11145 chk->rec.chunk_id.can_take_data = 1;
11146 chk->asoc = &stcb->asoc;
11147 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11149 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11150 if (chk->data == NULL) {
11151 sctp_free_a_chunk(stcb, chk, so_locked);
11154 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11155 SCTP_BUF_LEN(chk->data) = chk->send_size;
11156 chk->sent = SCTP_DATAGRAM_UNSENT;
11157 chk->snd_count = 0;
11159 atomic_add_int(&chk->whoTo->ref_count, 1);
11160 /* Now we have a mbuf that we can fill in with the details */
11161 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11162 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11163 /* fill out chunk header */
11164 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11165 hb->ch.chunk_flags = 0;
11166 hb->ch.chunk_length = htons(chk->send_size);
11167 /* Fill out hb parameter */
11168 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11169 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11170 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11171 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
11172 /* Did our user request this one? Put it in. */
11173 hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
11174 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11175 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11177 * we only take from the entropy pool if the address is not
11180 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11181 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11183 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11184 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11186 switch (net->ro._l_addr.sa.sa_family) {
11189 memcpy(hb->heartbeat.hb_info.address,
11190 &net->ro._l_addr.sin.sin_addr,
11191 sizeof(net->ro._l_addr.sin.sin_addr));
11196 memcpy(hb->heartbeat.hb_info.address,
11197 &net->ro._l_addr.sin6.sin6_addr,
11198 sizeof(net->ro._l_addr.sin6.sin6_addr));
11205 net->hb_responded = 0;
11206 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11207 stcb->asoc.ctrl_queue_cnt++;
11208 SCTP_STAT_INCR(sctps_sendheartbeat);
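/*
 * Editor's sketch (illustrative; local names are hypothetical): the
 * time_value_1/time_value_2 fields filled in above carry the send time
 * inside the HEARTBEAT-INFO parameter.  Assuming the peer echoes the
 * parameter back unchanged in its HEARTBEAT-ACK (as RFC 4960 requires),
 * the sender can estimate an RTT when the ACK arrives:
 *
 *	struct timeval sent, now;
 *
 *	sent.tv_sec = hb_info->time_value_1;
 *	sent.tv_usec = hb_info->time_value_2;
 *	(void)SCTP_GETTIME_TIMEVAL(&now);
 *	timevalsub(&now, &sent);	-- 'now' then holds the elapsed time
 */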
11213 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11216 struct sctp_association *asoc;
11217 struct sctp_ecne_chunk *ecne;
11218 struct sctp_tmit_chunk *chk;
11223 asoc = &stcb->asoc;
11224 SCTP_TCB_LOCK_ASSERT(stcb);
11225 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11226 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11227 /* found a previous ECN_ECHO update it if needed */
11228 uint32_t cnt, ctsn;
11230 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11231 ctsn = ntohl(ecne->tsn);
11232 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11233 ecne->tsn = htonl(high_tsn);
11234 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11236 cnt = ntohl(ecne->num_pkts_since_cwr);
11238 ecne->num_pkts_since_cwr = htonl(cnt);
11242 /* nope could not find one to update so we must build one */
11243 sctp_alloc_a_chunk(stcb, chk);
11247 chk->copy_by_ref = 0;
11248 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11249 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11250 chk->rec.chunk_id.can_take_data = 0;
11251 chk->asoc = &stcb->asoc;
11252 chk->send_size = sizeof(struct sctp_ecne_chunk);
11253 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11254 if (chk->data == NULL) {
11255 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11258 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11259 SCTP_BUF_LEN(chk->data) = chk->send_size;
11260 chk->sent = SCTP_DATAGRAM_UNSENT;
11261 chk->snd_count = 0;
11263 atomic_add_int(&chk->whoTo->ref_count, 1);
11265 stcb->asoc.ecn_echo_cnt_onq++;
11266 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11267 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11268 ecne->ch.chunk_flags = 0;
11269 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11270 ecne->tsn = htonl(high_tsn);
11271 ecne->num_pkts_since_cwr = htonl(1);
11272 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11273 asoc->ctrl_queue_cnt++;
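/*
 * Editor's note (hedged): the search above coalesces ECN-ECHO chunks
 * per destination.  For example, CE-marked packets carrying TSNs 5, 7
 * and 9 toward the same net would leave a single queued ECNE with
 * tsn = htonl(9) and, assuming the counter is bumped on every update,
 * num_pkts_since_cwr = htonl(3); only when no matching chunk is queued
 * is a fresh one built and put at the head of the control queue.
 */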
11277 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11278 struct mbuf *m, int iphlen, int bad_crc)
11280 struct sctp_association *asoc;
11281 struct sctp_pktdrop_chunk *drp;
11282 struct sctp_tmit_chunk *chk;
11289 struct ip6_hdr *ip6h;
11292 int fullsz = 0, extra = 0;
11295 struct sctp_chunkhdr *ch, chunk_buf;
11296 unsigned int chk_length;
11301 asoc = &stcb->asoc;
11302 SCTP_TCB_LOCK_ASSERT(stcb);
11303 if (asoc->peer_supports_pktdrop == 0) {
11305 * peer must declare support before I send one.
11309 if (stcb->sctp_socket == NULL) {
11312 sctp_alloc_a_chunk(stcb, chk);
11316 chk->copy_by_ref = 0;
11317 iph = mtod(m, struct ip *);
11319 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11322 switch (iph->ip_v) {
11326 len = chk->send_size = iph->ip_len;
11330 case IPV6_VERSION >> 4:
11332 ip6h = mtod(m, struct ip6_hdr *);
11333 len = chk->send_size = ntohs(ip6h->ip6_plen);
11339 /* Validate that we do not have an ABORT in here. */
11340 offset = iphlen + sizeof(struct sctphdr);
11341 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11342 sizeof(*ch), (uint8_t *) & chunk_buf);
11343 while (ch != NULL) {
11344 chk_length = ntohs(ch->chunk_length);
11345 if (chk_length < sizeof(*ch)) {
11346 /* break to abort land */
11349 switch (ch->chunk_type) {
11350 case SCTP_PACKET_DROPPED:
11351 case SCTP_ABORT_ASSOCIATION:
11352 case SCTP_INITIATION_ACK:
11354  * We don't respond with a PKT-DROP to an ABORT
11355  * or PKT-DROP. We also do not respond to an
11356 * INIT-ACK, because we can't know if the initiation
11357 * tag is correct or not.
11359 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11364 offset += SCTP_SIZE32(chk_length);
11365 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11366 sizeof(*ch), (uint8_t *) & chunk_buf);
11369 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11370 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11372 * only send 1 mtu worth, trim off the excess on the end.
11374 fullsz = len - extra;
11375 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11378 chk->asoc = &stcb->asoc;
11379 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11380 if (chk->data == NULL) {
11382 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11385 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11386 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11388 sctp_m_freem(chk->data);
11392 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11393 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11394 chk->book_size_scale = 0;
11396 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11397 drp->trunc_len = htons(fullsz);
11399  * Len is already adjusted above to the size minus overhead;
11400  * take the pkt_drop chunk itself out of it as well.
11402 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
11403 len = chk->send_size;
11405 /* no truncation needed */
11406 drp->ch.chunk_flags = 0;
11407 drp->trunc_len = htons(0);
11410 drp->ch.chunk_flags |= SCTP_BADCRC;
11412 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11413 SCTP_BUF_LEN(chk->data) = chk->send_size;
11414 chk->sent = SCTP_DATAGRAM_UNSENT;
11415 chk->snd_count = 0;
11417 /* we should hit here */
11419 atomic_add_int(&chk->whoTo->ref_count, 1);
11423 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11424 chk->rec.chunk_id.can_take_data = 1;
11425 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11426 drp->ch.chunk_length = htons(chk->send_size);
11427 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11431 drp->bottle_bw = htonl(spc);
11432 if (asoc->my_rwnd) {
11433 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11434 asoc->size_on_all_streams +
11435 asoc->my_rwnd_control_len +
11436 stcb->sctp_socket->so_rcv.sb_cc);
11439  * If my rwnd is 0, possibly from mbuf depletion as well as
11440  * space used, tell the peer there is NO space, i.e., onq == bottle_bw.
11442 drp->current_onq = htonl(spc);
11446 m_copydata(m, iphlen, len, (caddr_t)datap);
11447 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11448 asoc->ctrl_queue_cnt++;
11452 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11454 struct sctp_association *asoc;
11455 struct sctp_cwr_chunk *cwr;
11456 struct sctp_tmit_chunk *chk;
11458 asoc = &stcb->asoc;
11459 SCTP_TCB_LOCK_ASSERT(stcb);
11463 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11464 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11466  * found a previous CWR queued to the same destination;
11467  * update it if needed
11471 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11472 ctsn = ntohl(cwr->tsn);
11473 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11474 cwr->tsn = htonl(high_tsn);
11476 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11477 /* Make sure override is carried */
11478 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11483 sctp_alloc_a_chunk(stcb, chk);
11487 chk->copy_by_ref = 0;
11488 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11489 chk->rec.chunk_id.can_take_data = 1;
11490 chk->asoc = &stcb->asoc;
11491 chk->send_size = sizeof(struct sctp_cwr_chunk);
11492 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11493 if (chk->data == NULL) {
11494 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11497 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11498 SCTP_BUF_LEN(chk->data) = chk->send_size;
11499 chk->sent = SCTP_DATAGRAM_UNSENT;
11500 chk->snd_count = 0;
11502 atomic_add_int(&chk->whoTo->ref_count, 1);
11503 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11504 cwr->ch.chunk_type = SCTP_ECN_CWR;
11505 cwr->ch.chunk_flags = override;
11506 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11507 cwr->tsn = htonl(high_tsn);
11508 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11509 asoc->ctrl_queue_cnt++;
11513 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11514 int number_entries, uint16_t * list,
11515 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11517 int len, old_len, i;
11518 struct sctp_stream_reset_out_request *req_out;
11519 struct sctp_chunkhdr *ch;
11521 ch = mtod(chk->data, struct sctp_chunkhdr *);
11524 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11526 /* get to new offset for the param. */
11527 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11528 /* now how long will this param be? */
11529 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11530 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11531 req_out->ph.param_length = htons(len);
11532 req_out->request_seq = htonl(seq);
11533 req_out->response_seq = htonl(resp_seq);
11534 req_out->send_reset_at_tsn = htonl(last_sent);
11535 if (number_entries) {
11536 for (i = 0; i < number_entries; i++) {
11537 req_out->list_of_streams[i] = htons(list[i]);
11540 if (SCTP_SIZE32(len) > len) {
11542 * Need to worry about the pad we may end up adding to the
11543 * end. This is easy since the struct is either aligned to 4
11544 * bytes or 2 bytes off.
11546 req_out->list_of_streams[number_entries] = 0;
11548 /* now fix the chunk length */
11549 ch->chunk_length = htons(len + old_len);
11550 chk->book_size = len + old_len;
11551 chk->book_size_scale = 0;
11552 chk->send_size = SCTP_SIZE32(chk->book_size);
11553 SCTP_BUF_LEN(chk->data) = chk->send_size;
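/*
 * Editor's worked example (illustrative): the fixed part of the request
 * is a multiple of 4 bytes, so the parameter length is
 * 'fixed + 2 * number_entries'.  With an even number of 16-bit stream
 * ids the parameter is already 4-byte aligned; with an odd number it is
 * 2 bytes short (e.g. number_entries = 3 falls 2 bytes below the next
 * multiple of 4), and the single zero uint16_t written above supplies
 * that pad.  SCTP_SIZE32() then rounds send_size up the same way.  The
 * _in request below follows the same pattern.
 */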
11559 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11560 int number_entries, uint16_t * list,
11563 int len, old_len, i;
11564 struct sctp_stream_reset_in_request *req_in;
11565 struct sctp_chunkhdr *ch;
11567 ch = mtod(chk->data, struct sctp_chunkhdr *);
11570 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11572 /* get to new offset for the param. */
11573 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11574 /* now how long will this param be? */
11575 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11576 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11577 req_in->ph.param_length = htons(len);
11578 req_in->request_seq = htonl(seq);
11579 if (number_entries) {
11580 for (i = 0; i < number_entries; i++) {
11581 req_in->list_of_streams[i] = htons(list[i]);
11584 if (SCTP_SIZE32(len) > len) {
11586 * Need to worry about the pad we may end up adding to the
11587 * end. This is easy since the struct is either aligned to 4
11588 * bytes or 2 bytes off.
11590 req_in->list_of_streams[number_entries] = 0;
11592 /* now fix the chunk length */
11593 ch->chunk_length = htons(len + old_len);
11594 chk->book_size = len + old_len;
11595 chk->book_size_scale = 0;
11596 chk->send_size = SCTP_SIZE32(chk->book_size);
11597 SCTP_BUF_LEN(chk->data) = chk->send_size;
11603 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11607 struct sctp_stream_reset_tsn_request *req_tsn;
11608 struct sctp_chunkhdr *ch;
11610 ch = mtod(chk->data, struct sctp_chunkhdr *);
11613 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11615 /* get to new offset for the param. */
11616 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11617 /* now how long will this param be? */
11618 len = sizeof(struct sctp_stream_reset_tsn_request);
11619 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11620 req_tsn->ph.param_length = htons(len);
11621 req_tsn->request_seq = htonl(seq);
11623 /* now fix the chunk length */
11624 ch->chunk_length = htons(len + old_len);
11625 chk->send_size = len + old_len;
11626 chk->book_size = SCTP_SIZE32(chk->send_size);
11627 chk->book_size_scale = 0;
11628 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11633 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11634 uint32_t resp_seq, uint32_t result)
11637 struct sctp_stream_reset_response *resp;
11638 struct sctp_chunkhdr *ch;
11640 ch = mtod(chk->data, struct sctp_chunkhdr *);
11643 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11645 /* get to new offset for the param. */
11646 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11647 /* now how long will this param be? */
11648 len = sizeof(struct sctp_stream_reset_response);
11649 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11650 resp->ph.param_length = htons(len);
11651 resp->response_seq = htonl(resp_seq);
11652 resp->result = htonl(result);
11654 /* now fix the chunk length */
11655 ch->chunk_length = htons(len + old_len);
11656 chk->book_size = len + old_len;
11657 chk->book_size_scale = 0;
11658 chk->send_size = SCTP_SIZE32(chk->book_size);
11659 SCTP_BUF_LEN(chk->data) = chk->send_size;
11666 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11667 uint32_t resp_seq, uint32_t result,
11668 uint32_t send_una, uint32_t recv_next)
11671 struct sctp_stream_reset_response_tsn *resp;
11672 struct sctp_chunkhdr *ch;
11674 ch = mtod(chk->data, struct sctp_chunkhdr *);
11677 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11679 /* get to new offset for the param. */
11680 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11681 /* now how long will this param be? */
11682 len = sizeof(struct sctp_stream_reset_response_tsn);
11683 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11684 resp->ph.param_length = htons(len);
11685 resp->response_seq = htonl(resp_seq);
11686 resp->result = htonl(result);
11687 resp->senders_next_tsn = htonl(send_una);
11688 resp->receivers_next_tsn = htonl(recv_next);
11690 /* now fix the chunk length */
11691 ch->chunk_length = htons(len + old_len);
11692 chk->book_size = len + old_len;
11693 chk->send_size = SCTP_SIZE32(chk->book_size);
11694 chk->book_size_scale = 0;
11695 SCTP_BUF_LEN(chk->data) = chk->send_size;
11700 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11705 struct sctp_chunkhdr *ch;
11706 struct sctp_stream_reset_add_strm *addstr;
11708 ch = mtod(chk->data, struct sctp_chunkhdr *);
11709 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11711 /* get to new offset for the param. */
11712 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11713 /* now how long will this param be? */
11714 len = sizeof(struct sctp_stream_reset_add_strm);
11717 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11718 addstr->ph.param_length = htons(len);
11719 addstr->request_seq = htonl(seq);
11720 addstr->number_of_streams = htons(adding);
11721 addstr->reserved = 0;
11723 /* now fix the chunk length */
11724 ch->chunk_length = htons(len + old_len);
11725 chk->send_size = len + old_len;
11726 chk->book_size = SCTP_SIZE32(chk->send_size);
11727 chk->book_size_scale = 0;
11728 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11733 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
11738 struct sctp_chunkhdr *ch;
11739 struct sctp_stream_reset_add_strm *addstr;
11741 ch = mtod(chk->data, struct sctp_chunkhdr *);
11742 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11744 /* get to new offset for the param. */
11745 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11746 /* now how long will this param be? */
11747 len = sizeof(struct sctp_stream_reset_add_strm);
11749 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
11750 addstr->ph.param_length = htons(len);
11751 addstr->request_seq = htonl(seq);
11752 addstr->number_of_streams = htons(adding);
11753 addstr->reserved = 0;
11755 /* now fix the chunk length */
11756 ch->chunk_length = htons(len + old_len);
11757 chk->send_size = len + old_len;
11758 chk->book_size = SCTP_SIZE32(chk->send_size);
11759 chk->book_size_scale = 0;
11760 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11767 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11768 int number_entries, uint16_t * list,
11769 uint8_t send_out_req,
11770 uint8_t send_in_req,
11771 uint8_t send_tsn_req,
11772 uint8_t add_stream,
11774 uint16_t adding_i, uint8_t peer_asked
11778 struct sctp_association *asoc;
11779 struct sctp_tmit_chunk *chk;
11780 struct sctp_chunkhdr *ch;
11783 asoc = &stcb->asoc;
11784 if (asoc->stream_reset_outstanding) {
11786 * Already one pending, must get ACK back to clear the flag.
11788 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11791 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11792 (add_stream == 0)) {
11793 /* nothing to do */
11794 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11797 if (send_tsn_req && (send_out_req || send_in_req)) {
11798 /* error, can't do that */
11799 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11802 sctp_alloc_a_chunk(stcb, chk);
11804 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11807 chk->copy_by_ref = 0;
11808 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11809 chk->rec.chunk_id.can_take_data = 0;
11810 chk->asoc = &stcb->asoc;
11811 chk->book_size = sizeof(struct sctp_chunkhdr);
11812 chk->send_size = SCTP_SIZE32(chk->book_size);
11813 chk->book_size_scale = 0;
11815 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11816 if (chk->data == NULL) {
11817 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11818 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11821 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11823 /* setup chunk parameters */
11824 chk->sent = SCTP_DATAGRAM_UNSENT;
11825 chk->snd_count = 0;
11826 if (stcb->asoc.alternate) {
11827 chk->whoTo = stcb->asoc.alternate;
11829 chk->whoTo = stcb->asoc.primary_destination;
11831 atomic_add_int(&chk->whoTo->ref_count, 1);
11832 ch = mtod(chk->data, struct sctp_chunkhdr *);
11833 ch->chunk_type = SCTP_STREAM_RESET;
11834 ch->chunk_flags = 0;
11835 ch->chunk_length = htons(chk->book_size);
11836 SCTP_BUF_LEN(chk->data) = chk->send_size;
11838 seq = stcb->asoc.str_reset_seq_out;
11839 if (send_out_req) {
11840 sctp_add_stream_reset_out(chk, number_entries, list,
11841 seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
11842 asoc->stream_reset_out_is_outstanding = 1;
11844 asoc->stream_reset_outstanding++;
11846 if ((add_stream & 1) &&
11847 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
11848 /* Need to allocate more */
11849 struct sctp_stream_out *oldstream;
11850 struct sctp_stream_queue_pending *sp, *nsp;
11853 oldstream = stcb->asoc.strmout;
11854 /* get some more */
11855 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
11856 ((stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out)),
11858 if (stcb->asoc.strmout == NULL) {
11861 stcb->asoc.strmout = oldstream;
11862 /* Turn off the bit */
11863 x = add_stream & 0xfe;
11868 * Ok now we proceed with copying the old out stuff and
11869 * initializing the new stuff.
11871 SCTP_TCB_SEND_LOCK(stcb);
11872 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
11873 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11874 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11875 stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent;
11876 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
11877 stcb->asoc.strmout[i].stream_no = i;
11878 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
11879 /* now anything on those queues? */
11880 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
11881 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
11882 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
11884 /* Now move assoc pointers too */
11885 if (stcb->asoc.last_out_stream == &oldstream[i]) {
11886 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
11888 if (stcb->asoc.locked_on_sending == &oldstream[i]) {
11889 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
11892 /* now the new streams */
11893 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
11894 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
11895 stcb->asoc.strmout[i].next_sequence_sent = 0x0;
11896 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11897 stcb->asoc.strmout[i].stream_no = i;
11898 stcb->asoc.strmout[i].last_msg_incomplete = 0;
11899 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
11901 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
11902 SCTP_FREE(oldstream, SCTP_M_STRMO);
11903 SCTP_TCB_SEND_UNLOCK(stcb);
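/*
 * Editor's note (hedged): the per-stream outqueue heads cannot simply
 * be copied into the new array with memcpy.  A TAILQ head contains
 * pointers back into itself (tqh_last points at &head->tqh_first when
 * the queue is empty, and the first element's tqe_prev points into the
 * head), so a raw copy would leave entries referencing the old,
 * about-to-be-freed array.  Re-initializing each new head with
 * TAILQ_INIT() and moving the entries one by one with TAILQ_REMOVE()/
 * TAILQ_INSERT_TAIL(), as done above, keeps every link consistent.
 */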
11906 if ((add_stream & 1) && (adding_o > 0)) {
11907 asoc->strm_pending_add_size = adding_o;
11908 asoc->peer_req_out = peer_asked;
11909 sctp_add_an_out_stream(chk, seq, adding_o);
11911 asoc->stream_reset_outstanding++;
11913 if ((add_stream & 2) && (adding_i > 0)) {
11914 sctp_add_an_in_stream(chk, seq, adding_i);
11916 asoc->stream_reset_outstanding++;
11919 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11921 asoc->stream_reset_outstanding++;
11923 if (send_tsn_req) {
11924 sctp_add_stream_reset_tsn(chk, seq);
11925 asoc->stream_reset_outstanding++;
11927 asoc->str_reset = chk;
11928 /* insert the chunk for sending */
11929 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11932 asoc->ctrl_queue_cnt++;
11933 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
11938 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
11939 struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
11942 * Formulate the abort message, and send it back down.
11944 struct mbuf *o_pak;
11946 struct sctp_abort_msg *abm;
11948 struct udphdr *udp;
11949 int iphlen_out, len;
11952 struct ip *iph_out;
11956 struct ip6_hdr *ip6, *ip6_out;
11960 /* don't respond to ABORT with ABORT */
11961 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11963 sctp_m_freem(err_cause);
11966 iph = mtod(m, struct ip *);
11967 switch (iph->ip_v) {
11970 len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
11974 case IPV6_VERSION >> 4:
11975 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
11980 sctp_m_freem(err_cause);
11985 len += sizeof(struct udphdr);
11987 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
11988 if (mout == NULL) {
11990 sctp_m_freem(err_cause);
11994 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11995 SCTP_BUF_LEN(mout) = len;
11996 SCTP_BUF_NEXT(mout) = err_cause;
11997 if (m->m_flags & M_FLOWID) {
11998 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
11999 mout->m_flags |= M_FLOWID;
12007 switch (iph->ip_v) {
12010 iph_out = mtod(mout, struct ip *);
12012 /* Fill in the IP header for the ABORT */
12013 iph_out->ip_v = IPVERSION;
12014 iph_out->ip_hl = (sizeof(struct ip) / 4);
12015 iph_out->ip_tos = (u_char)0;
12016 iph_out->ip_id = 0;
12017 iph_out->ip_off = 0;
12018 iph_out->ip_ttl = MAXTTL;
12020 iph_out->ip_p = IPPROTO_UDP;
12022 iph_out->ip_p = IPPROTO_SCTP;
12024 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
12025 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
12026 /* let IP layer calculate this */
12027 iph_out->ip_sum = 0;
12029 iphlen_out = sizeof(*iph_out);
12030 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
12034 case IPV6_VERSION >> 4:
12035 ip6 = (struct ip6_hdr *)iph;
12036 ip6_out = mtod(mout, struct ip6_hdr *);
12038 /* Fill in the IP6 header for the ABORT */
12039 ip6_out->ip6_flow = ip6->ip6_flow;
12040 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
12042 ip6_out->ip6_nxt = IPPROTO_UDP;
12044 ip6_out->ip6_nxt = IPPROTO_SCTP;
12046 ip6_out->ip6_src = ip6->ip6_dst;
12047 ip6_out->ip6_dst = ip6->ip6_src;
12049 iphlen_out = sizeof(*ip6_out);
12050 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
12054 /* Currently not supported */
12055 sctp_m_freem(mout);
12059 udp = (struct udphdr *)abm;
12061 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
12062 sctp_m_freem(mout);
12065 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
12066 udp->uh_dport = port;
12067 /* set udp->uh_ulen later */
12069 iphlen_out += sizeof(struct udphdr);
12070 abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
12072 abm->sh.src_port = sh->dest_port;
12073 abm->sh.dest_port = sh->src_port;
12074 abm->sh.checksum = 0;
12076 abm->sh.v_tag = sh->v_tag;
12077 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
12079 abm->sh.v_tag = htonl(vtag);
12080 abm->msg.ch.chunk_flags = 0;
12082 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
12085 struct mbuf *m_tmp = err_cause;
12088 /* get length of the err_cause chain */
12089 while (m_tmp != NULL) {
12090 err_len += SCTP_BUF_LEN(m_tmp);
12091 m_tmp = SCTP_BUF_NEXT(m_tmp);
12093 len = SCTP_BUF_LEN(mout) + err_len;
12095 /* need pad at end of chunk */
12096 uint32_t cpthis = 0;
12099 padlen = 4 - (len % 4);
12100 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
12103 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
12105 len = SCTP_BUF_LEN(mout);
12106 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
12109 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
12111 sctp_m_freem(mout);
12115 if (iph_out != NULL) {
12119 /* zap the stack pointer to the route */
12120 bzero(&ro, sizeof ro);
12122 udp->uh_ulen = htons(len - sizeof(struct ip));
12124 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
12129 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
12130 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
12131 /* set IPv4 length */
12132 iph_out->ip_len = len;
12134 #ifdef SCTP_PACKET_LOGGING
12135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12136 sctp_packet_log(mout, len);
12138 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12140 #if defined(SCTP_WITH_NO_CSUM)
12141 SCTP_STAT_INCR(sctps_sendnocrc);
12143 abm->sh.checksum = sctp_calculate_cksum(mout, iphlen_out);
12144 SCTP_STAT_INCR(sctps_sendswcrc);
12147 SCTP_ENABLE_UDP_CSUM(o_pak);
12150 #if defined(SCTP_WITH_NO_CSUM)
12151 SCTP_STAT_INCR(sctps_sendnocrc);
12153 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12154 mout->m_pkthdr.csum_data = 0;
12155 SCTP_STAT_INCR(sctps_sendhwcrc);
12158 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
12160 /* Free the route if we got one back */
12166 if (ip6_out != NULL) {
12167 struct route_in6 ro;
12169 struct ifnet *ifp = NULL;
12171 /* zap the stack pointer to the route */
12172 bzero(&ro, sizeof(ro));
12174 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
12176 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
12177 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
12178 ip6_out->ip6_plen = len - sizeof(*ip6_out);
12179 #ifdef SCTP_PACKET_LOGGING
12180 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12181 sctp_packet_log(mout, len);
12183 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12185 #if defined(SCTP_WITH_NO_CSUM)
12186 SCTP_STAT_INCR(sctps_sendnocrc);
12188 abm->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
12189 SCTP_STAT_INCR(sctps_sendswcrc);
12191 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
12192 udp->uh_sum = 0xffff;
12195 #if defined(SCTP_WITH_NO_CSUM)
12196 SCTP_STAT_INCR(sctps_sendnocrc);
12198 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12199 mout->m_pkthdr.csum_data = 0;
12200 SCTP_STAT_INCR(sctps_sendhwcrc);
12203 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
12205 /* Free the route if we got one back */
12210 SCTP_STAT_INCR(sctps_sendpackets);
12211 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12212 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
12216 sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
12217 uint32_t vrf_id, uint16_t port)
12219 struct mbuf *o_pak;
12220 struct sctphdr *sh, *sh_out;
12221 struct sctp_chunkhdr *ch;
12223 struct udphdr *udp = NULL;
12225 int iphlen_out, len;
12228 struct ip *iph_out;
12232 struct ip6_hdr *ip6, *ip6_out;
12236 iph = mtod(m, struct ip *);
12237 sh = (struct sctphdr *)((caddr_t)iph + iphlen);
12238 switch (iph->ip_v) {
12241 len = (sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
12245 case IPV6_VERSION >> 4:
12246 len = (sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
12256 len += sizeof(struct udphdr);
12258 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
12259 if (mout == NULL) {
12265 SCTP_BUF_RESV_UF(mout, max_linkhdr);
12266 SCTP_BUF_LEN(mout) = len;
12267 SCTP_BUF_NEXT(mout) = scm;
12268 if (m->m_flags & M_FLOWID) {
12269 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
12270 mout->m_flags |= M_FLOWID;
12278 switch (iph->ip_v) {
12281 iph_out = mtod(mout, struct ip *);
12283 /* Fill in the IP header for the OPERATION-ERROR */
12284 iph_out->ip_v = IPVERSION;
12285 iph_out->ip_hl = (sizeof(struct ip) / 4);
12286 iph_out->ip_tos = (u_char)0;
12287 iph_out->ip_id = 0;
12288 iph_out->ip_off = 0;
12289 iph_out->ip_ttl = MAXTTL;
12291 iph_out->ip_p = IPPROTO_UDP;
12293 iph_out->ip_p = IPPROTO_SCTP;
12295 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
12296 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
12297 /* let IP layer calculate this */
12298 iph_out->ip_sum = 0;
12300 iphlen_out = sizeof(struct ip);
12301 sh_out = (struct sctphdr *)((caddr_t)iph_out + iphlen_out);
12305 case IPV6_VERSION >> 4:
12306 ip6 = (struct ip6_hdr *)iph;
12307 ip6_out = mtod(mout, struct ip6_hdr *);
12309 /* Fill in the IP6 header for the OPERATION-ERROR */
12310 ip6_out->ip6_flow = ip6->ip6_flow;
12311 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
12313 ip6_out->ip6_nxt = IPPROTO_UDP;
12315 ip6_out->ip6_nxt = IPPROTO_SCTP;
12317 ip6_out->ip6_src = ip6->ip6_dst;
12318 ip6_out->ip6_dst = ip6->ip6_src;
12320 iphlen_out = sizeof(struct ip6_hdr);
12321 sh_out = (struct sctphdr *)((caddr_t)ip6_out + iphlen_out);
12325 /* Currently not supported */
12326 sctp_m_freem(mout);
12330 udp = (struct udphdr *)sh_out;
12332 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
12333 sctp_m_freem(mout);
12336 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
12337 udp->uh_dport = port;
12338 /* set udp->uh_ulen later */
12340 iphlen_out += sizeof(struct udphdr);
12341 sh_out = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
12343 sh_out->src_port = sh->dest_port;
12344 sh_out->dest_port = sh->src_port;
12345 sh_out->v_tag = vtag;
12346 sh_out->checksum = 0;
12348 ch = (struct sctp_chunkhdr *)((caddr_t)sh_out + sizeof(struct sctphdr));
12349 ch->chunk_type = SCTP_OPERATION_ERROR;
12350 ch->chunk_flags = 0;
12353 struct mbuf *m_tmp = scm;
12356 /* get length of the err_cause chain */
12357 while (m_tmp != NULL) {
12358 cause_len += SCTP_BUF_LEN(m_tmp);
12359 m_tmp = SCTP_BUF_NEXT(m_tmp);
12361 len = SCTP_BUF_LEN(mout) + cause_len;
12362 if (cause_len % 4) {
12363 /* need pad at end of chunk */
12364 uint32_t cpthis = 0;
12367 padlen = 4 - (len % 4);
12368 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
12371 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
12373 len = SCTP_BUF_LEN(mout);
12374 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr));
12377 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
12379 sctp_m_freem(mout);
12383 if (iph_out != NULL) {
12387 /* zap the stack pointer to the route */
12388 bzero(&ro, sizeof ro);
12390 udp->uh_ulen = htons(len - sizeof(struct ip));
12392 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
12397 /* set IPv4 length */
12398 iph_out->ip_len = len;
12400 #ifdef SCTP_PACKET_LOGGING
12401 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12402 sctp_packet_log(mout, len);
12404 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12406 #if defined(SCTP_WITH_NO_CSUM)
12407 SCTP_STAT_INCR(sctps_sendnocrc);
12409 sh_out->checksum = sctp_calculate_cksum(mout, iphlen_out);
12410 SCTP_STAT_INCR(sctps_sendswcrc);
12413 SCTP_ENABLE_UDP_CSUM(o_pak);
12416 #if defined(SCTP_WITH_NO_CSUM)
12417 SCTP_STAT_INCR(sctps_sendnocrc);
12419 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12420 mout->m_pkthdr.csum_data = 0;
12421 SCTP_STAT_INCR(sctps_sendhwcrc);
12424 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
12426 /* Free the route if we got one back */
12432 if (ip6_out != NULL) {
12433 struct route_in6 ro;
12435 struct ifnet *ifp = NULL;
12437 /* zap the stack pointer to the route */
12438 bzero(&ro, sizeof(ro));
12440 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
12442 ip6_out->ip6_plen = len - sizeof(*ip6_out);
12443 #ifdef SCTP_PACKET_LOGGING
12444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12445 sctp_packet_log(mout, len);
12447 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12449 #if defined(SCTP_WITH_NO_CSUM)
12450 SCTP_STAT_INCR(sctps_sendnocrc);
12452 sh_out->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
12453 SCTP_STAT_INCR(sctps_sendswcrc);
12455 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
12456 udp->uh_sum = 0xffff;
12459 #if defined(SCTP_WITH_NO_CSUM)
12460 SCTP_STAT_INCR(sctps_sendnocrc);
12462 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12463 mout->m_pkthdr.csum_data = 0;
12464 SCTP_STAT_INCR(sctps_sendhwcrc);
12467 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
12469 /* Free the route if we got one back */
12474 SCTP_STAT_INCR(sctps_sendpackets);
12475 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12476 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
12479 static struct mbuf *
12480 sctp_copy_resume(struct uio *uio,
12482 int user_marks_eor,
12485 struct mbuf **new_tail)
12489 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12490 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12492 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12495 *sndout = m_length(m, NULL);
12496 *new_tail = m_last(m);
12502 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12509 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12511 if (sp->data == NULL) {
12512 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12515 sp->tail_mbuf = m_last(sp->data);
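/*
 * Editor's usage sketch (illustrative; 'len', 'copied' and 'last' are
 * hypothetical local names): both copy helpers above are thin wrappers
 * around m_uiotombuf(9), which allocates an mbuf chain and copies up to
 * 'len' bytes from the user's uio, advancing uio->uio_resid as it goes:
 *
 *	struct mbuf *m, *last;
 *	u_int copied;
 *
 *	m = m_uiotombuf(uio, M_WAITOK, len, 0, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOMEM);
 *	copied = m_length(m, NULL);	-- total bytes in the new chain
 *	last = m_last(m);		-- kept as the tail for appends
 *
 * sctp_copy_resume() reports these through *sndout and *new_tail;
 * sctp_copy_one() keeps the chain in sp->data and its last mbuf in
 * sp->tail_mbuf.
 */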
12521 static struct sctp_stream_queue_pending *
12522 sctp_copy_it_in(struct sctp_tcb *stcb,
12523 struct sctp_association *asoc,
12524 struct sctp_sndrcvinfo *srcv,
12526 struct sctp_nets *net,
12528 int user_marks_eor,
12532 * This routine must be very careful in its work. Protocol
12533 * processing is up and running so care must be taken to spl...()
12534  * when you need to do something that may affect the stcb/asoc. The
12535 * sb is locked however. When data is copied the protocol processing
12536 * should be enabled since this is a slower operation...
12538 struct sctp_stream_queue_pending *sp = NULL;
12542 /* Now can we send this? */
12543 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12544 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12545 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12546 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12547 /* got data while shutting down */
12548 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12549 *error = ECONNRESET;
12552 sctp_alloc_a_strmoq(stcb, sp);
12554 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12559 sp->sender_all_done = 0;
12560 sp->sinfo_flags = srcv->sinfo_flags;
12561 sp->timetolive = srcv->sinfo_timetolive;
12562 sp->ppid = srcv->sinfo_ppid;
12563 sp->context = srcv->sinfo_context;
12565 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12567 sp->stream = srcv->sinfo_stream;
12568 sp->length = min(uio->uio_resid, max_send_len);
12569 if ((sp->length == (uint32_t) uio->uio_resid) &&
12570 ((user_marks_eor == 0) ||
12571 (srcv->sinfo_flags & SCTP_EOF) ||
12572 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12573 sp->msg_is_complete = 1;
12575 sp->msg_is_complete = 0;
12577 sp->sender_all_done = 0;
12578 sp->some_taken = 0;
12579 sp->put_last_out = 0;
12580 resv_in_first = sizeof(struct sctp_data_chunk);
12581 sp->data = sp->tail_mbuf = NULL;
12582 if (sp->length == 0) {
12586 if (srcv->sinfo_keynumber_valid) {
12587 sp->auth_keyid = srcv->sinfo_keynumber;
12589 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12591 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12592 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12593 sp->holds_key_ref = 1;
12595 *error = sctp_copy_one(sp, uio, resv_in_first);
12598 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12601 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12603 atomic_add_int(&sp->net->ref_count, 1);
12607 sctp_set_prsctp_policy(sp);
12615 sctp_sosend(struct socket *so,
12616 struct sockaddr *addr,
12619 struct mbuf *control,
12624 int error, use_sndinfo = 0;
12625 struct sctp_sndrcvinfo sndrcvninfo;
12626 struct sockaddr *addr_to_use;
12628 #if defined(INET) && defined(INET6)
12629 struct sockaddr_in sin;
12634 /* process cmsg snd/rcv info (maybe an assoc-id) */
12635 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12636 sizeof(sndrcvninfo))) {
12641 addr_to_use = addr;
12642 #if defined(INET) && defined(INET6)
12643 if ((addr) && (addr->sa_family == AF_INET6)) {
12644 struct sockaddr_in6 *sin6;
12646 sin6 = (struct sockaddr_in6 *)addr;
12647 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12648 in6_sin6_2_sin(&sin, sin6);
12649 addr_to_use = (struct sockaddr *)&sin;
12653 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12656 use_sndinfo ? &sndrcvninfo : NULL
12664 sctp_lower_sosend(struct socket *so,
12665 struct sockaddr *addr,
12667 struct mbuf *i_pak,
12668 struct mbuf *control,
12670 struct sctp_sndrcvinfo *srcv
12675 unsigned int sndlen = 0, max_len;
12677 struct mbuf *top = NULL;
12678 int queue_only = 0, queue_only_for_init = 0;
12679 int free_cnt_applied = 0;
12681 int now_filled = 0;
12682 unsigned int inqueue_bytes = 0;
12683 struct sctp_block_entry be;
12684 struct sctp_inpcb *inp;
12685 struct sctp_tcb *stcb = NULL;
12686 struct timeval now;
12687 struct sctp_nets *net;
12688 struct sctp_association *asoc;
12689 struct sctp_inpcb *t_inp;
12690 int user_marks_eor;
12691 int create_lock_applied = 0;
12692 int nagle_applies = 0;
12693 int some_on_control = 0;
12694 int got_all_of_the_send = 0;
12695 int hold_tcblock = 0;
12696 int non_blocking = 0;
12697 uint32_t local_add_more, local_soresv = 0;
12699 uint16_t sinfo_flags;
12700 sctp_assoc_t sinfo_assoc_id;
12707 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12709 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12712 SCTP_RELEASE_PKT(i_pak);
12716 if ((uio == NULL) && (i_pak == NULL)) {
12717 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12720 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12721 atomic_add_int(&inp->total_sends, 1);
12723 if (uio->uio_resid < 0) {
12724 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12727 sndlen = uio->uio_resid;
12729 top = SCTP_HEADER_TO_CHAIN(i_pak);
12730 sndlen = SCTP_HEADER_LEN(i_pak);
12732 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12735 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12736 (inp->sctp_socket->so_qlimit)) {
12737 /* The listener can NOT send */
12738 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12743  * Pre-screen address; if one is given, the sa_len
12744  * must be set correctly!
12747 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12749 switch (raddr->sa.sa_family) {
12752 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12753 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12757 port = raddr->sin.sin_port;
12762 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12763 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12767 port = raddr->sin6.sin6_port;
12771 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12772 error = EAFNOSUPPORT;
12779 sinfo_flags = srcv->sinfo_flags;
12780 sinfo_assoc_id = srcv->sinfo_assoc_id;
12781 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12782 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12783 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12787 if (srcv->sinfo_flags)
12788 SCTP_STAT_INCR(sctps_sends_with_flags);
12790 sinfo_flags = inp->def_send.sinfo_flags;
12791 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12793 if (sinfo_flags & SCTP_SENDALL) {
12794 /* it's a sendall */
12795 error = sctp_sendall(inp, uio, top, srcv);
12799 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12800 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12804 /* now we must find the assoc */
12805 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12806 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12807 SCTP_INP_RLOCK(inp);
12808 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12810 SCTP_TCB_LOCK(stcb);
12813 SCTP_INP_RUNLOCK(inp);
12814 } else if (sinfo_assoc_id) {
12815 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
12818 * Since we did not use findep we must
12819 * increment it, and if we don't find a tcb
12822 SCTP_INP_WLOCK(inp);
12823 SCTP_INP_INCR_REF(inp);
12824 SCTP_INP_WUNLOCK(inp);
12825 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12826 if (stcb == NULL) {
12827 SCTP_INP_WLOCK(inp);
12828 SCTP_INP_DECR_REF(inp);
12829 SCTP_INP_WUNLOCK(inp);
12834 if ((stcb == NULL) && (addr)) {
12835 /* Possible implicit send? */
12836 SCTP_ASOC_CREATE_LOCK(inp);
12837 create_lock_applied = 1;
12838 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12839 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12840 /* Should I really unlock ? */
12841 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12846 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12847 (addr->sa_family == AF_INET6)) {
12848 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12852 SCTP_INP_WLOCK(inp);
12853 SCTP_INP_INCR_REF(inp);
12854 SCTP_INP_WUNLOCK(inp);
12855 /* With the lock applied look again */
12856 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12857 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12858 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12860 if (stcb == NULL) {
12861 SCTP_INP_WLOCK(inp);
12862 SCTP_INP_DECR_REF(inp);
12863 SCTP_INP_WUNLOCK(inp);
12870 if (t_inp != inp) {
12871 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12876 if (stcb == NULL) {
12877 if (addr == NULL) {
12878 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12882 /* We must go ahead and start the INIT process */
12885 if ((sinfo_flags & SCTP_ABORT) ||
12886 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12888 * User asks to abort a non-existent assoc,
12889 * or EOF a non-existent assoc with no data
12891 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12895 /* get an asoc/stcb struct */
12896 vrf_id = inp->def_vrf_id;
12898 if (create_lock_applied == 0) {
12899 panic("Error, should hold create lock and I don't?");
12902 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12905 if (stcb == NULL) {
12906 /* Error is setup for us in the call */
12909 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12910 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12912 * Set the connected flag so we can queue data.
12915 soisconnecting(so);
12918 if (create_lock_applied) {
12919 SCTP_ASOC_CREATE_UNLOCK(inp);
12920 create_lock_applied = 0;
12922 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12925 * Turn on the queue-only flag to prevent data from being sent.
12929 asoc = &stcb->asoc;
12930 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12931 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12933 /* initialize authentication params for the assoc */
12934 sctp_initialize_auth_params(inp, stcb);
12937 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12938 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
12944 /* out with the INIT */
12945 queue_only_for_init = 1;
12947 * we may want to dig in after this call and adjust the MTU
12948 * value. It defaulted to 1500 (constant) but the ro
12949 * structure may now have an update and thus we may need to
12950 * change it BEFORE we append the message.
12954 asoc = &stcb->asoc;
12956 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12957 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12959 net = sctp_findnet(stcb, addr);
12962 if ((net == NULL) ||
12963 ((port != 0) && (port != stcb->rport))) {
12964 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
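/*
 * No SCTP_ADDR_OVER: let the stack pick the destination, preferring
 * the alternate path if one has been chosen (e.g. during failover),
 * otherwise the primary destination.
 */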
12969 if (stcb->asoc.alternate) {
12970 net = stcb->asoc.alternate;
12972 net = stcb->asoc.primary_destination;
12975 atomic_add_int(&stcb->total_sends, 1);
12976 /* Keep the stcb from being freed under our feet */
12977 atomic_add_int(&asoc->refcnt, 1);
12978 free_cnt_applied = 1;
12980 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12981 if (sndlen > asoc->smallest_mtu) {
12982 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12987 if (SCTP_SO_IS_NBIO(so)
12988 || (flags & MSG_NBIO)
12992 /* would we block? */
12993 if (non_blocking) {
12994 if (hold_tcblock == 0) {
12995 SCTP_TCB_LOCK(stcb);
12998 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12999 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
13000 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13001 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
13002 if (sndlen > SCTP_SB_LIMIT_SND(so))
13005 error = EWOULDBLOCK;
13008 stcb->asoc.sb_send_resv += sndlen;
13009 SCTP_TCB_UNLOCK(stcb);
13012 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
13014 local_soresv = sndlen;
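/*
 * Remember the send-buffer reservation so it is released again at the
 * out: label.
 */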
13015 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13016 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13017 error = ECONNRESET;
13020 if (create_lock_applied) {
13021 SCTP_ASOC_CREATE_UNLOCK(inp);
13022 create_lock_applied = 0;
13024 if (asoc->stream_reset_outstanding) {
13026 * Can't queue any data while stream reset is underway.
13028 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
13032 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13033 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13036 /* we are now done with all control */
13038 sctp_m_freem(control);
13041 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
13042 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
13043 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
13044 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
13045 if (srcv->sinfo_flags & SCTP_ABORT) {
13048 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13049 error = ECONNRESET;
13053 /* Ok, we will attempt a msgsnd :> */
13055 p->td_ru.ru_msgsnd++;
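/*
 * Illustrative userland usage (assumption, not part of this file): the
 * abort path below is typically reached by a send carrying SCTP_ABORT
 * in sinfo_flags, e.g.
 *
 *	struct sctp_sndrcvinfo sinfo = { 0 };
 *
 *	sinfo.sinfo_flags = SCTP_ABORT;
 *	(void)sctp_send(sd, cause, cause_len, &sinfo, 0);
 *
 * Any user data supplied this way becomes the user-initiated abort cause.
 */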
13057 /* Are we aborting? */
13058 if (srcv->sinfo_flags & SCTP_ABORT) {
13060 int tot_demand, tot_out = 0, max_out;
13062 SCTP_STAT_INCR(sctps_sends_with_abort);
13063 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13064 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13065 /* It has to be up before we abort */
13066 /* how big is the user initiated abort? */
13067 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13071 if (hold_tcblock) {
13072 SCTP_TCB_UNLOCK(stcb);
13076 struct mbuf *cntm = NULL;
13078 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAIT, 1, MT_DATA);
13080 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
13081 tot_out += SCTP_BUF_LEN(cntm);
13085 /* Must fit in an MTU */
13087 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13088 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
13090 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13094 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
13097 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13101 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
13102 max_out -= sizeof(struct sctp_abort_msg);
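/*
 * Truncate the user's abort cause, if necessary, so the whole ABORT
 * (chunk header plus cause) still fits within the smallest path MTU.
 */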
13103 if (tot_out > max_out) {
13107 struct sctp_paramhdr *ph;
13109 /* now move forward the data pointer */
13110 ph = mtod(mm, struct sctp_paramhdr *);
13111 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
13112 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
13114 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
13116 error = uiomove((caddr_t)ph, (int)tot_out, uio);
13119 * Here, if we can't get the user's data we
13120 * still abort; we just don't get to
13121 * send the user's note :-0
13128 SCTP_BUF_NEXT(mm) = top;
13132 if (hold_tcblock == 0) {
13133 SCTP_TCB_LOCK(stcb);
13135 atomic_add_int(&stcb->asoc.refcnt, -1);
13136 free_cnt_applied = 0;
13137 /* release this lock, otherwise we hang on ourselves */
13138 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
13139 /* now relock the stcb so everything is sane */
13143 * In this case top is already chained to mm; avoid a double
13144 * free, since we free it below if top != NULL and the driver
13145 * would free it after sending the packet out
13152 /* Calculate the maximum we can send */
13153 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13154 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13155 if (non_blocking) {
13156 /* we already checked for non-blocking above. */
13159 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13164 if (hold_tcblock) {
13165 SCTP_TCB_UNLOCK(stcb);
13168 /* Is the stream no. valid? */
13169 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
13170 /* Invalid stream number */
13171 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13175 if (asoc->strmout == NULL) {
13176 /* huh? software error */
13177 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13181 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
13182 if ((user_marks_eor == 0) &&
13183 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13184 /* It will NEVER fit */
13185 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13189 if ((uio == NULL) && user_marks_eor) {
13191 * We do not support EEOR mode for
13192 * sending with mbuf chains (like sendfile).
13194 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13198 if (user_marks_eor) {
13199 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13202 * For non-EEOR sends the whole message must fit in
13203 * the socket send buffer.
13205 local_add_more = sndlen;
13208 if (non_blocking) {
13209 goto skip_preblock;
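/*
 * Pre-block: if the message (or, in explicit-EOR mode, the add-more
 * threshold) does not fit in the send buffer, sleep on so_snd until
 * SACK processing frees up space.
 */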
13211 if (((max_len <= local_add_more) &&
13212 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13214 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13215 /* No room right now ! */
13216 SOCKBUF_LOCK(&so->so_snd);
13217 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13218 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13219 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13220 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
13221 (unsigned int)SCTP_SB_LIMIT_SND(so),
13224 stcb->asoc.stream_queue_cnt,
13225 stcb->asoc.chunks_on_out_queue,
13226 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13227 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13228 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
13231 stcb->block_entry = &be;
13232 error = sbwait(&so->so_snd);
13233 stcb->block_entry = NULL;
13234 if (error || so->so_error || be.error) {
13237 error = so->so_error;
13242 SOCKBUF_UNLOCK(&so->so_snd);
13245 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13246 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13247 asoc, stcb->asoc.total_output_queue_size);
13249 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13252 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13254 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13255 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13259 SOCKBUF_UNLOCK(&so->so_snd);
13262 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13266 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
13267 * case. NOTE: uio will be NULL when top (an mbuf chain) is passed.
13270 if (srcv->sinfo_flags & SCTP_EOF) {
13271 got_all_of_the_send = 1;
13274 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13280 struct sctp_stream_queue_pending *sp;
13281 struct sctp_stream_out *strm;
13284 SCTP_TCB_SEND_LOCK(stcb);
13285 if ((asoc->stream_locked) &&
13286 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13287 SCTP_TCB_SEND_UNLOCK(stcb);
13288 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13292 SCTP_TCB_SEND_UNLOCK(stcb);
13294 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
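/*
 * Either start a new stream-queue entry for this message or, in
 * explicit-EOR mode, keep appending to the last, still-incomplete one.
 */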
13295 if (strm->last_msg_incomplete == 0) {
13297 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13298 if ((sp == NULL) || (error)) {
13301 SCTP_TCB_SEND_LOCK(stcb);
13302 if (sp->msg_is_complete) {
13303 strm->last_msg_incomplete = 0;
13304 asoc->stream_locked = 0;
13307 * Just got locked to this stream, in case of an interrupted (partial) send.
13310 strm->last_msg_incomplete = 1;
13311 asoc->stream_locked = 1;
13312 asoc->stream_locked_on = srcv->sinfo_stream;
13313 sp->sender_all_done = 0;
13315 sctp_snd_sb_alloc(stcb, sp->length);
13316 atomic_add_int(&asoc->stream_queue_cnt, 1);
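/*
 * Ordered sends consume a stream sequence number; unordered
 * (SCTP_UNORDERED) sends do not.
 */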
13317 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
13318 sp->strseq = strm->next_sequence_sent;
13319 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
13320 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
13321 (uintptr_t) stcb, sp->length,
13322 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
13324 strm->next_sequence_sent++;
13326 SCTP_STAT_INCR(sctps_sends_with_unord);
13328 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13329 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13330 SCTP_TCB_SEND_UNLOCK(stcb);
13332 SCTP_TCB_SEND_LOCK(stcb);
13333 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13334 SCTP_TCB_SEND_UNLOCK(stcb);
13336 /* ???? Huh ??? last msg is gone */
13338 panic("Warning: Last msg marked incomplete, yet nothing left?");
13340 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13341 strm->last_msg_incomplete = 0;
13347 while (uio->uio_resid > 0) {
13348 /* How much room do we have? */
13349 struct mbuf *new_tail, *mm;
13351 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13352 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
13356 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13357 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13358 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13361 if (hold_tcblock) {
13362 SCTP_TCB_UNLOCK(stcb);
13365 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
13366 if ((mm == NULL) || error) {
13372 /* Update the mbuf and count */
13373 SCTP_TCB_SEND_LOCK(stcb);
13374 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13376 * we need to get out. Peer probably aborted.
13380 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
13381 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13382 error = ECONNRESET;
13384 SCTP_TCB_SEND_UNLOCK(stcb);
13387 if (sp->tail_mbuf) {
13388 /* tack it to the end */
13389 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13390 sp->tail_mbuf = new_tail;
13392 /* A stolen mbuf */
13394 sp->tail_mbuf = new_tail;
13396 sctp_snd_sb_alloc(stcb, sndout);
13397 atomic_add_int(&sp->length, sndout);
13400 /* Did we reach EOR? */
13401 if ((uio->uio_resid == 0) &&
13402 ((user_marks_eor == 0) ||
13403 (srcv->sinfo_flags & SCTP_EOF) ||
13404 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13405 sp->msg_is_complete = 1;
13407 sp->msg_is_complete = 0;
13409 SCTP_TCB_SEND_UNLOCK(stcb);
13411 if (uio->uio_resid == 0) {
13416 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
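/*
 * PR-SCTP peer: try to prune removable (abandonable) chunks from the
 * sent queue to free buffer space before deciding to block.
 */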
13418 * This is ugly but we must assure locking
13421 if (hold_tcblock == 0) {
13422 SCTP_TCB_LOCK(stcb);
13425 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
13426 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13427 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13428 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13434 SCTP_TCB_UNLOCK(stcb);
13437 /* wait for space now */
13438 if (non_blocking) {
13439 /* Non-blocking I/O: don't wait for space, just get out. */
13442 /* What about the INIT, send it maybe */
13443 if (queue_only_for_init) {
13444 if (hold_tcblock == 0) {
13445 SCTP_TCB_LOCK(stcb);
13448 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13449 /* a collision took us forward? */
13452 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13453 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13457 if ((net->flight_size > net->cwnd) &&
13458 (asoc->sctp_cmt_on_off == 0)) {
13459 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13461 } else if (asoc->ifp_had_enobuf) {
13462 SCTP_STAT_INCR(sctps_ifnomemqueued);
13463 if (net->flight_size > (2 * net->mtu)) {
13466 asoc->ifp_had_enobuf = 0;
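/*
 * un_sent: bytes queued but not yet in flight, plus the data-chunk
 * header overhead for each queued message.
 */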
13468 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13469 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
13470 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13471 (stcb->asoc.total_flight > 0) &&
13472 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13473 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13476 * Ok, Nagle is set on and we have data outstanding.
13477 * Don't send anything and let SACKs drive out the
13478 * data unless we have a "full" segment to send.
13480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13481 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13483 SCTP_STAT_INCR(sctps_naglequeued);
13486 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13487 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13488 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13490 SCTP_STAT_INCR(sctps_naglesent);
13493 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13495 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13496 nagle_applies, un_sent);
13497 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13498 stcb->asoc.total_flight,
13499 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13501 if (queue_only_for_init)
13502 queue_only_for_init = 0;
13503 if ((queue_only == 0) && (nagle_applies == 0)) {
13505 * need to start chunk output
13506 * before blocking.. note that if
13507 * a lock is already applied, then
13508 * the input via the net is happening
13509 * and I don't need to start output :-D
13511 if (hold_tcblock == 0) {
13512 if (SCTP_TCB_TRYLOCK(stcb)) {
13514 sctp_chunk_output(inp,
13516 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13519 sctp_chunk_output(inp,
13521 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13523 if (hold_tcblock == 1) {
13524 SCTP_TCB_UNLOCK(stcb);
13528 SOCKBUF_LOCK(&so->so_snd);
13530 * This is a bit strange, but I think it will
13531 * work. The total_output_queue_size is locked and
13532 * protected by the TCB_LOCK, which we just released.
13533 * There is a race that can occur between releasing it
13534 * above, and me getting the socket lock, where sacks
13535 * come in but we have not put the SB_WAIT on the
13536 * so_snd buffer to get the wakeup. After the LOCK
13537 * is applied the sack_processing will also need to
13538 * LOCK the so->so_snd to do the actual sowwakeup(). So
13539 * once we have the socket buffer lock if we recheck the
13540 * size we KNOW we will get to sleep safely with the
13541 * wakeup flag in place.
13543 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
13544 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13546 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13547 asoc, uio->uio_resid);
13550 stcb->block_entry = &be;
13551 error = sbwait(&so->so_snd);
13552 stcb->block_entry = NULL;
13554 if (error || so->so_error || be.error) {
13557 error = so->so_error;
13562 SOCKBUF_UNLOCK(&so->so_snd);
13565 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13566 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13567 asoc, stcb->asoc.total_output_queue_size);
13570 SOCKBUF_UNLOCK(&so->so_snd);
13571 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13575 SCTP_TCB_SEND_LOCK(stcb);
13577 if (sp->msg_is_complete == 0) {
13578 strm->last_msg_incomplete = 1;
13579 asoc->stream_locked = 1;
13580 asoc->stream_locked_on = srcv->sinfo_stream;
13582 sp->sender_all_done = 1;
13583 strm->last_msg_incomplete = 0;
13584 asoc->stream_locked = 0;
13587 SCTP_PRINTF("Huh no sp TSNH?\n");
13588 strm->last_msg_incomplete = 0;
13589 asoc->stream_locked = 0;
13591 SCTP_TCB_SEND_UNLOCK(stcb);
13592 if (uio->uio_resid == 0) {
13593 got_all_of_the_send = 1;
13596 /* We send in a 0, since we do NOT have any locks */
13597 error = sctp_msg_append(stcb, net, top, srcv, 0);
13599 if (srcv->sinfo_flags & SCTP_EOF) {
13601 * This should only happen for Panda for the mbuf
13602 * send case, which does NOT yet support EEOR mode.
13603 * Thus, we can just set this flag to do the proper EOF handling below.
13606 got_all_of_the_send = 1;
13614 if ((srcv->sinfo_flags & SCTP_EOF) &&
13615 (got_all_of_the_send == 1) &&
13616 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
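/*
 * SCTP_EOF with all user data queued on a one-to-many (UDP-style)
 * socket: begin a graceful shutdown of this association.  Illustrative
 * userland trigger (assumption, not part of this file):
 *
 *	sinfo.sinfo_flags = SCTP_EOF;
 *	(void)sctp_send(sd, NULL, 0, &sinfo, 0);
 *
 * If nothing is left unsent we send SHUTDOWN right away; otherwise the
 * association is marked SHUTDOWN_PENDING until the queues drain.
 */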
13619 SCTP_STAT_INCR(sctps_sends_with_eof);
13621 if (hold_tcblock == 0) {
13622 SCTP_TCB_LOCK(stcb);
13625 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
13626 if (TAILQ_EMPTY(&asoc->send_queue) &&
13627 TAILQ_EMPTY(&asoc->sent_queue) &&
13629 if (asoc->locked_on_sending) {
13632 /* there is nothing queued to send, so I'm done... */
13633 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13634 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13635 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13636 struct sctp_nets *netp;
13638 if (stcb->asoc.alternate) {
13639 netp = stcb->asoc.alternate;
13641 netp = stcb->asoc.primary_destination;
13643 /* only send SHUTDOWN the first time through */
13644 sctp_send_shutdown(stcb, netp);
13645 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
13646 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13648 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
13649 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
13650 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13652 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13653 asoc->primary_destination);
13657 * we still got (or just got) data to send, so set SHUTDOWN_PENDING.
13661 * XXX sockets draft says that SCTP_EOF should be
13662 * sent with no data. Currently, we will allow user
13663 * data to be sent first and then move to SHUTDOWN-PENDING.
13666 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13667 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13668 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13669 if (hold_tcblock == 0) {
13670 SCTP_TCB_LOCK(stcb);
13673 if (asoc->locked_on_sending) {
13674 /* Locked to send out the data */
13675 struct sctp_stream_queue_pending *sp;
13677 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
13679 if ((sp->length == 0) && (sp->msg_is_complete == 0))
13680 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
13683 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
13684 if (TAILQ_EMPTY(&asoc->send_queue) &&
13685 TAILQ_EMPTY(&asoc->sent_queue) &&
13686 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13688 if (free_cnt_applied) {
13689 atomic_add_int(&stcb->asoc.refcnt, -1);
13690 free_cnt_applied = 0;
13692 sctp_abort_an_association(stcb->sctp_ep, stcb,
13693 NULL, SCTP_SO_LOCKED);
13695 * now relock the stcb so everything is sane
13702 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13703 asoc->primary_destination);
13704 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13709 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13710 some_on_control = 1;
13712 if (queue_only_for_init) {
13713 if (hold_tcblock == 0) {
13714 SCTP_TCB_LOCK(stcb);
13717 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13718 /* a collision took us forward? */
13721 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13722 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
13726 if ((net->flight_size > net->cwnd) &&
13727 (stcb->asoc.sctp_cmt_on_off == 0)) {
13728 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13730 } else if (asoc->ifp_had_enobuf) {
13731 SCTP_STAT_INCR(sctps_ifnomemqueued);
13732 if (net->flight_size > (2 * net->mtu)) {
13735 asoc->ifp_had_enobuf = 0;
13737 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13738 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
13739 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13740 (stcb->asoc.total_flight > 0) &&
13741 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13742 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13744 * Ok, Nagle is set on and we have data outstanding.
13745 * Don't send anything and let SACKs drive out the
13746 * data unless we have a "full" segment to send.
13748 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13749 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13751 SCTP_STAT_INCR(sctps_naglequeued);
13754 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13755 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13756 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13758 SCTP_STAT_INCR(sctps_naglesent);
13761 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13762 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13763 nagle_applies, un_sent);
13764 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13765 stcb->asoc.total_flight,
13766 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13768 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13769 /* we can attempt to send too. */
13770 if (hold_tcblock == 0) {
13772 * If SACK processing already holds the lock, it will drive output;
13775 if (SCTP_TCB_TRYLOCK(stcb)) {
13776 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13780 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13782 } else if ((queue_only == 0) &&
13783 (stcb->asoc.peers_rwnd == 0) &&
13784 (stcb->asoc.total_flight == 0)) {
13785 /* We get to have a probe outstanding */
13786 if (hold_tcblock == 0) {
13788 SCTP_TCB_LOCK(stcb);
13790 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13791 } else if (some_on_control) {
13792 int num_out, reason, frag_point;
13794 /* Here we do control only */
13795 if (hold_tcblock == 0) {
13797 SCTP_TCB_LOCK(stcb);
13799 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13800 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13801 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13803 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13804 queue_only, stcb->asoc.peers_rwnd, un_sent,
13805 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13806 stcb->asoc.total_output_queue_size, error);
13811 if (local_soresv && stcb) {
13812 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13814 if (create_lock_applied) {
13815 SCTP_ASOC_CREATE_UNLOCK(inp);
13817 if ((stcb) && hold_tcblock) {
13818 SCTP_TCB_UNLOCK(stcb);
13820 if (stcb && free_cnt_applied) {
13821 atomic_add_int(&stcb->asoc.refcnt, -1);
13825 if (mtx_owned(&stcb->tcb_mtx)) {
13826 panic("Leaving with tcb mtx owned?");
13828 if (mtx_owned(&stcb->tcb_send_mtx)) {
13829 panic("Leaving with tcb send mtx owned?");
13835 sctp_validate_no_locks(inp);
13837 SCTP_PRINTF("Warning - inp is NULL so can't validate locks\n");
13844 sctp_m_freem(control);
13851 * generate an AUTHentication chunk, if required
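 * (i.e. when the peer requires the given chunk type to be authenticated).
 * The AUTH chunk is appended to the chain and *offset records where it
 * starts, so the key id and HMAC digest can be filled in at send time.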
13854 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13855 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
13856 struct sctp_tcb *stcb, uint8_t chunk)
13858 struct mbuf *m_auth;
13859 struct sctp_auth_chunk *auth;
13863 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13867 /* sysctl disabled auth? */
13868 if (SCTP_BASE_SYSCTL(sctp_auth_disable))
13871 /* peer doesn't do auth... */
13872 if (!stcb->asoc.peer_supports_auth) {
13875 /* does the requested chunk require auth? */
13876 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13879 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
13880 if (m_auth == NULL) {
13884 /* reserve some space if this will be the first mbuf */
13886 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13887 /* fill in the AUTH chunk details */
13888 auth = mtod(m_auth, struct sctp_auth_chunk *);
13889 bzero(auth, sizeof(*auth));
13890 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13891 auth->ch.chunk_flags = 0;
13892 chunk_len = sizeof(*auth) +
13893 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13894 auth->ch.chunk_length = htons(chunk_len);
13895 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13896 /* key id and hmac digest will be computed and filled in upon send */
13898 /* save the offset where the auth was inserted into the chain */
13900 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13901 *offset += SCTP_BUF_LEN(cn);
13904 /* update length and return pointer to the auth chunk */
13905 SCTP_BUF_LEN(m_auth) = chunk_len;
13906 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13907 if (auth_ret != NULL)
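/*
 * Check whether src6 is a sensible source address for the cached route:
 * it must fall within an ND prefix whose advertising routers include
 * the route's gateway (next hop).
 */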
13915 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
13917 struct nd_prefix *pfx = NULL;
13918 struct nd_pfxrouter *pfxrtr = NULL;
13919 struct sockaddr_in6 gw6;
13921 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13924 /* get prefix entry of address */
13925 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13926 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13928 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13929 &src6->sin6_addr, &pfx->ndpr_mask))
13932 /* no prefix entry in the prefix list */
13934 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13935 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13938 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13939 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13941 /* search installed gateway from prefix entry */
13942 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13943 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13944 gw6.sin6_family = AF_INET6;
13945 gw6.sin6_len = sizeof(struct sockaddr_in6);
13946 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13947 sizeof(struct in6_addr));
13948 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13949 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13950 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13951 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13952 if (sctp_cmpaddr((struct sockaddr *)&gw6,
13953 ro->ro_rt->rt_gateway)) {
13954 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13958 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
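/*
 * IPv4 counterpart: the chosen source address and the route's gateway
 * must lie on the same directly connected subnet (same masked network).
 */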
13965 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
13968 struct sockaddr_in *sin, *mask;
13969 struct ifaddr *ifa;
13970 struct in_addr srcnetaddr, gwnetaddr;
13972 if (ro == NULL || ro->ro_rt == NULL ||
13973 sifa->address.sa.sa_family != AF_INET) {
13976 ifa = (struct ifaddr *)sifa->ifa;
13977 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13978 sin = (struct sockaddr_in *)&sifa->address.sin;
13979 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13980 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13981 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13982 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13984 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13985 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13986 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13987 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13988 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13989 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {