2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_auth.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_asconf.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_bsd_addr.h>
51 #include <netinet/sctp_input.h>
52 #include <netinet/sctp_crc32.h>
53 #if defined(INET) || defined(INET6)
54 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <machine/in_cksum.h>
61 #define SCTP_MAX_GAPS_INARRAY 4
63 uint8_t right_edge; /* mergeable on the right edge */
64 uint8_t left_edge; /* mergeable on the left edge */
67 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
70 const struct sack_track sack_array[256] = {
71 {0, 0, 0, 0, /* 0x00 */
78 {1, 0, 1, 0, /* 0x01 */
85 {0, 0, 1, 0, /* 0x02 */
92 {1, 0, 1, 0, /* 0x03 */
99 {0, 0, 1, 0, /* 0x04 */
106 {1, 0, 2, 0, /* 0x05 */
113 {0, 0, 1, 0, /* 0x06 */
120 {1, 0, 1, 0, /* 0x07 */
127 {0, 0, 1, 0, /* 0x08 */
134 {1, 0, 2, 0, /* 0x09 */
141 {0, 0, 2, 0, /* 0x0a */
148 {1, 0, 2, 0, /* 0x0b */
155 {0, 0, 1, 0, /* 0x0c */
162 {1, 0, 2, 0, /* 0x0d */
169 {0, 0, 1, 0, /* 0x0e */
176 {1, 0, 1, 0, /* 0x0f */
183 {0, 0, 1, 0, /* 0x10 */
190 {1, 0, 2, 0, /* 0x11 */
197 {0, 0, 2, 0, /* 0x12 */
204 {1, 0, 2, 0, /* 0x13 */
211 {0, 0, 2, 0, /* 0x14 */
218 {1, 0, 3, 0, /* 0x15 */
225 {0, 0, 2, 0, /* 0x16 */
232 {1, 0, 2, 0, /* 0x17 */
239 {0, 0, 1, 0, /* 0x18 */
246 {1, 0, 2, 0, /* 0x19 */
253 {0, 0, 2, 0, /* 0x1a */
260 {1, 0, 2, 0, /* 0x1b */
267 {0, 0, 1, 0, /* 0x1c */
274 {1, 0, 2, 0, /* 0x1d */
281 {0, 0, 1, 0, /* 0x1e */
288 {1, 0, 1, 0, /* 0x1f */
295 {0, 0, 1, 0, /* 0x20 */
302 {1, 0, 2, 0, /* 0x21 */
309 {0, 0, 2, 0, /* 0x22 */
316 {1, 0, 2, 0, /* 0x23 */
323 {0, 0, 2, 0, /* 0x24 */
330 {1, 0, 3, 0, /* 0x25 */
337 {0, 0, 2, 0, /* 0x26 */
344 {1, 0, 2, 0, /* 0x27 */
351 {0, 0, 2, 0, /* 0x28 */
358 {1, 0, 3, 0, /* 0x29 */
365 {0, 0, 3, 0, /* 0x2a */
372 {1, 0, 3, 0, /* 0x2b */
379 {0, 0, 2, 0, /* 0x2c */
386 {1, 0, 3, 0, /* 0x2d */
393 {0, 0, 2, 0, /* 0x2e */
400 {1, 0, 2, 0, /* 0x2f */
407 {0, 0, 1, 0, /* 0x30 */
414 {1, 0, 2, 0, /* 0x31 */
421 {0, 0, 2, 0, /* 0x32 */
428 {1, 0, 2, 0, /* 0x33 */
435 {0, 0, 2, 0, /* 0x34 */
442 {1, 0, 3, 0, /* 0x35 */
449 {0, 0, 2, 0, /* 0x36 */
456 {1, 0, 2, 0, /* 0x37 */
463 {0, 0, 1, 0, /* 0x38 */
470 {1, 0, 2, 0, /* 0x39 */
477 {0, 0, 2, 0, /* 0x3a */
484 {1, 0, 2, 0, /* 0x3b */
491 {0, 0, 1, 0, /* 0x3c */
498 {1, 0, 2, 0, /* 0x3d */
505 {0, 0, 1, 0, /* 0x3e */
512 {1, 0, 1, 0, /* 0x3f */
519 {0, 0, 1, 0, /* 0x40 */
526 {1, 0, 2, 0, /* 0x41 */
533 {0, 0, 2, 0, /* 0x42 */
540 {1, 0, 2, 0, /* 0x43 */
547 {0, 0, 2, 0, /* 0x44 */
554 {1, 0, 3, 0, /* 0x45 */
561 {0, 0, 2, 0, /* 0x46 */
568 {1, 0, 2, 0, /* 0x47 */
575 {0, 0, 2, 0, /* 0x48 */
582 {1, 0, 3, 0, /* 0x49 */
589 {0, 0, 3, 0, /* 0x4a */
596 {1, 0, 3, 0, /* 0x4b */
603 {0, 0, 2, 0, /* 0x4c */
610 {1, 0, 3, 0, /* 0x4d */
617 {0, 0, 2, 0, /* 0x4e */
624 {1, 0, 2, 0, /* 0x4f */
631 {0, 0, 2, 0, /* 0x50 */
638 {1, 0, 3, 0, /* 0x51 */
645 {0, 0, 3, 0, /* 0x52 */
652 {1, 0, 3, 0, /* 0x53 */
659 {0, 0, 3, 0, /* 0x54 */
666 {1, 0, 4, 0, /* 0x55 */
673 {0, 0, 3, 0, /* 0x56 */
680 {1, 0, 3, 0, /* 0x57 */
687 {0, 0, 2, 0, /* 0x58 */
694 {1, 0, 3, 0, /* 0x59 */
701 {0, 0, 3, 0, /* 0x5a */
708 {1, 0, 3, 0, /* 0x5b */
715 {0, 0, 2, 0, /* 0x5c */
722 {1, 0, 3, 0, /* 0x5d */
729 {0, 0, 2, 0, /* 0x5e */
736 {1, 0, 2, 0, /* 0x5f */
743 {0, 0, 1, 0, /* 0x60 */
750 {1, 0, 2, 0, /* 0x61 */
757 {0, 0, 2, 0, /* 0x62 */
764 {1, 0, 2, 0, /* 0x63 */
771 {0, 0, 2, 0, /* 0x64 */
778 {1, 0, 3, 0, /* 0x65 */
785 {0, 0, 2, 0, /* 0x66 */
792 {1, 0, 2, 0, /* 0x67 */
799 {0, 0, 2, 0, /* 0x68 */
806 {1, 0, 3, 0, /* 0x69 */
813 {0, 0, 3, 0, /* 0x6a */
820 {1, 0, 3, 0, /* 0x6b */
827 {0, 0, 2, 0, /* 0x6c */
834 {1, 0, 3, 0, /* 0x6d */
841 {0, 0, 2, 0, /* 0x6e */
848 {1, 0, 2, 0, /* 0x6f */
855 {0, 0, 1, 0, /* 0x70 */
862 {1, 0, 2, 0, /* 0x71 */
869 {0, 0, 2, 0, /* 0x72 */
876 {1, 0, 2, 0, /* 0x73 */
883 {0, 0, 2, 0, /* 0x74 */
890 {1, 0, 3, 0, /* 0x75 */
897 {0, 0, 2, 0, /* 0x76 */
904 {1, 0, 2, 0, /* 0x77 */
911 {0, 0, 1, 0, /* 0x78 */
918 {1, 0, 2, 0, /* 0x79 */
925 {0, 0, 2, 0, /* 0x7a */
932 {1, 0, 2, 0, /* 0x7b */
939 {0, 0, 1, 0, /* 0x7c */
946 {1, 0, 2, 0, /* 0x7d */
953 {0, 0, 1, 0, /* 0x7e */
960 {1, 0, 1, 0, /* 0x7f */
967 {0, 1, 1, 0, /* 0x80 */
974 {1, 1, 2, 0, /* 0x81 */
981 {0, 1, 2, 0, /* 0x82 */
988 {1, 1, 2, 0, /* 0x83 */
995 {0, 1, 2, 0, /* 0x84 */
1002 {1, 1, 3, 0, /* 0x85 */
1009 {0, 1, 2, 0, /* 0x86 */
1016 {1, 1, 2, 0, /* 0x87 */
1023 {0, 1, 2, 0, /* 0x88 */
1030 {1, 1, 3, 0, /* 0x89 */
1037 {0, 1, 3, 0, /* 0x8a */
1044 {1, 1, 3, 0, /* 0x8b */
1051 {0, 1, 2, 0, /* 0x8c */
1058 {1, 1, 3, 0, /* 0x8d */
1065 {0, 1, 2, 0, /* 0x8e */
1072 {1, 1, 2, 0, /* 0x8f */
1079 {0, 1, 2, 0, /* 0x90 */
1086 {1, 1, 3, 0, /* 0x91 */
1093 {0, 1, 3, 0, /* 0x92 */
1100 {1, 1, 3, 0, /* 0x93 */
1107 {0, 1, 3, 0, /* 0x94 */
1114 {1, 1, 4, 0, /* 0x95 */
1121 {0, 1, 3, 0, /* 0x96 */
1128 {1, 1, 3, 0, /* 0x97 */
1135 {0, 1, 2, 0, /* 0x98 */
1142 {1, 1, 3, 0, /* 0x99 */
1149 {0, 1, 3, 0, /* 0x9a */
1156 {1, 1, 3, 0, /* 0x9b */
1163 {0, 1, 2, 0, /* 0x9c */
1170 {1, 1, 3, 0, /* 0x9d */
1177 {0, 1, 2, 0, /* 0x9e */
1184 {1, 1, 2, 0, /* 0x9f */
1191 {0, 1, 2, 0, /* 0xa0 */
1198 {1, 1, 3, 0, /* 0xa1 */
1205 {0, 1, 3, 0, /* 0xa2 */
1212 {1, 1, 3, 0, /* 0xa3 */
1219 {0, 1, 3, 0, /* 0xa4 */
1226 {1, 1, 4, 0, /* 0xa5 */
1233 {0, 1, 3, 0, /* 0xa6 */
1240 {1, 1, 3, 0, /* 0xa7 */
1247 {0, 1, 3, 0, /* 0xa8 */
1254 {1, 1, 4, 0, /* 0xa9 */
1261 {0, 1, 4, 0, /* 0xaa */
1268 {1, 1, 4, 0, /* 0xab */
1275 {0, 1, 3, 0, /* 0xac */
1282 {1, 1, 4, 0, /* 0xad */
1289 {0, 1, 3, 0, /* 0xae */
1296 {1, 1, 3, 0, /* 0xaf */
1303 {0, 1, 2, 0, /* 0xb0 */
1310 {1, 1, 3, 0, /* 0xb1 */
1317 {0, 1, 3, 0, /* 0xb2 */
1324 {1, 1, 3, 0, /* 0xb3 */
1331 {0, 1, 3, 0, /* 0xb4 */
1338 {1, 1, 4, 0, /* 0xb5 */
1345 {0, 1, 3, 0, /* 0xb6 */
1352 {1, 1, 3, 0, /* 0xb7 */
1359 {0, 1, 2, 0, /* 0xb8 */
1366 {1, 1, 3, 0, /* 0xb9 */
1373 {0, 1, 3, 0, /* 0xba */
1380 {1, 1, 3, 0, /* 0xbb */
1387 {0, 1, 2, 0, /* 0xbc */
1394 {1, 1, 3, 0, /* 0xbd */
1401 {0, 1, 2, 0, /* 0xbe */
1408 {1, 1, 2, 0, /* 0xbf */
1415 {0, 1, 1, 0, /* 0xc0 */
1422 {1, 1, 2, 0, /* 0xc1 */
1429 {0, 1, 2, 0, /* 0xc2 */
1436 {1, 1, 2, 0, /* 0xc3 */
1443 {0, 1, 2, 0, /* 0xc4 */
1450 {1, 1, 3, 0, /* 0xc5 */
1457 {0, 1, 2, 0, /* 0xc6 */
1464 {1, 1, 2, 0, /* 0xc7 */
1471 {0, 1, 2, 0, /* 0xc8 */
1478 {1, 1, 3, 0, /* 0xc9 */
1485 {0, 1, 3, 0, /* 0xca */
1492 {1, 1, 3, 0, /* 0xcb */
1499 {0, 1, 2, 0, /* 0xcc */
1506 {1, 1, 3, 0, /* 0xcd */
1513 {0, 1, 2, 0, /* 0xce */
1520 {1, 1, 2, 0, /* 0xcf */
1527 {0, 1, 2, 0, /* 0xd0 */
1534 {1, 1, 3, 0, /* 0xd1 */
1541 {0, 1, 3, 0, /* 0xd2 */
1548 {1, 1, 3, 0, /* 0xd3 */
1555 {0, 1, 3, 0, /* 0xd4 */
1562 {1, 1, 4, 0, /* 0xd5 */
1569 {0, 1, 3, 0, /* 0xd6 */
1576 {1, 1, 3, 0, /* 0xd7 */
1583 {0, 1, 2, 0, /* 0xd8 */
1590 {1, 1, 3, 0, /* 0xd9 */
1597 {0, 1, 3, 0, /* 0xda */
1604 {1, 1, 3, 0, /* 0xdb */
1611 {0, 1, 2, 0, /* 0xdc */
1618 {1, 1, 3, 0, /* 0xdd */
1625 {0, 1, 2, 0, /* 0xde */
1632 {1, 1, 2, 0, /* 0xdf */
1639 {0, 1, 1, 0, /* 0xe0 */
1646 {1, 1, 2, 0, /* 0xe1 */
1653 {0, 1, 2, 0, /* 0xe2 */
1660 {1, 1, 2, 0, /* 0xe3 */
1667 {0, 1, 2, 0, /* 0xe4 */
1674 {1, 1, 3, 0, /* 0xe5 */
1681 {0, 1, 2, 0, /* 0xe6 */
1688 {1, 1, 2, 0, /* 0xe7 */
1695 {0, 1, 2, 0, /* 0xe8 */
1702 {1, 1, 3, 0, /* 0xe9 */
1709 {0, 1, 3, 0, /* 0xea */
1716 {1, 1, 3, 0, /* 0xeb */
1723 {0, 1, 2, 0, /* 0xec */
1730 {1, 1, 3, 0, /* 0xed */
1737 {0, 1, 2, 0, /* 0xee */
1744 {1, 1, 2, 0, /* 0xef */
1751 {0, 1, 1, 0, /* 0xf0 */
1758 {1, 1, 2, 0, /* 0xf1 */
1765 {0, 1, 2, 0, /* 0xf2 */
1772 {1, 1, 2, 0, /* 0xf3 */
1779 {0, 1, 2, 0, /* 0xf4 */
1786 {1, 1, 3, 0, /* 0xf5 */
1793 {0, 1, 2, 0, /* 0xf6 */
1800 {1, 1, 2, 0, /* 0xf7 */
1807 {0, 1, 1, 0, /* 0xf8 */
1814 {1, 1, 2, 0, /* 0xf9 */
1821 {0, 1, 2, 0, /* 0xfa */
1828 {1, 1, 2, 0, /* 0xfb */
1835 {0, 1, 1, 0, /* 0xfc */
1842 {1, 1, 2, 0, /* 0xfd */
1849 {0, 1, 1, 0, /* 0xfe */
1856 {1, 1, 1, 0, /* 0xff */
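/*
 * How to read the table above (illustrative sketch, not from the original
 * source): each entry is indexed by one byte of the association's mapping
 * array. Bit 0 set makes the entry mergeable on the right edge, bit 7 set
 * makes it mergeable on the left edge, and the third initializer field
 * counts the gap-ack blocks described by gaps[]. For example, 0x05 (bits 0
 * and 2 set) is the entry {1, 0, 2, 0, ...}: right-edge mergeable, not
 * left-edge mergeable, two gap blocks.
 */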
1867 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1868 struct sctp_scoping *scope,
1871 if ((scope->loopback_scope == 0) &&
1872 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1874 * skip loopback if not in scope
1878 switch (ifa->address.sa.sa_family) {
1881 if (scope->ipv4_addr_legal) {
1882 struct sockaddr_in *sin;
1884 sin = &ifa->address.sin;
1885 if (sin->sin_addr.s_addr == 0) {
1886 /* not in scope, unspecified */
1889 if ((scope->ipv4_local_scope == 0) &&
1890 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1891 /* private address not in scope */
1901 if (scope->ipv6_addr_legal) {
1902 struct sockaddr_in6 *sin6;
1905 * Must update the flags, bummer, which means any
1906 * IFA locks must now be applied HERE <->
1909 sctp_gather_internal_ifa_flags(ifa);
1911 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1914 /* ok to use deprecated addresses? */
1915 sin6 = &ifa->address.sin6;
1916 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1917 /* skip unspecified addresses */
1920 if ( /* (local_scope == 0) && */
1921 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1924 if ((scope->site_scope == 0) &&
1925 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1939 static struct mbuf *
1940 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t * len)
1942 #if defined(INET) || defined(INET6)
1943 struct sctp_paramhdr *parmh;
1948 switch (ifa->address.sa.sa_family) {
1951 plen = (uint16_t) sizeof(struct sctp_ipv4addr_param);
1956 plen = (uint16_t) sizeof(struct sctp_ipv6addr_param);
1962 #if defined(INET) || defined(INET6)
1963 if (M_TRAILINGSPACE(m) >= plen) {
1964 /* easy case: we just drop it on the end */
1965 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1968 /* Need more space */
1970 while (SCTP_BUF_NEXT(mret) != NULL) {
1971 mret = SCTP_BUF_NEXT(mret);
1973 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
1974 if (SCTP_BUF_NEXT(mret) == NULL) {
1975 /* We are hosed, can't add more addresses */
1978 mret = SCTP_BUF_NEXT(mret);
1979 parmh = mtod(mret, struct sctp_paramhdr *);
1981 /* now add the parameter */
1982 switch (ifa->address.sa.sa_family) {
1986 struct sctp_ipv4addr_param *ipv4p;
1987 struct sockaddr_in *sin;
1989 sin = &ifa->address.sin;
1990 ipv4p = (struct sctp_ipv4addr_param *)parmh;
1991 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
1992 parmh->param_length = htons(plen);
1993 ipv4p->addr = sin->sin_addr.s_addr;
1994 SCTP_BUF_LEN(mret) += plen;
2001 struct sctp_ipv6addr_param *ipv6p;
2002 struct sockaddr_in6 *sin6;
2004 sin6 = &ifa->address.sin6;
2005 ipv6p = (struct sctp_ipv6addr_param *)parmh;
2006 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
2007 parmh->param_length = htons(plen);
2008 memcpy(ipv6p->addr, &sin6->sin6_addr,
2009 sizeof(ipv6p->addr));
2010 /* clear embedded scope in the address */
2011 in6_clearscope((struct in6_addr *)ipv6p->addr);
2012 SCTP_BUF_LEN(mret) += plen;
2028 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2029 struct sctp_scoping *scope,
2030 struct mbuf *m_at, int cnt_inits_to,
2031 uint16_t * padding_len, uint16_t * chunk_len)
2033 struct sctp_vrf *vrf = NULL;
2034 int cnt, limit_out = 0, total_count;
2037 vrf_id = inp->def_vrf_id;
2038 SCTP_IPI_ADDR_RLOCK();
2039 vrf = sctp_find_vrf(vrf_id);
2041 SCTP_IPI_ADDR_RUNLOCK();
2044 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2045 struct sctp_ifa *sctp_ifap;
2046 struct sctp_ifn *sctp_ifnp;
2049 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2051 cnt = SCTP_ADDRESS_LIMIT;
2054 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2055 if ((scope->loopback_scope == 0) &&
2056 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2058 * Skip loopback devices if loopback_scope
2063 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2065 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2066 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2067 &sctp_ifap->address.sin.sin_addr) != 0)) {
2072 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2073 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2074 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2078 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2081 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2085 if (cnt > SCTP_ADDRESS_LIMIT) {
2089 if (cnt > SCTP_ADDRESS_LIMIT) {
2096 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2098 if ((scope->loopback_scope == 0) &&
2099 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2101 * Skip loopback devices if
2102 * loopback_scope not set
2106 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2108 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2109 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2110 &sctp_ifap->address.sin.sin_addr) != 0)) {
2115 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2116 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2117 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2121 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2124 if (sctp_is_address_in_scope(sctp_ifap,
2128 if ((chunk_len != NULL) &&
2129 (padding_len != NULL) &&
2130 (*padding_len > 0)) {
2131 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2132 SCTP_BUF_LEN(m_at) += *padding_len;
2133 *chunk_len += *padding_len;
2136 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2145 if (total_count > SCTP_ADDRESS_LIMIT) {
2146 /* No more addresses */
2154 struct sctp_laddr *laddr;
2157 /* First, how many? */
2158 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2159 if (laddr->ifa == NULL) {
2162 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2164 * Address being deleted by the system, don't
2168 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2170 * Address being deleted on this ep, don't
2175 if (sctp_is_address_in_scope(laddr->ifa,
2182 * To get through a NAT we only list addresses if we have
2183 * more than one. That way if you just bind a single address
2184 * we let the source of the init dictate our address.
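 * (Illustrative restatement of the rule above: an endpoint bound to exactly
 * one address therefore sends its INIT with no explicit address parameters
 * at all, and the peer simply uses the packet's source address.)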
2188 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2189 if (laddr->ifa == NULL) {
2192 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2195 if (sctp_is_address_in_scope(laddr->ifa,
2199 if ((chunk_len != NULL) &&
2200 (padding_len != NULL) &&
2201 (*padding_len > 0)) {
2202 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2203 SCTP_BUF_LEN(m_at) += *padding_len;
2204 *chunk_len += *padding_len;
2207 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2209 if (cnt >= SCTP_ADDRESS_LIMIT) {
2215 SCTP_IPI_ADDR_RUNLOCK();
2219 static struct sctp_ifa *
2220 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2221 uint8_t dest_is_loop,
2222 uint8_t dest_is_priv,
2225 uint8_t dest_is_global = 0;
2227 /* dest_is_priv is true if destination is a private address */
2228 /* dest_is_loop is true if destination is a loopback address */
2231 * Here we determine if it's a preferred address. A preferred address
2232 * means it is of the same or a higher scope than the destination.
2233 * L = loopback, P = private, G = global
2234 * -----------------------------------------
2235 * src | dest | result
2236 * ----------------------------------------
2238 * -----------------------------------------
2239 * P | L | yes-v4 no-v6
2240 * -----------------------------------------
2241 * G | L | yes-v4 no-v6
2242 * -----------------------------------------
2244 * -----------------------------------------
2246 * -----------------------------------------
2248 * -----------------------------------------
2250 * -----------------------------------------
2252 * -----------------------------------------
2254 * -----------------------------------------
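 * Worked example (illustrative): with the checks below, a private source
 * toward a global destination is rejected as not preferred (the NO:7 case),
 * and a global source toward a private destination is likewise rejected
 * (the NO:5 case).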
2257 if (ifa->address.sa.sa_family != fam) {
2258 /* forget mis-matched family */
2261 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2264 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2265 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2266 /* Ok the address may be ok */
2268 if (fam == AF_INET6) {
2269 /* ok to use deprecated addresses? No, let's not! */
2270 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2271 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2274 if (ifa->src_is_priv && !ifa->src_is_loop) {
2276 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2280 if (ifa->src_is_glob) {
2282 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2289 * Now that we know what is what, implement our table. This could in
2290 * theory be done slicker (it used to be), but this is
2291 * straightforward and easier to validate :-)
2293 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2294 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2295 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2296 dest_is_loop, dest_is_priv, dest_is_global);
2298 if ((ifa->src_is_loop) && (dest_is_priv)) {
2299 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2302 if ((ifa->src_is_glob) && (dest_is_priv)) {
2303 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2306 if ((ifa->src_is_loop) && (dest_is_global)) {
2307 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2310 if ((ifa->src_is_priv) && (dest_is_global)) {
2311 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2314 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2315 /* it's a preferred address */
2319 static struct sctp_ifa *
2320 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2321 uint8_t dest_is_loop,
2322 uint8_t dest_is_priv,
2325 uint8_t dest_is_global = 0;
2328 * Here we determine if it's an acceptable address. An acceptable
2329 * address means it is of the same or a higher scope, but we can
2330 * allow for NAT, which means it's ok to have a global dest and a
2333 * L = loopback, P = private, G = global
2334 * -----------------------------------------
2335 * src | dest | result
2336 * -----------------------------------------
2338 * -----------------------------------------
2339 * P | L | yes-v4 no-v6
2340 * -----------------------------------------
2342 * -----------------------------------------
2344 * -----------------------------------------
2346 * -----------------------------------------
2347 * G | P | yes - May not work
2348 * -----------------------------------------
2350 * -----------------------------------------
2351 * P | G | yes - May not work
2352 * -----------------------------------------
2354 * -----------------------------------------
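 * Worked example (illustrative): unlike the "preferred" test above, the
 * acceptable test tolerates NAT, so a global source toward a private
 * destination (and the reverse) is allowed even though it "May not work".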
2357 if (ifa->address.sa.sa_family != fam) {
2358 /* forget non-matching family */
2359 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2360 ifa->address.sa.sa_family, fam);
2363 /* Ok the address may be ok */
2364 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2365 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2366 dest_is_loop, dest_is_priv);
2367 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2371 if (fam == AF_INET6) {
2372 /* ok to use deprecated addresses? */
2373 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2376 if (ifa->src_is_priv) {
2377 /* Special case, linklocal to loop */
2384 * Now that we know what is what, implement our table. This could in
2385 * theory be done slicker (it used to be), but this is
2386 * straightforward and easier to validate :-)
2388 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2391 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2394 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2397 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2400 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2401 /* it's an acceptable address */
2406 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2408 struct sctp_laddr *laddr;
2411 /* There are no restrictions, no TCB :-) */
2414 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2415 if (laddr->ifa == NULL) {
2416 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2420 if (laddr->ifa == ifa) {
2421 /* Yes it is on the list */
2430 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2432 struct sctp_laddr *laddr;
2436 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2437 if (laddr->ifa == NULL) {
2438 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2442 if ((laddr->ifa == ifa) && laddr->action == 0)
2451 static struct sctp_ifa *
2452 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2455 int non_asoc_addr_ok,
2456 uint8_t dest_is_priv,
2457 uint8_t dest_is_loop,
2460 struct sctp_laddr *laddr, *starting_point;
2463 struct sctp_ifn *sctp_ifn;
2464 struct sctp_ifa *sctp_ifa, *sifa;
2465 struct sctp_vrf *vrf;
2468 vrf = sctp_find_vrf(vrf_id);
2472 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2473 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2474 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2476 * First question: is the ifn we will emit on in our list? If so, we
2477 * want such an address. Note that we first looked for a preferred
2481 /* is a preferred one on the interface we route out? */
2482 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2484 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2485 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2486 &sctp_ifa->address.sin.sin_addr) != 0)) {
2491 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2492 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2493 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2497 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2498 (non_asoc_addr_ok == 0))
2500 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2505 if (sctp_is_addr_in_ep(inp, sifa)) {
2506 atomic_add_int(&sifa->refcount, 1);
2512 * Ok, now we need to find one on the list of the addresses. We
2513 * can't get one on the emitting interface, so let's first find a
2514 * preferred one. If not that, then an acceptable one; otherwise... we
2517 starting_point = inp->next_addr_touse;
2519 if (inp->next_addr_touse == NULL) {
2520 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2523 for (laddr = inp->next_addr_touse; laddr;
2524 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2525 if (laddr->ifa == NULL) {
2526 /* address has been removed */
2529 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2530 /* address is being deleted */
2533 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2537 atomic_add_int(&sifa->refcount, 1);
2540 if (resettotop == 0) {
2541 inp->next_addr_touse = NULL;
2544 inp->next_addr_touse = starting_point;
2547 if (inp->next_addr_touse == NULL) {
2548 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2551 /* ok, what about an acceptable address in the inp */
2552 for (laddr = inp->next_addr_touse; laddr;
2553 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2554 if (laddr->ifa == NULL) {
2555 /* address has been removed */
2558 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2559 /* address is being deleted */
2562 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2566 atomic_add_int(&sifa->refcount, 1);
2569 if (resettotop == 0) {
2570 inp->next_addr_touse = NULL;
2571 goto once_again_too;
2574 * no address bound can be a source for the destination we are in
2582 static struct sctp_ifa *
2583 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2584 struct sctp_tcb *stcb,
2587 uint8_t dest_is_priv,
2588 uint8_t dest_is_loop,
2589 int non_asoc_addr_ok,
2592 struct sctp_laddr *laddr, *starting_point;
2594 struct sctp_ifn *sctp_ifn;
2595 struct sctp_ifa *sctp_ifa, *sifa;
2596 uint8_t start_at_beginning = 0;
2597 struct sctp_vrf *vrf;
2601 * First question: is the ifn we will emit on in our list? If so, we
2604 vrf = sctp_find_vrf(vrf_id);
2608 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2609 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2610 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2613 * first question, is the ifn we will emit on in our list? If so,
2614 * we want that one. First we look for a preferred. Second, we go
2615 * for an acceptable.
2618 /* first try for a preferred address on the ep */
2619 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2621 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2622 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2623 &sctp_ifa->address.sin.sin_addr) != 0)) {
2628 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2629 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2630 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2634 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2636 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2637 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2640 if (((non_asoc_addr_ok == 0) &&
2641 (sctp_is_addr_restricted(stcb, sifa))) ||
2642 (non_asoc_addr_ok &&
2643 (sctp_is_addr_restricted(stcb, sifa)) &&
2644 (!sctp_is_addr_pending(stcb, sifa)))) {
2645 /* on the no-no list */
2648 atomic_add_int(&sifa->refcount, 1);
2652 /* next try for an acceptable address on the ep */
2653 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2655 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2656 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2657 &sctp_ifa->address.sin.sin_addr) != 0)) {
2662 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2663 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2664 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2668 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2670 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2671 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2674 if (((non_asoc_addr_ok == 0) &&
2675 (sctp_is_addr_restricted(stcb, sifa))) ||
2676 (non_asoc_addr_ok &&
2677 (sctp_is_addr_restricted(stcb, sifa)) &&
2678 (!sctp_is_addr_pending(stcb, sifa)))) {
2679 /* on the no-no list */
2682 atomic_add_int(&sifa->refcount, 1);
2689 * If we can't find one like that, then we must look at all addresses
2690 * bound to pick one: first a preferable one, then secondly an acceptable one.
2692 starting_point = stcb->asoc.last_used_address;
2694 if (stcb->asoc.last_used_address == NULL) {
2695 start_at_beginning = 1;
2696 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2698 /* search beginning with the last used address */
2699 for (laddr = stcb->asoc.last_used_address; laddr;
2700 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2701 if (laddr->ifa == NULL) {
2702 /* address has been removed */
2705 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2706 /* address is being deleted */
2709 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2712 if (((non_asoc_addr_ok == 0) &&
2713 (sctp_is_addr_restricted(stcb, sifa))) ||
2714 (non_asoc_addr_ok &&
2715 (sctp_is_addr_restricted(stcb, sifa)) &&
2716 (!sctp_is_addr_pending(stcb, sifa)))) {
2717 /* on the no-no list */
2720 stcb->asoc.last_used_address = laddr;
2721 atomic_add_int(&sifa->refcount, 1);
2724 if (start_at_beginning == 0) {
2725 stcb->asoc.last_used_address = NULL;
2726 goto sctp_from_the_top;
2728 /* now try for any higher scope than the destination */
2729 stcb->asoc.last_used_address = starting_point;
2730 start_at_beginning = 0;
2732 if (stcb->asoc.last_used_address == NULL) {
2733 start_at_beginning = 1;
2734 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2736 /* search beginning with the last used address */
2737 for (laddr = stcb->asoc.last_used_address; laddr;
2738 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2739 if (laddr->ifa == NULL) {
2740 /* address has been removed */
2743 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2744 /* address is being deleted */
2747 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2751 if (((non_asoc_addr_ok == 0) &&
2752 (sctp_is_addr_restricted(stcb, sifa))) ||
2753 (non_asoc_addr_ok &&
2754 (sctp_is_addr_restricted(stcb, sifa)) &&
2755 (!sctp_is_addr_pending(stcb, sifa)))) {
2756 /* on the no-no list */
2759 stcb->asoc.last_used_address = laddr;
2760 atomic_add_int(&sifa->refcount, 1);
2763 if (start_at_beginning == 0) {
2764 stcb->asoc.last_used_address = NULL;
2765 goto sctp_from_the_top2;
2770 static struct sctp_ifa *
2771 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2772 struct sctp_inpcb *inp,
2773 struct sctp_tcb *stcb,
2774 int non_asoc_addr_ok,
2775 uint8_t dest_is_loop,
2776 uint8_t dest_is_priv,
2782 struct sctp_ifa *ifa, *sifa;
2783 int num_eligible_addr = 0;
2785 struct sockaddr_in6 sin6, lsa6;
2787 if (fam == AF_INET6) {
2788 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2789 (void)sa6_recoverscope(&sin6);
2792 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2794 if ((ifa->address.sa.sa_family == AF_INET) &&
2795 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2796 &ifa->address.sin.sin_addr) != 0)) {
2801 if ((ifa->address.sa.sa_family == AF_INET6) &&
2802 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2803 &ifa->address.sin6.sin6_addr) != 0)) {
2807 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2808 (non_asoc_addr_ok == 0))
2810 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2815 if (fam == AF_INET6 &&
2817 sifa->src_is_loop && sifa->src_is_priv) {
2819 * don't allow fe80::1 to be a src on loop ::1, we
2820 * don't list it to the peer so we will get an
2825 if (fam == AF_INET6 &&
2826 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2827 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2828 /* link-local <-> link-local must belong to the same
2830 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2831 (void)sa6_recoverscope(&lsa6);
2832 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2839 * Check if the IPv6 address matches to next-hop. In the
2840 * mobile case, an old IPv6 address may not be deleted from the
2841 * interface. Then, the interface has previous and new
2842 * addresses. We should use one corresponding to the
2843 * next-hop. (by micchie)
2846 if (stcb && fam == AF_INET6 &&
2847 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2848 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2855 /* Avoid topologically incorrect IPv4 address */
2856 if (stcb && fam == AF_INET &&
2857 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2858 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2864 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2867 if (((non_asoc_addr_ok == 0) &&
2868 (sctp_is_addr_restricted(stcb, sifa))) ||
2869 (non_asoc_addr_ok &&
2870 (sctp_is_addr_restricted(stcb, sifa)) &&
2871 (!sctp_is_addr_pending(stcb, sifa)))) {
2873 * It is restricted for some reason..
2874 * probably not yet added.
2879 if (num_eligible_addr >= addr_wanted) {
2882 num_eligible_addr++;
2889 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2890 struct sctp_inpcb *inp,
2891 struct sctp_tcb *stcb,
2892 int non_asoc_addr_ok,
2893 uint8_t dest_is_loop,
2894 uint8_t dest_is_priv,
2897 struct sctp_ifa *ifa, *sifa;
2898 int num_eligible_addr = 0;
2900 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2902 if ((ifa->address.sa.sa_family == AF_INET) &&
2903 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2904 &ifa->address.sin.sin_addr) != 0)) {
2909 if ((ifa->address.sa.sa_family == AF_INET6) &&
2911 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2912 &ifa->address.sin6.sin6_addr) != 0)) {
2916 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2917 (non_asoc_addr_ok == 0)) {
2920 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2926 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2929 if (((non_asoc_addr_ok == 0) &&
2930 (sctp_is_addr_restricted(stcb, sifa))) ||
2931 (non_asoc_addr_ok &&
2932 (sctp_is_addr_restricted(stcb, sifa)) &&
2933 (!sctp_is_addr_pending(stcb, sifa)))) {
2935 * It is restricted for some reason..
2936 * probably not yet added.
2941 num_eligible_addr++;
2943 return (num_eligible_addr);
2946 static struct sctp_ifa *
2947 sctp_choose_boundall(struct sctp_inpcb *inp,
2948 struct sctp_tcb *stcb,
2949 struct sctp_nets *net,
2952 uint8_t dest_is_priv,
2953 uint8_t dest_is_loop,
2954 int non_asoc_addr_ok,
2957 int cur_addr_num = 0, num_preferred = 0;
2959 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2960 struct sctp_ifa *sctp_ifa, *sifa;
2962 struct sctp_vrf *vrf;
2968 * For boundall we can use any address in the association.
2969 * If non_asoc_addr_ok is set we can use any address (at least in
2970 * theory). So we look for preferred addresses first. If we find one,
2971 * we use it. Otherwise we next try to get an address on the
2972 * interface, which we should be able to do (unless non_asoc_addr_ok
2973 * is false and we are routed out that way). In these cases where we
2974 * can't use the address of the interface we go through all the
2975 * ifn's looking for an address we can use and fill that in. Punting
2976 * means we send back address 0, which will probably cause problems
2977 * actually since then IP will fill in the address of the route ifn,
2978 * which means we probably already rejected it.. i.e. here comes an
2981 vrf = sctp_find_vrf(vrf_id);
2985 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2986 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2987 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2988 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2989 if (sctp_ifn == NULL) {
2990 /* ?? We don't have this guy ?? */
2991 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2992 goto bound_all_plan_b;
2994 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
2995 ifn_index, sctp_ifn->ifn_name);
2998 cur_addr_num = net->indx_of_eligible_next_to_use;
3000 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3005 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3006 num_preferred, sctp_ifn->ifn_name);
3007 if (num_preferred == 0) {
3009 * no eligible addresses, we must use some other interface
3010 * address if we can find one.
3012 goto bound_all_plan_b;
3015 * Ok we have num_eligible_addr set with how many we can use, this
3016 * may vary from call to call due to addresses being deprecated
3019 if (cur_addr_num >= num_preferred) {
3023 * select the nth address from the list (where cur_addr_num is the
3024 * nth) and 0 is the first one, 1 is the second one etc...
3026 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3028 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3029 dest_is_priv, cur_addr_num, fam, ro);
3031 /* if sctp_ifa is NULL, something changed?? Fall to plan b. */
3033 atomic_add_int(&sctp_ifa->refcount, 1);
3035 /* save off where the next one we will want */
3036 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3041 * plan_b: Look at all interfaces and find a preferred address. If
3042 * no preferred is found, fall through to plan_c.
3045 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3046 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3047 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3048 sctp_ifn->ifn_name);
3049 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3050 /* wrong base scope */
3051 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3054 if ((sctp_ifn == looked_at) && looked_at) {
3055 /* already looked at this guy */
3056 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3059 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3060 dest_is_loop, dest_is_priv, fam);
3061 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3062 "Found ifn:%p %d preferred source addresses\n",
3063 ifn, num_preferred);
3064 if (num_preferred == 0) {
3065 /* None on this interface. */
3066 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3069 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3070 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3071 num_preferred, (void *)sctp_ifn, cur_addr_num);
3074 * Ok we have num_eligible_addr set with how many we can
3075 * use, this may vary from call to call due to addresses
3076 * being deprecated etc..
3078 if (cur_addr_num >= num_preferred) {
3081 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3082 dest_is_priv, cur_addr_num, fam, ro);
3086 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3087 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3089 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3090 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3091 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3092 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3094 atomic_add_int(&sifa->refcount, 1);
3098 again_with_private_addresses_allowed:
3100 /* plan_c: do we have an acceptable address on the emit interface */
3102 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3103 if (emit_ifn == NULL) {
3104 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3107 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3108 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3110 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3111 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3112 &sctp_ifa->address.sin.sin_addr) != 0)) {
3113 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3118 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3119 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3120 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3121 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3125 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3126 (non_asoc_addr_ok == 0)) {
3127 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3130 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3133 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3137 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3138 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3142 if (((non_asoc_addr_ok == 0) &&
3143 (sctp_is_addr_restricted(stcb, sifa))) ||
3144 (non_asoc_addr_ok &&
3145 (sctp_is_addr_restricted(stcb, sifa)) &&
3146 (!sctp_is_addr_pending(stcb, sifa)))) {
3148 * It is restricted for some reason..
3149 * probably not yet added.
3151 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3156 atomic_add_int(&sifa->refcount, 1);
3161 * plan_d: We are in trouble. No preferred address on the emit
3162 * interface. And not even a preferred address on any interface. Go
3163 * out and see if we can find an acceptable address somewhere
3164 * amongst all interfaces.
3166 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3167 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3168 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3169 /* wrong base scope */
3172 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3174 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3175 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3176 &sctp_ifa->address.sin.sin_addr) != 0)) {
3181 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3182 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3183 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3187 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3188 (non_asoc_addr_ok == 0))
3190 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3196 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3200 if (((non_asoc_addr_ok == 0) &&
3201 (sctp_is_addr_restricted(stcb, sifa))) ||
3202 (non_asoc_addr_ok &&
3203 (sctp_is_addr_restricted(stcb, sifa)) &&
3204 (!sctp_is_addr_pending(stcb, sifa)))) {
3206 * It is restricted for some
3207 * reason.. probably not yet added.
3218 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3219 stcb->asoc.scope.ipv4_local_scope = 1;
3221 goto again_with_private_addresses_allowed;
3222 } else if (retried == 1) {
3223 stcb->asoc.scope.ipv4_local_scope = 0;
3231 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3232 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3233 /* wrong base scope */
3236 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3237 struct sctp_ifa *tmp_sifa;
3240 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3241 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3242 &sctp_ifa->address.sin.sin_addr) != 0)) {
3247 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3248 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3249 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3253 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3254 (non_asoc_addr_ok == 0))
3256 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3259 if (tmp_sifa == NULL) {
3262 if (tmp_sifa == sifa) {
3266 if (sctp_is_address_in_scope(tmp_sifa,
3267 &stcb->asoc.scope, 0) == 0) {
3270 if (((non_asoc_addr_ok == 0) &&
3271 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3272 (non_asoc_addr_ok &&
3273 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3274 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3284 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3285 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3286 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3291 atomic_add_int(&sifa->refcount, 1);
3299 /* stcb may be NULL */
3301 sctp_source_address_selection(struct sctp_inpcb *inp,
3302 struct sctp_tcb *stcb,
3304 struct sctp_nets *net,
3305 int non_asoc_addr_ok, uint32_t vrf_id)
3307 struct sctp_ifa *answer;
3308 uint8_t dest_is_priv, dest_is_loop;
3311 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3314 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3319 * - Find the route if needed, cache if I can.
3320 * - Look at the interface address in the route. Is it in the bound list? If so we
3321 * have the best source.
3322 * - If not we must rotate amongst the addresses.
3326 * Do we need to pay attention to scope? We can have a private address
3327 * or a global address we are sourcing or sending to. So if we draw
3329 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3331 * ------------------------------------------
3332 * source * dest * result
3333 * -----------------------------------------
3334 * <a> Private * Global * NAT
3335 * -----------------------------------------
3336 * <b> Private * Private * No problem
3337 * -----------------------------------------
3338 * <c> Global * Private * Huh, How will this work?
3339 * -----------------------------------------
3340 * <d> Global * Global * No Problem
3341 *------------------------------------------
3342 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3344 *------------------------------------------
3345 * source * dest * result
3346 * -----------------------------------------
3347 * <a> Linklocal * Global *
3348 * -----------------------------------------
3349 * <b> Linklocal * Linklocal * No problem
3350 * -----------------------------------------
3351 * <c> Global * Linklocal * Huh, How will this work?
3352 * -----------------------------------------
3353 * <d> Global * Global * No Problem
3354 *------------------------------------------
3355 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3357 * And then we add to that what happens if there are multiple addresses
3358 * assigned to an interface. Remember the ifa on an ifn is a linked
3359 * list of addresses. So one interface can have more than one IP
3360 * address. What happens if we have both a private and a global
3361 * address? Do we then use context of destination to sort out which
3362 * one is best? And what about NATs? Sending P->G may get you a NAT
3363 * translation, or should you select the G that's on the interface in
3368 * - count the number of addresses on the interface.
3369 * - if it is one, no problem except case <c>.
3370 * For <a> we will assume a NAT out there.
3371 * - if there are more than one, then we need to worry about scope P
3372 * or G. We should prefer G -> G and P -> P if possible.
3373 * Then as a secondary fall back to mixed types G->P being a last
3375 * - The above all works for bound all, but bound specific we need to
3376 * use the same concept but instead only consider the bound
3377 * addresses. If the bound set is NOT assigned to the interface then
3378 * we must use rotation amongst the bound addresses..
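 * In short (illustrative summary of the calls below): a bound-all endpoint
 * goes through sctp_choose_boundall(); a bound-specific endpoint with an
 * stcb uses sctp_choose_boundspecific_stcb(), otherwise
 * sctp_choose_boundspecific_inp(), rotating through its bound addresses.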
3380 if (ro->ro_rt == NULL) {
3382 * Need a route to cache.
3384 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3386 if (ro->ro_rt == NULL) {
3389 fam = ro->ro_dst.sa_family;
3390 dest_is_priv = dest_is_loop = 0;
3391 /* Setup our scopes for the destination */
3395 /* Scope based on outbound address */
3396 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3399 /* mark it as local */
3400 net->addr_is_local = 1;
3402 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3409 /* Scope based on outbound address */
3410 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3411 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3413 * If the address is a loopback address, which
3414 * consists of "::1" OR "fe80::1%lo0", we are
3415 * loopback scope. But we don't use dest_is_priv
3416 * (link local addresses).
3420 /* mark it as local */
3421 net->addr_is_local = 1;
3423 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3429 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3430 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3431 SCTP_IPI_ADDR_RLOCK();
3432 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3436 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3437 dest_is_priv, dest_is_loop,
3438 non_asoc_addr_ok, fam);
3439 SCTP_IPI_ADDR_RUNLOCK();
3446 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3447 vrf_id, dest_is_priv,
3449 non_asoc_addr_ok, fam);
3451 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3456 SCTP_IPI_ADDR_RUNLOCK();
3461 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3464 int tlen, at, found;
3465 struct sctp_sndinfo sndinfo;
3466 struct sctp_prinfo prinfo;
3467 struct sctp_authinfo authinfo;
3469 tlen = SCTP_BUF_LEN(control);
3473 * Independent of how many mbufs, find the c_type inside the control
3474 * structure and copy out the data.
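 * (Usage sketch, assuming the conventional calling pattern: a caller such
 * as the send path might invoke sctp_find_cmsg(SCTP_SNDINFO, &info,
 * control, sizeof(info)) and treat a non-zero return as "found and copied
 * out".)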
3477 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3478 /* There is not enough room for one more. */
3481 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3482 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3483 /* We don't have a complete CMSG header. */
3486 if (((int)cmh.cmsg_len + at) > tlen) {
3487 /* We don't have the complete CMSG. */
3490 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3491 ((c_type == cmh.cmsg_type) ||
3492 ((c_type == SCTP_SNDRCV) &&
3493 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3494 (cmh.cmsg_type == SCTP_PRINFO) ||
3495 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3496 if (c_type == cmh.cmsg_type) {
3497 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
3500 /* It is exactly what we want. Copy it out. */
3501 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), (int)cpsize, (caddr_t)data);
3504 struct sctp_sndrcvinfo *sndrcvinfo;
3506 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3508 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3511 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3513 switch (cmh.cmsg_type) {
3515 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
3518 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3519 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3520 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3521 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3522 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3523 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3526 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
3529 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3530 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3531 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3533 sndrcvinfo->sinfo_timetolive = 0;
3535 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3538 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
3541 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3542 sndrcvinfo->sinfo_keynumber_valid = 1;
3543 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3551 at += CMSG_ALIGN(cmh.cmsg_len);
3557 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3561 struct sctp_initmsg initmsg;
3563 struct sockaddr_in sin;
3566 struct sockaddr_in6 sin6;
3569 tlen = SCTP_BUF_LEN(control);
3572 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3573 /* There is not enough room for one more. */
3577 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3578 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3579 /* We don't have a complete CMSG header. */
3583 if (((int)cmh.cmsg_len + at) > tlen) {
3584 /* We don't have the complete CMSG. */
3588 if (cmh.cmsg_level == IPPROTO_SCTP) {
3589 switch (cmh.cmsg_type) {
3591 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
3595 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3596 if (initmsg.sinit_max_attempts)
3597 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3598 if (initmsg.sinit_num_ostreams)
3599 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3600 if (initmsg.sinit_max_instreams)
3601 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3602 if (initmsg.sinit_max_init_timeo)
3603 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3604 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3605 struct sctp_stream_out *tmp_str;
3607 #if defined(SCTP_DETAILED_STR_STATS)
3611 /* Default is NOT correct */
3612 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3613 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3614 SCTP_TCB_UNLOCK(stcb);
3615 SCTP_MALLOC(tmp_str,
3616 struct sctp_stream_out *,
3617 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3619 SCTP_TCB_LOCK(stcb);
3620 if (tmp_str != NULL) {
3621 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3622 stcb->asoc.strmout = tmp_str;
3623 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3625 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3627 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3628 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3629 stcb->asoc.strmout[i].chunks_on_queues = 0;
3630 stcb->asoc.strmout[i].next_mid_ordered = 0;
3631 stcb->asoc.strmout[i].next_mid_unordered = 0;
3632 #if defined(SCTP_DETAILED_STR_STATS)
3633 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3634 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3635 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3638 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3639 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3641 stcb->asoc.strmout[i].stream_no = i;
3642 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3643 stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3644 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3649 case SCTP_DSTADDRV4:
3650 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3654 memset(&sin, 0, sizeof(struct sockaddr_in));
3655 sin.sin_family = AF_INET;
3656 sin.sin_len = sizeof(struct sockaddr_in);
3657 sin.sin_port = stcb->rport;
3658 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3659 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3660 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3661 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3665 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3666 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3673 case SCTP_DSTADDRV6:
3674 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3678 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3679 sin6.sin6_family = AF_INET6;
3680 sin6.sin6_len = sizeof(struct sockaddr_in6);
3681 sin6.sin6_port = stcb->rport;
3682 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3683 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3684 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3689 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3690 in6_sin6_2_sin(&sin, &sin6);
3691 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3692 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3693 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3697 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3698 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3704 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3705 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3715 at += CMSG_ALIGN(cmh.cmsg_len);
3720 static struct sctp_tcb *
3721 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3723 struct mbuf *control,
3724 struct sctp_nets **net_p,
3729 struct sctp_tcb *stcb;
3730 struct sockaddr *addr;
3732 struct sockaddr_in sin;
3735 struct sockaddr_in6 sin6;
3738 tlen = SCTP_BUF_LEN(control);
3741 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3742 /* There is not enough room for one more. */
3746 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3747 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3748 /* We don't have a complete CMSG header. */
3752 if (((int)cmh.cmsg_len + at) > tlen) {
3753 /* We don't have the complete CMSG. */
3757 if (cmh.cmsg_level == IPPROTO_SCTP) {
3758 switch (cmh.cmsg_type) {
3760 case SCTP_DSTADDRV4:
3761 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3765 memset(&sin, 0, sizeof(struct sockaddr_in));
3766 sin.sin_family = AF_INET;
3767 sin.sin_len = sizeof(struct sockaddr_in);
3768 sin.sin_port = port;
3769 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3770 addr = (struct sockaddr *)&sin;
3774 case SCTP_DSTADDRV6:
3775 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3779 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3780 sin6.sin6_family = AF_INET6;
3781 sin6.sin6_len = sizeof(struct sockaddr_in6);
3782 sin6.sin6_port = port;
3783 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3785 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3786 in6_sin6_2_sin(&sin, &sin6);
3787 addr = (struct sockaddr *)&sin;
3790 addr = (struct sockaddr *)&sin6;
3798 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3804 at += CMSG_ALIGN(cmh.cmsg_len);
3809 static struct mbuf *
3810 sctp_add_cookie(struct mbuf *init, int init_offset,
3811 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
3813 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3814 struct sctp_state_cookie *stc;
3815 struct sctp_paramhdr *ph;
3820 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3821 sizeof(struct sctp_paramhdr)), 0,
3822 M_NOWAIT, 1, MT_DATA);
3826 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3827 if (copy_init == NULL) {
3831 #ifdef SCTP_MBUF_LOGGING
3832 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3833 sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3836 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3838 if (copy_initack == NULL) {
3840 sctp_m_freem(copy_init);
3843 #ifdef SCTP_MBUF_LOGGING
3844 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3845 sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3848 /* easy case: we just drop it on the end */
3849 ph = mtod(mret, struct sctp_paramhdr *);
3850 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3851 sizeof(struct sctp_paramhdr);
3852 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3853 sizeof(struct sctp_paramhdr));
3854 ph->param_type = htons(SCTP_STATE_COOKIE);
3855 ph->param_length = 0; /* fill in at the end */
3856 /* Fill in the stc cookie data */
3857 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3859 /* tack the INIT and then the INIT-ACK onto the chain */
3861 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3862 cookie_sz += SCTP_BUF_LEN(m_at);
3863 if (SCTP_BUF_NEXT(m_at) == NULL) {
3864 SCTP_BUF_NEXT(m_at) = copy_init;
3868 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3869 cookie_sz += SCTP_BUF_LEN(m_at);
3870 if (SCTP_BUF_NEXT(m_at) == NULL) {
3871 SCTP_BUF_NEXT(m_at) = copy_initack;
3875 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3876 cookie_sz += SCTP_BUF_LEN(m_at);
3877 if (SCTP_BUF_NEXT(m_at) == NULL) {
3881 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
3883 /* no space, so free the entire chain */
3887 SCTP_BUF_LEN(sig) = 0;
3888 SCTP_BUF_NEXT(m_at) = sig;
3890 foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
3891 memset(foo, 0, SCTP_SIGNATURE_SIZE);
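/*
 * Note: only the signature area is zeroed here; the caller is expected
 * to compute the HMAC over the finished cookie and write it through
 * the returned *signature pointer.
 */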
3893 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3894 cookie_sz += SCTP_SIGNATURE_SIZE;
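/*
 * cookie_sz now covers the parameter header, the state cookie, the
 * copied INIT and INIT-ACK chunks and the signature space, so the
 * STATE_COOKIE parameter length can finally be filled in.
 */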
3895 ph->param_length = htons(cookie_sz);
3901 sctp_get_ect(struct sctp_tcb *stcb)
3903 if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
3904 return (SCTP_ECT0_BIT);
3910 #if defined(INET) || defined(INET6)
3912 sctp_handle_no_route(struct sctp_tcb *stcb,
3913 struct sctp_nets *net,
3916 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3919 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3920 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3921 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3922 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3923 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
3924 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3928 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3929 net->dest_state &= ~SCTP_ADDR_PF;
3933 if (net == stcb->asoc.primary_destination) {
3934 /* need a new primary */
3935 struct sctp_nets *alt;
3937 alt = sctp_find_alternate_net(stcb, net, 0);
3939 if (stcb->asoc.alternate) {
3940 sctp_free_remote_addr(stcb->asoc.alternate);
3942 stcb->asoc.alternate = alt;
3943 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3944 if (net->ro._s_addr) {
3945 sctp_free_ifa(net->ro._s_addr);
3946 net->ro._s_addr = NULL;
3948 net->src_addr_selected = 0;
3957 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3958 struct sctp_tcb *stcb, /* may be NULL */
3959 struct sctp_nets *net,
3960 struct sockaddr *to,
3962 uint32_t auth_offset,
3963 struct sctp_auth_chunk *auth,
3964 uint16_t auth_keyid,
3965 int nofragment_flag,
3972 union sctp_sockstore *over_addr,
3973 uint8_t mflowtype, uint32_t mflowid,
3974 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3975 int so_locked SCTP_UNUSED
3980 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3983 * Given an mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3984 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3985 * - fill in the HMAC digest of any AUTH chunk in the packet.
3986 * - calculate and fill in the SCTP checksum.
3987 * - prepend an IP address header.
3988 * - if boundall use INADDR_ANY.
3989 * - if boundspecific do source address selection.
3990 * - set the fragmentation option for IPv4.
3991 * - On return from IP output, check/adjust mtu size of output
3992 * interface and smallest_mtu size as well.
3994 /* Will need ifdefs around this */
3996 struct sctphdr *sctphdr;
3999 #if defined(INET) || defined(INET6)
4002 #if defined(INET) || defined(INET6)
4004 sctp_route_t *ro = NULL;
4005 struct udphdr *udp = NULL;
4008 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4009 struct socket *so = NULL;
4012 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4013 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4017 #if defined(INET) || defined(INET6)
4019 vrf_id = stcb->asoc.vrf_id;
4021 vrf_id = inp->def_vrf_id;
4024 /* fill in the HMAC digest for any AUTH chunk in the packet */
4025 if ((auth != NULL) && (stcb != NULL)) {
4026 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4029 tos_value = net->dscp;
4031 tos_value = stcb->asoc.default_dscp;
4033 tos_value = inp->sctp_ep.default_dscp;
4036 switch (to->sa_family) {
4040 struct ip *ip = NULL;
4041 sctp_route_t iproute;
4044 len = SCTP_MIN_V4_OVERHEAD;
4046 len += sizeof(struct udphdr);
4048 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4051 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4054 SCTP_ALIGN_TO_END(newm, len);
4055 SCTP_BUF_LEN(newm) = len;
4056 SCTP_BUF_NEXT(newm) = m;
4059 m->m_pkthdr.flowid = net->flowid;
4060 M_HASHTYPE_SET(m, net->flowtype);
4062 m->m_pkthdr.flowid = mflowid;
4063 M_HASHTYPE_SET(m, mflowtype);
4065 packet_length = sctp_calculate_len(m);
4066 ip = mtod(m, struct ip *);
4067 ip->ip_v = IPVERSION;
4068 ip->ip_hl = (sizeof(struct ip) >> 2);
4069 if (tos_value == 0) {
4071 * This means, in particular, that it is not set
4072 * at the SCTP layer, so use the value from the IP layer.
4075 tos_value = inp->ip_inp.inp.inp_ip_tos;
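/*
 * OR in the ECT(0) codepoint; sctp_get_ect() above returns it only
 * when ECN was negotiated for this association, so this is a no-op
 * otherwise.
 */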
4079 tos_value |= sctp_get_ect(stcb);
4081 if ((nofragment_flag) && (port == 0)) {
4082 ip->ip_off = htons(IP_DF);
4084 ip->ip_off = htons(0);
4086 /* FreeBSD has a function for ip_id's */
4089 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4090 ip->ip_len = htons(packet_length);
4091 ip->ip_tos = tos_value;
4093 ip->ip_p = IPPROTO_UDP;
4095 ip->ip_p = IPPROTO_SCTP;
4100 memset(&iproute, 0, sizeof(iproute));
4101 memcpy(&ro->ro_dst, to, to->sa_len);
4103 ro = (sctp_route_t *) & net->ro;
4105 /* Now the address selection part */
4106 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4108 /* call the routine to select the src address */
4109 if (net && out_of_asoc_ok == 0) {
4110 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4111 sctp_free_ifa(net->ro._s_addr);
4112 net->ro._s_addr = NULL;
4113 net->src_addr_selected = 0;
4119 if (net->src_addr_selected == 0) {
4120 /* Cache the source address */
4121 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4124 net->src_addr_selected = 1;
4126 if (net->ro._s_addr == NULL) {
4127 /* No route to host */
4128 net->src_addr_selected = 0;
4129 sctp_handle_no_route(stcb, net, so_locked);
4130 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4132 return (EHOSTUNREACH);
4134 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4136 if (over_addr == NULL) {
4137 struct sctp_ifa *_lsrc;
4139 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4143 if (_lsrc == NULL) {
4144 sctp_handle_no_route(stcb, net, so_locked);
4145 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4147 return (EHOSTUNREACH);
4149 ip->ip_src = _lsrc->address.sin.sin_addr;
4150 sctp_free_ifa(_lsrc);
4152 ip->ip_src = over_addr->sin.sin_addr;
4153 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4157 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4158 sctp_handle_no_route(stcb, net, so_locked);
4159 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4161 return (EHOSTUNREACH);
4163 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4164 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4165 udp->uh_dport = port;
4166 udp->uh_ulen = htons((uint16_t) (packet_length - sizeof(struct ip)));
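/*
 * Only the pseudo-header part of the UDP checksum is computed here;
 * the rest is presumably finished by SCTP_ENABLE_UDP_CSUM() below once
 * the payload has been attached.
 */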
4168 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4172 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4174 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4177 sctphdr->src_port = src_port;
4178 sctphdr->dest_port = dest_port;
4179 sctphdr->v_tag = v_tag;
4180 sctphdr->checksum = 0;
4183 * If source address selection fails and we find no
4184 * route then the ip_output should fail as well with
4185 * a NO_ROUTE_TO_HOST type error. We probably should
4186 * catch that somewhere and abort the association
4187 * right away (assuming this is an INIT being sent).
4189 if (ro->ro_rt == NULL) {
4191 * src addr selection failed to find a route
4192 * (or valid source addr), so we can't get
4193 * there from here (yet)!
4195 sctp_handle_no_route(stcb, net, so_locked);
4196 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4198 return (EHOSTUNREACH);
4200 if (ro != &iproute) {
4201 memcpy(&iproute, ro, sizeof(*ro));
4203 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4204 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4205 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4206 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
4207 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4210 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4211 /* failed to prepend data, give up */
4212 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4216 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4218 #if defined(SCTP_WITH_NO_CSUM)
4219 SCTP_STAT_INCR(sctps_sendnocrc);
4221 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4222 SCTP_STAT_INCR(sctps_sendswcrc);
4225 SCTP_ENABLE_UDP_CSUM(o_pak);
4228 #if defined(SCTP_WITH_NO_CSUM)
4229 SCTP_STAT_INCR(sctps_sendnocrc);
4231 m->m_pkthdr.csum_flags = CSUM_SCTP;
4232 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4233 SCTP_STAT_INCR(sctps_sendhwcrc);
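/*
 * Without UDP encapsulation the CRC32c is requested through the pkthdr
 * csum flags, so it can be done by capable hardware or deferred to the
 * stack on output.
 */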
4236 #ifdef SCTP_PACKET_LOGGING
4237 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4238 sctp_packet_log(o_pak);
4240 /* send it out. table id is taken from stcb */
4241 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4242 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4243 so = SCTP_INP_SO(inp);
4244 SCTP_SOCKET_UNLOCK(so, 0);
4247 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4248 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4249 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4250 atomic_add_int(&stcb->asoc.refcnt, 1);
4251 SCTP_TCB_UNLOCK(stcb);
4252 SCTP_SOCKET_LOCK(so, 0);
4253 SCTP_TCB_LOCK(stcb);
4254 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4257 SCTP_STAT_INCR(sctps_sendpackets);
4258 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4260 SCTP_STAT_INCR(sctps_senderrors);
4262 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4264 /* free temp routes */
4267 /* PMTU check versus smallest asoc MTU goes here. */
4269 if ((ro->ro_rt != NULL) &&
4270 (net->ro._s_addr)) {
4273 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4275 mtu -= sizeof(struct udphdr);
4277 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4278 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4281 } else if (ro->ro_rt == NULL) {
4282 /* route was freed */
4283 if (net->ro._s_addr &&
4284 net->src_addr_selected) {
4285 sctp_free_ifa(net->ro._s_addr);
4286 net->ro._s_addr = NULL;
4288 net->src_addr_selected = 0;
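/* Force a fresh source address selection the next time this net is used. */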
4297 uint32_t flowlabel, flowinfo;
4298 struct ip6_hdr *ip6h;
4299 struct route_in6 ip6route;
4301 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4303 struct sockaddr_in6 lsa6_storage;
4305 u_short prev_port = 0;
4309 flowlabel = net->flowlabel;
4311 flowlabel = stcb->asoc.default_flowlabel;
4313 flowlabel = inp->sctp_ep.default_flowlabel;
4315 if (flowlabel == 0) {
4317 * This means, in particular, that it is not set
4318 * at the SCTP layer, so use the value from the IP layer.
4321 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4323 flowlabel &= 0x000fffff;
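/* The IPv6 flow label only occupies the low 20 bits of ip6_flow. */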
4324 len = SCTP_MIN_OVERHEAD;
4326 len += sizeof(struct udphdr);
4328 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4331 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4334 SCTP_ALIGN_TO_END(newm, len);
4335 SCTP_BUF_LEN(newm) = len;
4336 SCTP_BUF_NEXT(newm) = m;
4339 m->m_pkthdr.flowid = net->flowid;
4340 M_HASHTYPE_SET(m, net->flowtype);
4342 m->m_pkthdr.flowid = mflowid;
4343 M_HASHTYPE_SET(m, mflowtype);
4345 packet_length = sctp_calculate_len(m);
4347 ip6h = mtod(m, struct ip6_hdr *);
4348 /* protect *sin6 from overwrite */
4349 sin6 = (struct sockaddr_in6 *)to;
4353 /* KAME hack: embed scopeid */
4354 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4355 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4359 memset(&ip6route, 0, sizeof(ip6route));
4360 ro = (sctp_route_t *) & ip6route;
4361 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4363 ro = (sctp_route_t *) & net->ro;
4366 * We assume here that inp_flow is in host byte
4367 * order within the TCB!
4369 if (tos_value == 0) {
4371 * This means, in particular, that it is not set
4372 * at the SCTP layer, so use the value from the IP layer.
4375 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4379 tos_value |= sctp_get_ect(stcb);
4383 flowinfo |= tos_value;
4385 flowinfo |= flowlabel;
4386 ip6h->ip6_flow = htonl(flowinfo);
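/*
 * ip6_flow now carries the version, the traffic class (DSCP plus the
 * ECT bit set above) and the 20-bit flow label.
 */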
4388 ip6h->ip6_nxt = IPPROTO_UDP;
4390 ip6h->ip6_nxt = IPPROTO_SCTP;
4392 ip6h->ip6_plen = (uint16_t) (packet_length - sizeof(struct ip6_hdr));
4393 ip6h->ip6_dst = sin6->sin6_addr;
4396 * Add SRC address selection here: we can only reuse
4397 * to a limited degree the kame src-addr-sel, since
4398 * we can try their selection but it may not be bound.
4401 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4402 lsa6_tmp.sin6_family = AF_INET6;
4403 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4405 if (net && out_of_asoc_ok == 0) {
4406 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4407 sctp_free_ifa(net->ro._s_addr);
4408 net->ro._s_addr = NULL;
4409 net->src_addr_selected = 0;
4415 if (net->src_addr_selected == 0) {
4416 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4417 /* KAME hack: embed scopeid */
4418 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4419 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4422 /* Cache the source address */
4423 net->ro._s_addr = sctp_source_address_selection(inp,
4429 (void)sa6_recoverscope(sin6);
4430 net->src_addr_selected = 1;
4432 if (net->ro._s_addr == NULL) {
4433 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4434 net->src_addr_selected = 0;
4435 sctp_handle_no_route(stcb, net, so_locked);
4436 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4438 return (EHOSTUNREACH);
4440 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4442 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4443 /* KAME hack: embed scopeid */
4444 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4445 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4448 if (over_addr == NULL) {
4449 struct sctp_ifa *_lsrc;
4451 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4455 if (_lsrc == NULL) {
4456 sctp_handle_no_route(stcb, net, so_locked);
4457 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4459 return (EHOSTUNREACH);
4461 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4462 sctp_free_ifa(_lsrc);
4464 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4465 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4467 (void)sa6_recoverscope(sin6);
4469 lsa6->sin6_port = inp->sctp_lport;
4471 if (ro->ro_rt == NULL) {
4473 * src addr selection failed to find a route
4474 * (or valid source addr), so we can't get
4477 sctp_handle_no_route(stcb, net, so_locked);
4478 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4480 return (EHOSTUNREACH);
4483 * XXX: sa6 may not have a valid sin6_scope_id in
4484 * the non-SCOPEDROUTING case.
4486 bzero(&lsa6_storage, sizeof(lsa6_storage));
4487 lsa6_storage.sin6_family = AF_INET6;
4488 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4489 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4490 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4491 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4496 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4497 lsa6_storage.sin6_port = inp->sctp_lport;
4498 lsa6 = &lsa6_storage;
4499 ip6h->ip6_src = lsa6->sin6_addr;
4502 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4503 sctp_handle_no_route(stcb, net, so_locked);
4504 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4506 return (EHOSTUNREACH);
4508 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4509 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4510 udp->uh_dport = port;
4511 udp->uh_ulen = htons((uint16_t) (packet_length - sizeof(struct ip6_hdr)));
4513 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4515 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4518 sctphdr->src_port = src_port;
4519 sctphdr->dest_port = dest_port;
4520 sctphdr->v_tag = v_tag;
4521 sctphdr->checksum = 0;
4524 * We set the hop limit now since there is a good
4525 * chance that our ro pointer is now filled
4527 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4528 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4531 /* Copy to be sure something bad is not happening */
4532 sin6->sin6_addr = ip6h->ip6_dst;
4533 lsa6->sin6_addr = ip6h->ip6_src;
4536 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4537 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4538 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4539 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4540 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4542 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4543 /* preserve the port and scope for link local send */
4545 prev_scope = sin6->sin6_scope_id;
4546 prev_port = sin6->sin6_port;
4548 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4549 /* failed to prepend data, give up */
4551 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4554 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4556 #if defined(SCTP_WITH_NO_CSUM)
4557 SCTP_STAT_INCR(sctps_sendnocrc);
4559 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4560 SCTP_STAT_INCR(sctps_sendswcrc);
4562 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4563 udp->uh_sum = 0xffff;
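/*
 * The UDP checksum is mandatory over IPv6, so a computed value of zero
 * must be transmitted as 0xffff.
 */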
4566 #if defined(SCTP_WITH_NO_CSUM)
4567 SCTP_STAT_INCR(sctps_sendnocrc);
4569 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4570 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4571 SCTP_STAT_INCR(sctps_sendhwcrc);
4574 /* send it out. table id is taken from stcb */
4575 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4576 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4577 so = SCTP_INP_SO(inp);
4578 SCTP_SOCKET_UNLOCK(so, 0);
4581 #ifdef SCTP_PACKET_LOGGING
4582 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4583 sctp_packet_log(o_pak);
4585 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4586 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4587 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4588 atomic_add_int(&stcb->asoc.refcnt, 1);
4589 SCTP_TCB_UNLOCK(stcb);
4590 SCTP_SOCKET_LOCK(so, 0);
4591 SCTP_TCB_LOCK(stcb);
4592 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4596 /* for link local this must be done */
4597 sin6->sin6_scope_id = prev_scope;
4598 sin6->sin6_port = prev_port;
4600 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4601 SCTP_STAT_INCR(sctps_sendpackets);
4602 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4604 SCTP_STAT_INCR(sctps_senderrors);
4607 /* Now if we had a temp route free it */
4610 /* PMTU check versus smallest asoc MTU goes here. */
4612 if (ro->ro_rt == NULL) {
4613 /* Route was freed */
4614 if (net->ro._s_addr &&
4615 net->src_addr_selected) {
4616 sctp_free_ifa(net->ro._s_addr);
4617 net->ro._s_addr = NULL;
4619 net->src_addr_selected = 0;
4621 if ((ro->ro_rt != NULL) &&
4622 (net->ro._s_addr)) {
4625 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4627 (stcb->asoc.smallest_mtu > mtu)) {
4628 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4631 net->mtu -= sizeof(struct udphdr);
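/* Account for the UDP encapsulation overhead in the path MTU. */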
4635 if (ND_IFINFO(ifp)->linkmtu &&
4636 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4637 sctp_mtu_size_reset(inp,
4639 ND_IFINFO(ifp)->linkmtu);
4647 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4648 ((struct sockaddr *)to)->sa_family);
4650 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4657 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4658 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4663 struct mbuf *m, *m_last;
4664 struct sctp_nets *net;
4665 struct sctp_init_chunk *init;
4666 struct sctp_supported_addr_param *sup_addr;
4667 struct sctp_adaptation_layer_indication *ali;
4668 struct sctp_supported_chunk_types_param *pr_supported;
4669 struct sctp_paramhdr *ph;
4670 int cnt_inits_to = 0;
4672 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4674 /* INITs always go to the primary (and usually ONLY) address */
4675 net = stcb->asoc.primary_destination;
4677 net = TAILQ_FIRST(&stcb->asoc.nets);
4682 /* we confirm any address we send an INIT to */
4683 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4684 (void)sctp_set_primary_addr(stcb, NULL, net);
4686 /* we confirm any address we send an INIT to */
4687 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4689 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4691 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4693 * special hook: if we are sending to a link-local address it will not
4694 * show up in our private address count.
4696 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4700 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4701 /* This case should not happen */
4702 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4705 /* start the INIT timer */
4706 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4708 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
4710 /* No memory, INIT timer will re-attempt. */
4711 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4714 chunk_len = (uint16_t) sizeof(struct sctp_init_chunk);
4716 /* Now let's put the chunk header in place */
4717 init = mtod(m, struct sctp_init_chunk *);
4718 /* now the chunk header */
4719 init->ch.chunk_type = SCTP_INITIATION;
4720 init->ch.chunk_flags = 0;
4721 /* fill in later from mbuf we build */
4722 init->ch.chunk_length = 0;
4723 /* place in my tag */
4724 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4725 /* set up some of the credits. */
4726 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4727 SCTP_MINIMAL_RWND));
4728 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4729 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4730 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
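/*
 * The fixed part of the INIT chunk is now complete; each optional
 * parameter that follows is appended at chunk_len and padded to a
 * 32-bit boundary via SCTP_SIZE32().
 */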
4732 /* Adaptation layer indication parameter */
4733 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4734 parameter_len = (uint16_t) sizeof(struct sctp_adaptation_layer_indication);
4735 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4736 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4737 ali->ph.param_length = htons(parameter_len);
4738 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
4739 chunk_len += parameter_len;
4742 if (stcb->asoc.ecn_supported == 1) {
4743 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4744 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4745 ph->param_type = htons(SCTP_ECN_CAPABLE);
4746 ph->param_length = htons(parameter_len);
4747 chunk_len += parameter_len;
4749 /* PR-SCTP supported parameter */
4750 if (stcb->asoc.prsctp_supported == 1) {
4751 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4752 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4753 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4754 ph->param_length = htons(parameter_len);
4755 chunk_len += parameter_len;
4757 /* Add NAT friendly parameter. */
4758 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4759 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4760 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4761 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4762 ph->param_length = htons(parameter_len);
4763 chunk_len += parameter_len;
4765 /* And now tell the peer which extensions we support */
4767 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4768 if (stcb->asoc.prsctp_supported == 1) {
4769 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4770 if (stcb->asoc.idata_supported) {
4771 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
4774 if (stcb->asoc.auth_supported == 1) {
4775 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4777 if (stcb->asoc.asconf_supported == 1) {
4778 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4779 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4781 if (stcb->asoc.reconfig_supported == 1) {
4782 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4784 if (stcb->asoc.idata_supported) {
4785 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
4787 if (stcb->asoc.nrsack_supported == 1) {
4788 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4790 if (stcb->asoc.pktdrop_supported == 1) {
4791 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4794 parameter_len = (uint16_t) sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4795 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4796 pr_supported->ph.param_length = htons(parameter_len);
4797 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4798 chunk_len += parameter_len;
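/*
 * Padding for this parameter is written lazily: padding_len is carried
 * forward and the pad bytes are zeroed just before the next parameter
 * (or the trailing pad) is appended.
 */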
4800 /* add authentication parameters */
4801 if (stcb->asoc.auth_supported) {
4802 /* attach RANDOM parameter, if available */
4803 if (stcb->asoc.authinfo.random != NULL) {
4804 struct sctp_auth_random *randp;
4806 if (padding_len > 0) {
4807 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4808 chunk_len += padding_len;
4811 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4812 parameter_len = (uint16_t) sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4813 /* random key already contains the header */
4814 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4815 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4816 chunk_len += parameter_len;
4818 /* add HMAC_ALGO parameter */
4819 if (stcb->asoc.local_hmacs != NULL) {
4820 struct sctp_auth_hmac_algo *hmacs;
4822 if (padding_len > 0) {
4823 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4824 chunk_len += padding_len;
4827 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4828 parameter_len = (uint16_t) (sizeof(struct sctp_auth_hmac_algo) +
4829 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4830 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4831 hmacs->ph.param_length = htons(parameter_len);
4832 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *) hmacs->hmac_ids);
4833 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4834 chunk_len += parameter_len;
4836 /* add CHUNKS parameter */
4837 if (stcb->asoc.local_auth_chunks != NULL) {
4838 struct sctp_auth_chunk_list *chunks;
4840 if (padding_len > 0) {
4841 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4842 chunk_len += padding_len;
4845 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4846 parameter_len = (uint16_t) (sizeof(struct sctp_auth_chunk_list) +
4847 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
4848 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4849 chunks->ph.param_length = htons(parameter_len);
4850 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
4851 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4852 chunk_len += parameter_len;
4855 /* now any cookie time extensions */
4856 if (stcb->asoc.cookie_preserve_req) {
4857 struct sctp_cookie_perserve_param *cookie_preserve;
4859 if (padding_len > 0) {
4860 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4861 chunk_len += padding_len;
4864 parameter_len = (uint16_t) sizeof(struct sctp_cookie_perserve_param);
4865 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4866 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4867 cookie_preserve->ph.param_length = htons(parameter_len);
4868 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4869 stcb->asoc.cookie_preserve_req = 0;
4870 chunk_len += parameter_len;
4872 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4875 if (padding_len > 0) {
4876 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4877 chunk_len += padding_len;
4880 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4881 if (stcb->asoc.scope.ipv4_addr_legal) {
4882 parameter_len += (uint16_t) sizeof(uint16_t);
4884 if (stcb->asoc.scope.ipv6_addr_legal) {
4885 parameter_len += (uint16_t) sizeof(uint16_t);
4887 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4888 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4889 sup_addr->ph.param_length = htons(parameter_len);
4891 if (stcb->asoc.scope.ipv4_addr_legal) {
4892 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4894 if (stcb->asoc.scope.ipv6_addr_legal) {
4895 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4897 padding_len = 4 - 2 * i;
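/* i is 1 or 2 two-byte address types, so padding_len is 2 or 0 bytes to reach 32-bit alignment. */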
4898 chunk_len += parameter_len;
4900 SCTP_BUF_LEN(m) = chunk_len;
4901 /* now the addresses */
4903 * To optimize this we could put the scoping stuff into a structure
4904 * and remove the individual uint8's from the assoc structure. Then
4905 * we could just sifa in the address within the stcb. But for now
4906 * this is a quick hack to get the address stuff teased apart.
4908 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
4910 &padding_len, &chunk_len);
4912 init->ch.chunk_length = htons(chunk_len);
4913 if (padding_len > 0) {
4914 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
4919 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4920 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4921 (struct sockaddr *)&net->ro._l_addr,
4922 m, 0, NULL, 0, 0, 0, 0,
4923 inp->sctp_lport, stcb->rport, htonl(0),
4927 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4928 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4929 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4933 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4934 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4937 * Given an mbuf containing an INIT or INIT-ACK with the param_offset
4938 * being equal to the beginning of the params, i.e. (iphlen +
4939 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4940 * end of the mbuf, verifying that all parameters are known.
4942 * For unknown parameters build and return an mbuf with
4943 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4944 * processing this chunk, stop and set *abort_processing to 1.
4946 * By having param_offset be pre-set to where parameters begin, it is
4947 * hoped that this routine may be reused in the future by new features.
4950 struct sctp_paramhdr *phdr, params;
4952 struct mbuf *mat, *op_err;
4953 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4954 int at, limit, pad_needed;
4955 uint16_t ptype, plen, padded_size;
4958 *abort_processing = 0;
4961 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4964 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4965 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4966 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4967 ptype = ntohs(phdr->param_type);
4968 plen = ntohs(phdr->param_length);
4969 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4970 /* wacked parameter */
4971 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4974 limit -= SCTP_SIZE32(plen);
4976 * All parameters for all chunks that we know/understand are
4977 * listed here. We process them other places and make
4978 * appropriate stop actions per the upper bits. However this
4979 * is the generic routine processor's can call to get back
4980 * an operr.. to either incorporate (init-ack) or send.
4982 padded_size = SCTP_SIZE32(plen);
4984 /* Params with variable size */
4985 case SCTP_HEARTBEAT_INFO:
4986 case SCTP_STATE_COOKIE:
4987 case SCTP_UNRECOG_PARAM:
4988 case SCTP_ERROR_CAUSE_IND:
4992 /* Params with variable size within a range */
4993 case SCTP_CHUNK_LIST:
4994 case SCTP_SUPPORTED_CHUNK_EXT:
4995 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4996 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5001 case SCTP_SUPPORTED_ADDRTYPE:
5002 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5003 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5009 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5010 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5015 case SCTP_SET_PRIM_ADDR:
5016 case SCTP_DEL_IP_ADDRESS:
5017 case SCTP_ADD_IP_ADDRESS:
5018 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5019 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5020 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5025 /* Params with a fixed size */
5026 case SCTP_IPV4_ADDRESS:
5027 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5028 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5033 case SCTP_IPV6_ADDRESS:
5034 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5035 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5040 case SCTP_COOKIE_PRESERVE:
5041 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5042 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5047 case SCTP_HAS_NAT_SUPPORT:
5050 case SCTP_PRSCTP_SUPPORTED:
5051 if (padded_size != sizeof(struct sctp_paramhdr)) {
5052 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5057 case SCTP_ECN_CAPABLE:
5058 if (padded_size != sizeof(struct sctp_paramhdr)) {
5059 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5064 case SCTP_ULP_ADAPTATION:
5065 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5066 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5071 case SCTP_SUCCESS_REPORT:
5072 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5073 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5078 case SCTP_HOSTNAME_ADDRESS:
5080 /* We can NOT handle HOST NAME addresses!! */
5083 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5084 *abort_processing = 1;
5085 if (op_err == NULL) {
5086 /* Ok need to try to get an mbuf */
5088 l_len = SCTP_MIN_OVERHEAD;
5090 l_len = SCTP_MIN_V4_OVERHEAD;
5092 l_len += sizeof(struct sctp_chunkhdr);
5094 l_len += sizeof(struct sctp_paramhdr);
5095 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5097 SCTP_BUF_LEN(op_err) = 0;
5099 * pre-reserve space for ip
5100 * and sctp header and chunk hdr
5104 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5106 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5108 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5109 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5113 /* If we have space */
5114 struct sctp_paramhdr s;
5117 uint32_t cpthis = 0;
5119 pad_needed = 4 - (err_at % 4);
5120 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5121 err_at += pad_needed;
5123 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5124 s.param_length = htons(sizeof(s) + plen);
5125 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5126 err_at += sizeof(s);
5127 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5129 sctp_m_freem(op_err);
5131 * we are out of memory but
5132 * we still need to have a
5133 * look at what to do (the
5134 * system is in trouble though).
5139 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5146 * we do not recognize the parameter; figure out what we do.
5149 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5150 if ((ptype & 0x4000) == 0x4000) {
5151 /* Report bit is set?? */
5152 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5153 if (op_err == NULL) {
5156 /* Ok need to try to get an mbuf */
5158 l_len = SCTP_MIN_OVERHEAD;
5160 l_len = SCTP_MIN_V4_OVERHEAD;
5162 l_len += sizeof(struct sctp_chunkhdr);
5164 l_len += sizeof(struct sctp_paramhdr);
5165 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5167 SCTP_BUF_LEN(op_err) = 0;
5169 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5171 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5173 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5174 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5178 /* If we have space */
5179 struct sctp_paramhdr s;
5182 uint32_t cpthis = 0;
5184 pad_needed = 4 - (err_at % 4);
5185 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5186 err_at += pad_needed;
5188 s.param_type = htons(SCTP_UNRECOG_PARAM);
5189 s.param_length = htons(sizeof(s) + plen);
5190 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5191 err_at += sizeof(s);
5192 if (plen > sizeof(tempbuf)) {
5193 plen = sizeof(tempbuf);
5195 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5197 sctp_m_freem(op_err);
5199 * we are out of memory but
5200 * we still need to have a
5201 * look at what to do (the
5202 * system is in trouble though).
5206 goto more_processing;
5208 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5213 if ((ptype & 0x8000) == 0x0000) {
5214 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5217 /* skip this chunk and continue processing */
5218 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5219 at += SCTP_SIZE32(plen);
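/*
 * Per RFC 4960 the two high-order bits of the parameter type control
 * this: 0x8000 set means skip the parameter and continue, clear means
 * stop processing; 0x4000 (checked above) asks for the unrecognized
 * parameter to be reported back to the peer.
 */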
5224 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5228 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5229 *abort_processing = 1;
5230 if ((op_err == NULL) && phdr) {
5233 l_len = SCTP_MIN_OVERHEAD;
5235 l_len = SCTP_MIN_V4_OVERHEAD;
5237 l_len += sizeof(struct sctp_chunkhdr);
5238 l_len += (2 * sizeof(struct sctp_paramhdr));
5239 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5241 SCTP_BUF_LEN(op_err) = 0;
5243 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5245 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5247 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5248 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5251 if ((op_err) && phdr) {
5252 struct sctp_paramhdr s;
5255 uint32_t cpthis = 0;
5257 pad_needed = 4 - (err_at % 4);
5258 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5259 err_at += pad_needed;
5261 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5262 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5263 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5264 err_at += sizeof(s);
5265 /* Only copy back the p-hdr that caused the issue */
5266 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5272 sctp_are_there_new_addresses(struct sctp_association *asoc,
5273 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5276 * Given an INIT packet, look through the packet to verify that there
5277 * are NO new addresses. As we go through the parameters add reports
5278 * of any un-understood parameters that require an error. Also we
5279 * must return (1) to drop the packet if we see an un-understood
5280 * parameter that tells us to drop the chunk.
5282 struct sockaddr *sa_touse;
5283 struct sockaddr *sa;
5284 struct sctp_paramhdr *phdr, params;
5285 uint16_t ptype, plen;
5287 struct sctp_nets *net;
5290 struct sockaddr_in sin4, *sa4;
5293 struct sockaddr_in6 sin6, *sa6;
5297 memset(&sin4, 0, sizeof(sin4));
5298 sin4.sin_family = AF_INET;
5299 sin4.sin_len = sizeof(sin4);
5302 memset(&sin6, 0, sizeof(sin6));
5303 sin6.sin6_family = AF_INET6;
5304 sin6.sin6_len = sizeof(sin6);
5306 /* First what about the src address of the pkt ? */
5308 switch (src->sa_family) {
5311 if (asoc->scope.ipv4_addr_legal) {
5318 if (asoc->scope.ipv6_addr_legal) {
5329 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5330 sa = (struct sockaddr *)&net->ro._l_addr;
5331 if (sa->sa_family == src->sa_family) {
5333 if (sa->sa_family == AF_INET) {
5334 struct sockaddr_in *src4;
5336 sa4 = (struct sockaddr_in *)sa;
5337 src4 = (struct sockaddr_in *)src;
5338 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5345 if (sa->sa_family == AF_INET6) {
5346 struct sockaddr_in6 *src6;
5348 sa6 = (struct sockaddr_in6 *)sa;
5349 src6 = (struct sockaddr_in6 *)src;
5350 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5359 /* New address added! no need to look further. */
5363 /* Ok so far, let's munge through the rest of the packet */
5364 offset += sizeof(struct sctp_init_chunk);
5365 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5368 ptype = ntohs(phdr->param_type);
5369 plen = ntohs(phdr->param_length);
5372 case SCTP_IPV4_ADDRESS:
5374 struct sctp_ipv4addr_param *p4, p4_buf;
5376 phdr = sctp_get_next_param(in_initpkt, offset,
5377 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5378 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5382 if (asoc->scope.ipv4_addr_legal) {
5383 p4 = (struct sctp_ipv4addr_param *)phdr;
5384 sin4.sin_addr.s_addr = p4->addr;
5385 sa_touse = (struct sockaddr *)&sin4;
5391 case SCTP_IPV6_ADDRESS:
5393 struct sctp_ipv6addr_param *p6, p6_buf;
5395 phdr = sctp_get_next_param(in_initpkt, offset,
5396 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5397 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5401 if (asoc->scope.ipv6_addr_legal) {
5402 p6 = (struct sctp_ipv6addr_param *)phdr;
5403 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5405 sa_touse = (struct sockaddr *)&sin6;
5415 /* ok, sa_touse points to one to check */
5417 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5418 sa = (struct sockaddr *)&net->ro._l_addr;
5419 if (sa->sa_family != sa_touse->sa_family) {
5423 if (sa->sa_family == AF_INET) {
5424 sa4 = (struct sockaddr_in *)sa;
5425 if (sa4->sin_addr.s_addr ==
5426 sin4.sin_addr.s_addr) {
5433 if (sa->sa_family == AF_INET6) {
5434 sa6 = (struct sockaddr_in6 *)sa;
5435 if (SCTP6_ARE_ADDR_EQUAL(
5444 /* New addr added! no need to look further */
5448 offset += SCTP_SIZE32(plen);
5449 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5455 * Given an mbuf chain that was sent to us containing an INIT, build an
5456 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has done
5457 * a pullup to include the IPv6/IPv4 header, the SCTP header and the initial part of the
5458 * INIT message (i.e. the struct sctp_init_msg).
5461 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5462 struct sctp_nets *src_net, struct mbuf *init_pkt,
5463 int iphlen, int offset,
5464 struct sockaddr *src, struct sockaddr *dst,
5465 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5466 uint8_t mflowtype, uint32_t mflowid,
5467 uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5469 struct sctp_association *asoc;
5470 struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5471 struct sctp_init_ack_chunk *initack;
5472 struct sctp_adaptation_layer_indication *ali;
5473 struct sctp_supported_chunk_types_param *pr_supported;
5474 struct sctp_paramhdr *ph;
5475 union sctp_sockstore *over_addr;
5476 struct sctp_scoping scp;
5478 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5479 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5480 struct sockaddr_in *sin;
5483 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5484 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5485 struct sockaddr_in6 *sin6;
5487 struct sockaddr *to;
5488 struct sctp_state_cookie stc;
5489 struct sctp_nets *net = NULL;
5490 uint8_t *signature = NULL;
5491 int cnt_inits_to = 0;
5492 uint16_t his_limit, i_want;
5494 int nat_friendly = 0;
5496 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5503 if ((asoc != NULL) &&
5504 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT)) {
5505 if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
5507 * new addresses, out of here in non-cookie-wait states.
5510 * Send an ABORT, without the new address error
5511 * cause. This looks no different than if no
5512 * listener was present.
5514 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5516 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5517 mflowtype, mflowid, inp->fibnum,
5521 if (src_net != NULL && (src_net->port != port)) {
5523 * change of remote encapsulation port, out of here
5524 * in non-cookie-wait states
5526 * Send an ABORT, without a specific error cause.
5527 * This looks no different than if no listener was
5530 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5531 "Remote encapsulation port changed");
5532 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5533 mflowtype, mflowid, inp->fibnum,
5539 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5540 (offset + sizeof(struct sctp_init_chunk)),
5541 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5544 if (op_err == NULL) {
5545 char msg[SCTP_DIAG_INFO_LEN];
5547 snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
5548 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5551 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5552 init_chk->init.initiate_tag, op_err,
5553 mflowtype, mflowid, inp->fibnum,
5557 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5559 /* No memory, INIT timer will re-attempt. */
5561 sctp_m_freem(op_err);
5564 chunk_len = (uint16_t) sizeof(struct sctp_init_ack_chunk);
5568 * We might not overwrite the identification[] completely and on
5569 * some platforms time_entered will contain some padding. Therefore
5570 * zero out the cookie to avoid putting uninitialized memory on the wire.
5573 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5575 /* the time I built cookie */
5576 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5578 /* populate any tie tags */
5580 /* unlock before tag selections */
5581 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5582 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5583 stc.cookie_life = asoc->cookie_life;
5584 net = asoc->primary_destination;
5586 stc.tie_tag_my_vtag = 0;
5587 stc.tie_tag_peer_vtag = 0;
5588 /* life I will award this cookie */
5589 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5592 /* copy in the ports for later check */
5593 stc.myport = sh->dest_port;
5594 stc.peerport = sh->src_port;
5597 * If we wanted to honor cookie life extensions, we would add to
5598 * stc.cookie_life. For now we should NOT honor any extension
5600 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5601 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5602 stc.ipv6_addr_legal = 1;
5603 if (SCTP_IPV6_V6ONLY(inp)) {
5604 stc.ipv4_addr_legal = 0;
5606 stc.ipv4_addr_legal = 1;
5609 stc.ipv6_addr_legal = 0;
5610 stc.ipv4_addr_legal = 1;
5615 switch (dst->sa_family) {
5619 /* lookup address */
5620 stc.address[0] = src4->sin_addr.s_addr;
5624 stc.addr_type = SCTP_IPV4_ADDRESS;
5625 /* local from address */
5626 stc.laddress[0] = dst4->sin_addr.s_addr;
5627 stc.laddress[1] = 0;
5628 stc.laddress[2] = 0;
5629 stc.laddress[3] = 0;
5630 stc.laddr_type = SCTP_IPV4_ADDRESS;
5631 /* scope_id is only for v6 */
5633 if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
5634 (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))) {
5637 /* Must use the address in this case */
5638 if (sctp_is_address_on_local_host(src, vrf_id)) {
5639 stc.loopback_scope = 1;
5642 stc.local_scope = 0;
5650 stc.addr_type = SCTP_IPV6_ADDRESS;
5651 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5652 stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
5653 if (sctp_is_address_on_local_host(src, vrf_id)) {
5654 stc.loopback_scope = 1;
5655 stc.local_scope = 0;
5658 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
5659 IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
5661 * If the new destination or source
5662 * is a LINK_LOCAL we must have both
5663 * site and local scope in common.
5664 * Don't set local scope though
5665 * since we must depend on the
5666 * source to be added implicitly. We
5667 * cannot assume, just because we
5668 * share one link, that all links are common.
5671 stc.local_scope = 0;
5675 * we start counting for the private
5676 * address stuff at 1. since the
5677 * link local we source from won't
5678 * show up in our scoped count.
5681 /* pull out the scope_id from the incoming packet */
5683 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
5684 IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
5686 * If the new destination or source
5687 * is SITE_LOCAL then we must have
5688 * site scope in common.
5692 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5693 stc.laddr_type = SCTP_IPV6_ADDRESS;
5703 /* set the scope per the existing tcb */
5706 struct sctp_nets *lnet;
5709 stc.loopback_scope = asoc->scope.loopback_scope;
5710 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5711 stc.site_scope = asoc->scope.site_scope;
5712 stc.local_scope = asoc->scope.local_scope;
5714 /* Why do we not consider IPv4 LL addresses? */
5715 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5716 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5717 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5719 * if we have a LL address, start counting at 1.
5727 /* use the net pointer */
5728 to = (struct sockaddr *)&net->ro._l_addr;
5729 switch (to->sa_family) {
5732 sin = (struct sockaddr_in *)to;
5733 stc.address[0] = sin->sin_addr.s_addr;
5737 stc.addr_type = SCTP_IPV4_ADDRESS;
5738 if (net->src_addr_selected == 0) {
5740 * strange case here, the INIT should have
5741 * done the selection.
5743 net->ro._s_addr = sctp_source_address_selection(inp,
5744 stcb, (sctp_route_t *) & net->ro,
5746 if (net->ro._s_addr == NULL)
5749 net->src_addr_selected = 1;
5752 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5753 stc.laddress[1] = 0;
5754 stc.laddress[2] = 0;
5755 stc.laddress[3] = 0;
5756 stc.laddr_type = SCTP_IPV4_ADDRESS;
5757 /* scope_id is only for v6 */
5763 sin6 = (struct sockaddr_in6 *)to;
5764 memcpy(&stc.address, &sin6->sin6_addr,
5765 sizeof(struct in6_addr));
5766 stc.addr_type = SCTP_IPV6_ADDRESS;
5767 stc.scope_id = sin6->sin6_scope_id;
5768 if (net->src_addr_selected == 0) {
5770 * strange case here, the INIT should have
5771 * done the selection.
5773 net->ro._s_addr = sctp_source_address_selection(inp,
5774 stcb, (sctp_route_t *) & net->ro,
5776 if (net->ro._s_addr == NULL)
5779 net->src_addr_selected = 1;
5781 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5782 sizeof(struct in6_addr));
5783 stc.laddr_type = SCTP_IPV6_ADDRESS;
5788 /* Now let's put the INIT-ACK chunk header in place */
5789 initack = mtod(m, struct sctp_init_ack_chunk *);
5790 /* Save it off for quick ref */
5791 stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
5793 memcpy(stc.identification, SCTP_VERSION_STRING,
5794 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5795 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5796 /* now the chunk header */
5797 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5798 initack->ch.chunk_flags = 0;
5799 /* fill in later from mbuf we build */
5800 initack->ch.chunk_length = 0;
5801 /* place in my tag */
5802 if ((asoc != NULL) &&
5803 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5804 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5805 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5806 /* re-use the v-tags and init-seq here */
5807 initack->init.initiate_tag = htonl(asoc->my_vtag);
5808 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5810 uint32_t vtag, itsn;
5812 if (hold_inp_lock) {
5813 SCTP_INP_INCR_REF(inp);
5814 SCTP_INP_RUNLOCK(inp);
5817 atomic_add_int(&asoc->refcnt, 1);
5818 SCTP_TCB_UNLOCK(stcb);
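/*
 * The TCB lock is dropped (with the association pinned by a refcount)
 * while a new vtag and initial TSN are selected, presumably because
 * tag selection needs locks that must not be taken under the TCB lock.
 */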
5820 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5821 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5823 * Got a duplicate vtag on some guy behind a
5824 * NAT; make sure we don't use it.
5828 initack->init.initiate_tag = htonl(vtag);
5829 /* get a TSN to use too */
5830 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5831 initack->init.initial_tsn = htonl(itsn);
5832 SCTP_TCB_LOCK(stcb);
5833 atomic_add_int(&asoc->refcnt, -1);
5835 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5836 initack->init.initiate_tag = htonl(vtag);
5837 /* get a TSN to use too */
5838 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5840 if (hold_inp_lock) {
5841 SCTP_INP_RLOCK(inp);
5842 SCTP_INP_DECR_REF(inp);
5845 /* save away my tag too */
5846 stc.my_vtag = initack->init.initiate_tag;
5848 /* set up some of the credits. */
5849 so = inp->sctp_socket;
5851 /* memory problem */
5855 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5857 /* set what I want */
5858 his_limit = ntohs(init_chk->init.num_inbound_streams);
5859 /* choose what I want */
5861 if (asoc->streamoutcnt > asoc->pre_open_streams) {
5862 i_want = asoc->streamoutcnt;
5864 i_want = asoc->pre_open_streams;
5867 i_want = inp->sctp_ep.pre_open_stream_count;
5869 if (his_limit < i_want) {
5870 /* I Want more :< */
5871 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5873 /* I can have what I want :> */
5874 initack->init.num_outbound_streams = htons(i_want);
5876 /* tell him his limit. */
5877 initack->init.num_inbound_streams =
5878 htons(inp->sctp_ep.max_open_streams_intome);
5880 /* adaptation layer indication parameter */
5881 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5882 parameter_len = (uint16_t) sizeof(struct sctp_adaptation_layer_indication);
5883 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
5884 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5885 ali->ph.param_length = htons(parameter_len);
5886 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5887 chunk_len += parameter_len;
5890 if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
5891 ((asoc == NULL) && (inp->ecn_supported == 1))) {
5892 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
5893 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5894 ph->param_type = htons(SCTP_ECN_CAPABLE);
5895 ph->param_length = htons(parameter_len);
5896 chunk_len += parameter_len;
5898 /* PR-SCTP supported parameter */
5899 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5900 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5901 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
5902 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5903 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5904 ph->param_length = htons(parameter_len);
5905 chunk_len += parameter_len;
5907 /* Add NAT friendly parameter */
5909 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
5910 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5911 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5912 ph->param_length = htons(parameter_len);
5913 chunk_len += parameter_len;
5915 /* And now tell the peer which extensions we support */
5917 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
5918 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5919 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5920 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5921 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5922 ((asoc == NULL) && (inp->idata_supported == 1))) {
5923 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5926 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5927 ((asoc == NULL) && (inp->auth_supported == 1))) {
5928 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5930 if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
5931 ((asoc == NULL) && (inp->asconf_supported == 1))) {
5932 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5933 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5935 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
5936 ((asoc == NULL) && (inp->reconfig_supported == 1))) {
5937 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5939 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5940 ((asoc == NULL) && (inp->idata_supported == 1))) {
5941 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5943 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
5944 ((asoc == NULL) && (inp->nrsack_supported == 1))) {
5945 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5947 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
5948 ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
5949 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5952 parameter_len = (uint16_t) sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5953 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5954 pr_supported->ph.param_length = htons(parameter_len);
5955 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5956 chunk_len += parameter_len;
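/*
 * SCTP parameters are padded out to a 4-byte boundary; SCTP_SIZE32()
 * rounds up, so padding_len is simply the distance to the next multiple
 * of four.  For example, a parameter_len of 6 gives
 * SCTP_SIZE32(6) - 6 = 8 - 6 = 2 bytes of padding.
 */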
5958 /* add authentication parameters */
5959 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5960 ((asoc == NULL) && (inp->auth_supported == 1))) {
5961 struct sctp_auth_random *randp;
5962 struct sctp_auth_hmac_algo *hmacs;
5963 struct sctp_auth_chunk_list *chunks;
5965 if (padding_len > 0) {
5966 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
5967 chunk_len += padding_len;
5970 /* generate and add RANDOM parameter */
5971 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
5972 parameter_len = (uint16_t) sizeof(struct sctp_auth_random) +
5973 SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5974 randp->ph.param_type = htons(SCTP_RANDOM);
5975 randp->ph.param_length = htons(parameter_len);
5976 SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
5977 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5978 chunk_len += parameter_len;
5980 if (padding_len > 0) {
5981 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
5982 chunk_len += padding_len;
5985 /* add HMAC_ALGO parameter */
5986 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
5987 parameter_len = (uint16_t) sizeof(struct sctp_auth_hmac_algo) +
5988 sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5989 (uint8_t *) hmacs->hmac_ids);
5990 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5991 hmacs->ph.param_length = htons(parameter_len);
5992 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5993 chunk_len += parameter_len;
5995 if (padding_len > 0) {
5996 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
5997 chunk_len += padding_len;
6000 /* add CHUNKS parameter */
6001 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
6002 parameter_len = (uint16_t) sizeof(struct sctp_auth_chunk_list) +
6003 sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6004 chunks->chunk_types);
6005 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6006 chunks->ph.param_length = htons(parameter_len);
6007 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6008 chunk_len += parameter_len;
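/*
 * The three parameters above (RANDOM, HMAC-ALGO and CHUNKS) match what
 * RFC 4895 expects an AUTH-capable endpoint to advertise in its
 * INIT/INIT-ACK; each one is padded to a 4-byte boundary before the next
 * is appended.
 */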
6010 SCTP_BUF_LEN(m) = chunk_len;
6012 /* now the addresses */
6014 * To optimize this we could put the scoping stuff into a structure
6015 * and remove the individual uint8's from the stc structure. Then we
6016 * could just sifa in the address within the stc.. but for now this
6017 * is a quick hack to get the address stuff teased apart.
6019 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6020 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6021 scp.loopback_scope = stc.loopback_scope;
6022 scp.ipv4_local_scope = stc.ipv4_scope;
6023 scp.local_scope = stc.local_scope;
6024 scp.site_scope = stc.site_scope;
6025 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6027 &padding_len, &chunk_len);
6028 /* padding_len can only be positive if no addresses have been added */
6029 if (padding_len > 0) {
6030 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6031 chunk_len += padding_len;
6032 SCTP_BUF_LEN(m) += padding_len;
6035 /* tack on the operational error if present */
6038 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6039 parameter_len += SCTP_BUF_LEN(m_tmp);
6041 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6042 SCTP_BUF_NEXT(m_last) = op_err;
6043 while (SCTP_BUF_NEXT(m_last) != NULL) {
6044 m_last = SCTP_BUF_NEXT(m_last);
6046 chunk_len += parameter_len;
6048 if (padding_len > 0) {
6049 m_last = sctp_add_pad_tombuf(m_last, padding_len);
6050 if (m_last == NULL) {
6051 /* Houston we have a problem, no space */
6055 chunk_len += padding_len;
6058 /* Now we must build a cookie */
6059 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6060 if (m_cookie == NULL) {
6061 /* memory problem */
6065 /* Now append the cookie to the end and update the space/size */
6066 SCTP_BUF_NEXT(m_last) = m_cookie;
6068 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6069 parameter_len += SCTP_BUF_LEN(m_tmp);
6070 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6074 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6075 chunk_len += parameter_len;
6078 * Place in the size, but we don't include the last pad (if any) in the INIT-ACK.
6081 initack->ch.chunk_length = htons(chunk_len);
6084 * Time to sign the cookie; we don't sign over the cookie signature
6085 * itself, thus we set the trailer.
6087 (void)sctp_hmac_m(SCTP_HMAC,
6088 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6089 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6090 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
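/*
 * sctp_hmac_m() is given the cookie chain with an offset of one parameter
 * header and, as the comment above notes, a trailer of SCTP_SIGNATURE_SIZE
 * bytes, so the signature field we are about to fill is kept out of the
 * digest.
 */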
6092 * We sifa 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6093 * here since the timer will drive a retransmission.
6095 if (padding_len > 0) {
6096 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6101 if (stc.loopback_scope) {
6102 over_addr = (union sctp_sockstore *)dst;
6107 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6109 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6112 SCTP_SO_NOT_LOCKED);
6113 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6118 sctp_prune_prsctp(struct sctp_tcb *stcb,
6119 struct sctp_association *asoc,
6120 struct sctp_sndrcvinfo *srcv,
6124 struct sctp_tmit_chunk *chk, *nchk;
6126 SCTP_TCB_LOCK_ASSERT(stcb);
6127 if ((asoc->prsctp_supported) &&
6128 (asoc->sent_queue_cnt_removeable > 0)) {
6129 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6131 * Look for chunks marked with the PR_SCTP flag AND
6132 * the buffer space flag. If the one being sent is
6133 * equal or greater priority then purge the old one
6134 * and free some space.
6136 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6138 * This one is PR-SCTP AND buffer space
6141 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6143 * Lower numbers equate to higher
6144 * priority so if the one we are
6145 * looking at has a larger or equal
6146 * priority we want to drop the data
6147 * and NOT retransmit it.
6151 * We release the book_size
6152 * if the mbuf is here
6157 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6161 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6164 freed_spc += ret_spc;
6165 if (freed_spc >= dataout) {
6168 } /* if chunk was present */
6169 } /* if of sufficient priority */
6170 } /* if chunk has enabled */
6171 } /* tailqforeach */
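/*
 * In short: for buffer-space limited PR-SCTP, timetodrop.tv_sec holds the
 * message's priority, and a queued chunk whose stored value is greater
 * than or equal to that of the message being sent (i.e. equal or lower
 * priority, since lower numbers mean higher priority) may be dropped to
 * free room for the new data.
 */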
6173 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6174 /* Here we must move to the sent queue and mark */
6175 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6176 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6179 * We release the book_size
6180 * if the mbuf is here
6184 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6187 freed_spc += ret_spc;
6188 if (freed_spc >= dataout) {
6191 } /* end if chk->data */
6192 } /* end if right class */
6193 } /* end if chk pr-sctp */
6194 } /* tailqforeachsafe (chk) */
6195 } /* if enabled in asoc */
6199 sctp_get_frag_point(struct sctp_tcb *stcb,
6200 struct sctp_association *asoc)
6205 * For endpoints that have both v6 and v4 addresses we must reserve
6206 * room for the ipv6 header, for those that are only dealing with V4
6207 * we use a larger frag point.
6209 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6210 ovh = SCTP_MIN_OVERHEAD;
6212 ovh = SCTP_MIN_V4_OVERHEAD;
6214 if (stcb->asoc.idata_supported) {
6215 ovh += sizeof(struct sctp_idata_chunk);
6217 ovh += sizeof(struct sctp_data_chunk);
6219 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6220 siz = asoc->smallest_mtu - ovh;
6222 siz = (stcb->asoc.sctp_frag_point - ovh);
6224 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6226 /* A data chunk MUST fit in a cluster */
6227 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6230 /* adjust for an AUTH chunk if DATA requires auth */
6231 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6232 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6235 /* make it an even word boundary please */
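/*
 * Rough example, assuming the usual header sizes: for an IPv4-only
 * endpoint with a 1500-byte smallest MTU, ovh is the 20-byte IPv4 header
 * plus the 12-byte SCTP common header plus a 16-byte DATA chunk header,
 * giving a fragmentation point of about 1500 - 32 - 16 = 1452 bytes,
 * rounded down to a word boundary as noted above.
 */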
6242 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6245 * We assume that the user wants PR_SCTP_TTL if the user provides a
6246 * positive lifetime but does not specify any PR_SCTP policy.
6248 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6249 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6250 } else if (sp->timetolive > 0) {
6251 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6252 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6256 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6257 case CHUNK_FLAGS_PR_SCTP_BUF:
6259 * Time to live is a priority stored in tv_sec when doing
6260 * the buffer drop thing.
6262 sp->ts.tv_sec = sp->timetolive;
6265 case CHUNK_FLAGS_PR_SCTP_TTL:
6269 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6270 tv.tv_sec = sp->timetolive / 1000;
6271 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
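/*
 * timetolive is in milliseconds; e.g. a TTL of 1500 ms yields tv_sec = 1
 * and tv_usec = 500000, which timevaladd() below adds to the current time
 * so ts ends up holding the absolute expiration time of the message.
 */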
6273 * TODO sctp_constants.h needs alternative time
6274 * macros when _KERNEL is undefined.
6276 timevaladd(&sp->ts, &tv);
6279 case CHUNK_FLAGS_PR_SCTP_RTX:
6281 * Time to live is the number of retransmissions stored in
6284 sp->ts.tv_sec = sp->timetolive;
6288 SCTPDBG(SCTP_DEBUG_USRREQ1,
6289 "Unknown PR_SCTP policy %u.\n",
6290 PR_SCTP_POLICY(sp->sinfo_flags));
6296 sctp_msg_append(struct sctp_tcb *stcb,
6297 struct sctp_nets *net,
6299 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6303 struct sctp_stream_queue_pending *sp = NULL;
6304 struct sctp_stream_out *strm;
6307 * Given an mbuf chain, put it into the association send queue and
6308 * place it on the wheel
6310 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6311 /* Invalid stream number */
6312 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6316 if ((stcb->asoc.stream_locked) &&
6317 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6318 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6322 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6323 /* Now can we send this? */
6324 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6325 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6326 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6327 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6328 /* got data while shutting down */
6329 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6333 sctp_alloc_a_strmoq(stcb, sp);
6335 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6339 sp->sinfo_flags = srcv->sinfo_flags;
6340 sp->timetolive = srcv->sinfo_timetolive;
6341 sp->ppid = srcv->sinfo_ppid;
6342 sp->context = srcv->sinfo_context;
6344 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6346 atomic_add_int(&sp->net->ref_count, 1);
6350 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6351 sp->stream = srcv->sinfo_stream;
6352 sp->msg_is_complete = 1;
6353 sp->sender_all_done = 1;
6356 sp->tail_mbuf = NULL;
6357 sctp_set_prsctp_policy(sp);
6359 * We could in theory (for sendall) sifa the length in, but we would
6360 * still have to hunt through the chain since we need to setup the
6364 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6365 if (SCTP_BUF_NEXT(at) == NULL)
6367 sp->length += SCTP_BUF_LEN(at);
6369 if (srcv->sinfo_keynumber_valid) {
6370 sp->auth_keyid = srcv->sinfo_keynumber;
6372 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6374 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6375 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6376 sp->holds_key_ref = 1;
6378 if (hold_stcb_lock == 0) {
6379 SCTP_TCB_SEND_LOCK(stcb);
6381 sctp_snd_sb_alloc(stcb, sp->length);
6382 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6383 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6384 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6386 if (hold_stcb_lock == 0) {
6387 SCTP_TCB_SEND_UNLOCK(stcb);
6397 static struct mbuf *
6398 sctp_copy_mbufchain(struct mbuf *clonechain,
6399 struct mbuf *outchain,
6400 struct mbuf **endofchain,
6403 uint8_t copy_by_ref)
6406 struct mbuf *appendchain;
6410 if (endofchain == NULL) {
6414 sctp_m_freem(outchain);
6417 if (can_take_mbuf) {
6418 appendchain = clonechain;
6421 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6423 /* It's not in a cluster */
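/*
 * For small messages (below the mbuf-threshold test above) the payload is
 * copied by value into the trailing space of the existing chain, leaving
 * the caller's mbufs untouched; larger messages fall through to the
 * reference-style SCTP_M_COPYM() path further down.
 */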
6424 if (*endofchain == NULL) {
6425 /* lets get a mbuf cluster */
6426 if (outchain == NULL) {
6427 /* This is the general case */
6429 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6430 if (outchain == NULL) {
6433 SCTP_BUF_LEN(outchain) = 0;
6434 *endofchain = outchain;
6435 /* get the prepend space */
6436 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6438 /* We really should not get a NULL
6443 if (SCTP_BUF_NEXT(m) == NULL) {
6447 m = SCTP_BUF_NEXT(m);
6450 if (*endofchain == NULL) {
6451 /* huh, TSNH XXX maybe we
6453 sctp_m_freem(outchain);
6457 /* get the new end of length */
6458 len = (int)M_TRAILINGSPACE(*endofchain);
6460 /* how much is left at the end? */
6461 len = (int)M_TRAILINGSPACE(*endofchain);
6463 /* Find the end of the data, for appending */
6464 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6466 /* Now lets copy it out */
6467 if (len >= sizeofcpy) {
6468 /* It all fits, copy it in */
6469 m_copydata(clonechain, 0, sizeofcpy, cp);
6470 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6472 /* fill up the end of the chain */
6474 m_copydata(clonechain, 0, len, cp);
6475 SCTP_BUF_LEN((*endofchain)) += len;
6476 /* now we need another one */
6479 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6484 SCTP_BUF_NEXT((*endofchain)) = m;
6486 cp = mtod((*endofchain), caddr_t);
6487 m_copydata(clonechain, len, sizeofcpy, cp);
6488 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6492 /* copy the old-fashioned way */
6493 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6494 #ifdef SCTP_MBUF_LOGGING
6495 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6496 sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
6501 if (appendchain == NULL) {
6504 sctp_m_freem(outchain);
6508 /* tack on to the end */
6509 if (*endofchain != NULL) {
6510 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6514 if (SCTP_BUF_NEXT(m) == NULL) {
6515 SCTP_BUF_NEXT(m) = appendchain;
6518 m = SCTP_BUF_NEXT(m);
6522 * save off the end and update the end-chain position
6526 if (SCTP_BUF_NEXT(m) == NULL) {
6530 m = SCTP_BUF_NEXT(m);
6534 /* save off the end and update the end-chain position */
6537 if (SCTP_BUF_NEXT(m) == NULL) {
6541 m = SCTP_BUF_NEXT(m);
6543 return (appendchain);
6548 sctp_med_chunk_output(struct sctp_inpcb *inp,
6549 struct sctp_tcb *stcb,
6550 struct sctp_association *asoc,
6553 int control_only, int from_where,
6554 struct timeval *now, int *now_filled, int frag_point, int so_locked
6555 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6561 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6562 uint32_t val SCTP_UNUSED)
6564 struct sctp_copy_all *ca;
6567 int added_control = 0;
6568 int un_sent, do_chunk_output = 1;
6569 struct sctp_association *asoc;
6570 struct sctp_nets *net;
6572 ca = (struct sctp_copy_all *)ptr;
6573 if (ca->m == NULL) {
6576 if (ca->inp != inp) {
6580 if (ca->sndlen > 0) {
6581 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6583 /* can't copy so we are done */
6587 #ifdef SCTP_MBUF_LOGGING
6588 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6589 sctp_log_mbc(m, SCTP_MBUF_ICOPY);
6595 SCTP_TCB_LOCK_ASSERT(stcb);
6596 if (stcb->asoc.alternate) {
6597 net = stcb->asoc.alternate;
6599 net = stcb->asoc.primary_destination;
6601 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6602 /* Abort this assoc with m as the user defined reason */
6604 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6606 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6607 0, M_NOWAIT, 1, MT_DATA);
6608 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6611 struct sctp_paramhdr *ph;
6613 ph = mtod(m, struct sctp_paramhdr *);
6614 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6615 ph->param_length = htons((uint16_t) (sizeof(struct sctp_paramhdr) + ca->sndlen));
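/*
 * The prepended parameter header turns the user's data into a
 * User-Initiated Abort error cause: type SCTP_CAUSE_USER_INITIATED_ABT
 * with a length covering the header plus the user payload, which then
 * rides in the ABORT built below.
 */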
6618 * We add one here to keep the assoc from disappearing on
6621 atomic_add_int(&stcb->asoc.refcnt, 1);
6622 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6624 * sctp_abort_an_association calls sctp_free_asoc() free
6625 * association will NOT free it since we incremented the
6626 * refcnt .. we do this to prevent it being freed and things
6627 * getting tricky since we could end up (from free_asoc)
6628 * calling inpcb_free which would get a recursive lock call
6629 * to the iterator lock.. But as a consequence of that the
6630 * stcb will return to us un-locked.. since free_asoc
6631 * returns with either no TCB or the TCB unlocked, we must
6632 * relock.. to unlock in the iterator timer :-0
6634 SCTP_TCB_LOCK(stcb);
6635 atomic_add_int(&stcb->asoc.refcnt, -1);
6636 goto no_chunk_output;
6639 ret = sctp_msg_append(stcb, net, m,
6643 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6644 /* shutdown this assoc */
6645 if (TAILQ_EMPTY(&asoc->send_queue) &&
6646 TAILQ_EMPTY(&asoc->sent_queue) &&
6647 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
6648 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6651 /* there is nothing queued to send, so I'm
6653 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6654 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6655 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6656 /* only send SHUTDOWN the first time
6658 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6659 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6661 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6662 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6663 sctp_stop_timers_for_shutdown(stcb);
6664 sctp_send_shutdown(stcb, net);
6665 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6667 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6668 asoc->primary_destination);
6670 do_chunk_output = 0;
6674 * we still got (or just got) data to send,
6675 * so set SHUTDOWN_PENDING
6678 * XXX sockets draft says that SCTP_EOF
6679 * should be sent with no data. currently,
6680 * we will allow user data to be sent first
6681 * and move to SHUTDOWN-PENDING
6683 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6684 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6685 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6686 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6687 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6689 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6690 if (TAILQ_EMPTY(&asoc->send_queue) &&
6691 TAILQ_EMPTY(&asoc->sent_queue) &&
6692 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6693 struct mbuf *op_err;
6694 char msg[SCTP_DIAG_INFO_LEN];
6697 snprintf(msg, sizeof(msg),
6698 "%s:%d at %s", __FILE__, __LINE__, __func__);
6699 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6701 atomic_add_int(&stcb->asoc.refcnt, 1);
6702 sctp_abort_an_association(stcb->sctp_ep, stcb,
6703 op_err, SCTP_SO_NOT_LOCKED);
6704 atomic_add_int(&stcb->asoc.refcnt, -1);
6705 goto no_chunk_output;
6707 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6708 asoc->primary_destination);
6714 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6715 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
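/*
 * Nagle-style check: if Nagle is enabled (NODELAY off), there is data in
 * flight, and the amount still unsent would not fill a full MTU, hold off
 * and let the pending SACK trigger the send instead.
 */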
6717 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6718 (stcb->asoc.total_flight > 0) &&
6719 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6720 do_chunk_output = 0;
6722 if (do_chunk_output)
6723 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6724 else if (added_control) {
6725 int num_out, reason, now_filled = 0;
6729 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6730 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6731 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6742 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6744 struct sctp_copy_all *ca;
6746 ca = (struct sctp_copy_all *)ptr;
6748 * Do a notify here? Kacheong suggests that the notify be done at
6749 * the send time.. so you would push up a notification if any send
6750 * failed. Don't know if this is feasible since the only failures we
6751 * have are "memory" related and if you cannot get an mbuf to send
6752 * the data you surely can't get an mbuf to send up to notify the
6753 * user you can't send the data :->
6756 /* now free everything */
6757 sctp_m_freem(ca->m);
6758 SCTP_FREE(ca, SCTP_M_COPYAL);
6761 static struct mbuf *
6762 sctp_copy_out_all(struct uio *uio, int len)
6764 struct mbuf *ret, *at;
6765 int left, willcpy, cancpy, error;
6767 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
6773 SCTP_BUF_LEN(ret) = 0;
6774 /* save space for the data chunk header */
6775 cancpy = (int)M_TRAILINGSPACE(ret);
6776 willcpy = min(cancpy, left);
6779 /* Align data to the end */
6780 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6786 SCTP_BUF_LEN(at) = willcpy;
6787 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6790 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
6791 if (SCTP_BUF_NEXT(at) == NULL) {
6794 at = SCTP_BUF_NEXT(at);
6795 SCTP_BUF_LEN(at) = 0;
6796 cancpy = (int)M_TRAILINGSPACE(at);
6797 willcpy = min(cancpy, left);
6804 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6805 struct sctp_sndrcvinfo *srcv)
6808 struct sctp_copy_all *ca;
6810 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6814 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6817 memset(ca, 0, sizeof(struct sctp_copy_all));
6821 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6824 * take off the sendall flag, it would be bad if we failed to do
6827 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6828 /* get length and mbuf chain */
6830 ca->sndlen = (int)uio->uio_resid;
6831 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6832 if (ca->m == NULL) {
6833 SCTP_FREE(ca, SCTP_M_COPYAL);
6834 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6838 /* Gather the length of the send */
6842 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6843 ca->sndlen += SCTP_BUF_LEN(mat);
6846 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6847 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6848 SCTP_ASOC_ANY_STATE,
6850 sctp_sendall_completes, inp, 1);
6852 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6853 SCTP_FREE(ca, SCTP_M_COPYAL);
6854 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6862 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6864 struct sctp_tmit_chunk *chk, *nchk;
6866 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6867 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6868 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6870 sctp_m_freem(chk->data);
6873 asoc->ctrl_queue_cnt--;
6874 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6880 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6882 struct sctp_association *asoc;
6883 struct sctp_tmit_chunk *chk, *nchk;
6884 struct sctp_asconf_chunk *acp;
6887 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6888 /* find SCTP_ASCONF chunk in queue */
6889 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6891 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6892 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6897 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6899 sctp_m_freem(chk->data);
6902 asoc->ctrl_queue_cnt--;
6903 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6910 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6911 struct sctp_association *asoc,
6912 struct sctp_tmit_chunk **data_list,
6914 struct sctp_nets *net)
6917 struct sctp_tmit_chunk *tp1;
6919 for (i = 0; i < bundle_at; i++) {
6920 /* off of the send queue */
6921 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6922 asoc->send_queue_cnt--;
6925 * Any chunk NOT 0 you zap the time; chunk 0 gets
6926 * zapped or set based on if an RTO measurement is
6929 data_list[i]->do_rtt = 0;
6932 data_list[i]->sent_rcv_time = net->last_sent_time;
6933 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6934 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6935 if (data_list[i]->whoTo == NULL) {
6936 data_list[i]->whoTo = net;
6937 atomic_add_int(&net->ref_count, 1);
6939 /* on to the sent queue */
6940 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6941 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6942 struct sctp_tmit_chunk *tpp;
6944 /* need to move back */
6946 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6948 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6952 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6955 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6957 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6962 /* This does not lower until the cum-ack passes it */
6963 asoc->sent_queue_cnt++;
6964 if ((asoc->peers_rwnd <= 0) &&
6965 (asoc->total_flight == 0) &&
6967 /* Mark the chunk as being a window probe */
6968 SCTP_STAT_INCR(sctps_windowprobed);
6970 #ifdef SCTP_AUDITING_ENABLED
6971 sctp_audit_log(0xC2, 3);
6973 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6974 data_list[i]->snd_count = 1;
6975 data_list[i]->rec.data.chunk_was_revoked = 0;
6976 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6977 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6978 data_list[i]->whoTo->flight_size,
6979 data_list[i]->book_size,
6980 (uint32_t) (uintptr_t) data_list[i]->whoTo,
6981 data_list[i]->rec.data.TSN_seq);
6983 sctp_flight_size_increase(data_list[i]);
6984 sctp_total_flight_increase(stcb, data_list[i]);
6985 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6986 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6987 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6989 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6990 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6991 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6992 /* SWS sender side engages */
6993 asoc->peers_rwnd = 0;
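/*
 * Classic sender-side silly-window-syndrome avoidance: once the peer's
 * advertised window drops below sctp_sws_sender we treat it as zero, so
 * we stop dribbling out tiny chunks and wait for the window to reopen.
 */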
6996 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
6997 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
7002 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7003 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7008 struct sctp_tmit_chunk *chk, *nchk;
7010 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7011 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7012 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7013 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7014 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7015 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7016 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7017 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7018 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7019 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7020 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7021 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7022 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7023 /* Stray chunks must be cleaned up */
7025 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7027 sctp_m_freem(chk->data);
7030 asoc->ctrl_queue_cnt--;
7031 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
7032 asoc->fwd_tsn_cnt--;
7033 sctp_free_a_chunk(stcb, chk, so_locked);
7034 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7035 /* special handling, we must look into the param */
7036 if (chk != asoc->str_reset) {
7037 goto clean_up_anyway;
7045 sctp_can_we_split_this(struct sctp_tcb *stcb,
7047 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7050 * Make a decision on if I should split a msg into multiple parts.
7051 * This is only asked of incomplete messages.
7055 * If we are doing EEOR we need to always send it if it's the
7056 * entire thing, since it might be all the guy is putting in
7059 if (goal_mtu >= length) {
7061 * If we have data outstanding,
7062 * we get another chance when the sack
7063 * arrives to transmit - wait for more data
7065 if (stcb->asoc.total_flight == 0) {
7067 * If nothing is in flight, we zero the
7075 /* You can fill the rest */
7080 * For those strange folk that make the send buffer
7081 * smaller than our fragmentation point, we can't
7082 * get a full msg in so we have to allow splitting.
7084 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7087 if ((length <= goal_mtu) ||
7088 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7089 /* Sub-optimal residual, don't split in non-eeor mode. */
7093 * If we reach here length is larger than the goal_mtu. Do we wish
7094 * to split it for the sake of packing packets together?
7096 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7097 /* It's ok to split it */
7098 return (min(goal_mtu, frag_point));
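/*
 * For example, with goal_mtu = frag_point = 1452 and a 4000-byte message
 * (and split-point sysctls that permit splitting), the return above peels
 * off 1452 bytes now and leaves the remainder queued for later passes.
 */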
7100 /* Nope, can't split */
7106 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7107 struct sctp_stream_out *strq,
7109 uint32_t frag_point,
7114 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7119 /* Move from the stream to the send_queue keeping track of the total */
7120 struct sctp_association *asoc;
7121 struct sctp_stream_queue_pending *sp;
7122 struct sctp_tmit_chunk *chk;
7123 struct sctp_data_chunk *dchkh = NULL;
7124 struct sctp_idata_chunk *ndchkh = NULL;
7125 uint32_t to_move, length;
7127 uint8_t rcv_flags = 0;
7129 uint8_t send_lock_up = 0;
7131 SCTP_TCB_LOCK_ASSERT(stcb);
7134 /* sa_ignore FREED_MEMORY */
7135 sp = TAILQ_FIRST(&strq->outqueue);
7137 if (send_lock_up == 0) {
7138 SCTP_TCB_SEND_LOCK(stcb);
7141 sp = TAILQ_FIRST(&strq->outqueue);
7145 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7146 (stcb->asoc.idata_supported == 0) &&
7147 (strq->last_msg_incomplete)) {
7148 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7150 strq->last_msg_incomplete);
7151 strq->last_msg_incomplete = 0;
7155 SCTP_TCB_SEND_UNLOCK(stcb);
7160 if ((sp->msg_is_complete) && (sp->length == 0)) {
7161 if (sp->sender_all_done) {
7163 * We are doing deferred cleanup. Last time through
7164 * when we took all the data the sender_all_done was
7167 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7168 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7169 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7170 sp->sender_all_done,
7172 sp->msg_is_complete,
7176 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7177 SCTP_TCB_SEND_LOCK(stcb);
7180 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7181 TAILQ_REMOVE(&strq->outqueue, sp, next);
7182 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7183 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7184 (strq->chunks_on_queues == 0) &&
7185 TAILQ_EMPTY(&strq->outqueue)) {
7186 stcb->asoc.trigger_reset = 1;
7189 sctp_free_remote_addr(sp->net);
7193 sctp_m_freem(sp->data);
7196 sctp_free_a_strmoq(stcb, sp, so_locked);
7197 /* we can't be locked to it */
7199 SCTP_TCB_SEND_UNLOCK(stcb);
7202 /* back to get the next msg */
7206 * sender just finished this but still holds a
7214 /* is there some to get */
7215 if (sp->length == 0) {
7220 } else if (sp->discard_rest) {
7221 if (send_lock_up == 0) {
7222 SCTP_TCB_SEND_LOCK(stcb);
7225 /* Whack down the size */
7226 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7227 if ((stcb->sctp_socket != NULL) &&
7228 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7229 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7230 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7233 sctp_m_freem(sp->data);
7235 sp->tail_mbuf = NULL;
7244 some_taken = sp->some_taken;
7246 length = sp->length;
7247 if (sp->msg_is_complete) {
7248 /* The message is complete */
7249 to_move = min(length, frag_point);
7250 if (to_move == length) {
7251 /* All of it fits in the MTU */
7252 if (sp->some_taken) {
7253 rcv_flags |= SCTP_DATA_LAST_FRAG;
7255 rcv_flags |= SCTP_DATA_NOT_FRAG;
7257 sp->put_last_out = 1;
7258 if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
7259 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7262 /* Not all of it fits, we fragment */
7263 if (sp->some_taken == 0) {
7264 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7269 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7272 * We use a snapshot of length in case it
7273 * is expanding during the compare.
7278 if (to_move >= llen) {
7280 if (send_lock_up == 0) {
7282 * We are taking all of an incomplete msg
7283 * thus we need a send lock.
7285 SCTP_TCB_SEND_LOCK(stcb);
7287 if (sp->msg_is_complete) {
7288 /* the sender finished the
7294 if (sp->some_taken == 0) {
7295 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7299 /* Nothing to take. */
7306 /* If we reach here, we can copy out a chunk */
7307 sctp_alloc_a_chunk(stcb, chk);
7309 /* No chunk memory */
7315 * Setup for unordered if needed by looking at the user sent info
7318 if (sp->sinfo_flags & SCTP_UNORDERED) {
7319 rcv_flags |= SCTP_DATA_UNORDERED;
7321 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7322 (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7323 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7325 /* clear out the chunk before setting up */
7326 memset(chk, 0, sizeof(*chk));
7327 chk->rec.data.rcv_flags = rcv_flags;
7329 if (to_move >= length) {
7330 /* we think we can steal the whole thing */
7331 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7332 SCTP_TCB_SEND_LOCK(stcb);
7335 if (to_move < sp->length) {
7336 /* bail, it changed */
7339 chk->data = sp->data;
7340 chk->last_mbuf = sp->tail_mbuf;
7341 /* register the stealing */
7342 sp->data = sp->tail_mbuf = NULL;
7347 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7348 chk->last_mbuf = NULL;
7349 if (chk->data == NULL) {
7350 sp->some_taken = some_taken;
7351 sctp_free_a_chunk(stcb, chk, so_locked);
7356 #ifdef SCTP_MBUF_LOGGING
7357 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7358 sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7361 /* Pull off the data */
7362 m_adj(sp->data, to_move);
7363 /* Now lets work our way down and compact it */
7365 while (m && (SCTP_BUF_LEN(m) == 0)) {
7366 sp->data = SCTP_BUF_NEXT(m);
7367 SCTP_BUF_NEXT(m) = NULL;
7368 if (sp->tail_mbuf == m) {
7370 * Freeing tail? TSNH since
7371 * we supposedly were taking less
7372 * than the sp->length.
7375 panic("Huh, freeing tail? - TSNH");
7377 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7378 sp->tail_mbuf = sp->data = NULL;
7387 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7388 chk->copy_by_ref = 1;
7390 chk->copy_by_ref = 0;
7393 * get last_mbuf and counts of mb usage This is ugly but hopefully
7394 * its only one mbuf.
7396 if (chk->last_mbuf == NULL) {
7397 chk->last_mbuf = chk->data;
7398 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7399 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7402 if (to_move > length) {
7403 /*- This should not happen either
7404 * since we always lower to_move to the size
7405 * of sp->length if it's larger.
7408 panic("Huh, how can to_move be larger?");
7410 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7414 atomic_subtract_int(&sp->length, to_move);
7416 if (stcb->asoc.idata_supported == 0) {
7417 leading = sizeof(struct sctp_data_chunk);
7419 leading = sizeof(struct sctp_idata_chunk);
7421 if (M_LEADINGSPACE(chk->data) < leading) {
7422 /* Not enough room for a chunk header, get some */
7425 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
7428 * we're in trouble here. _PREPEND below will free
7429 * all the data if there is no leading space, so we
7430 * must put the data back and restore.
7432 if (send_lock_up == 0) {
7433 SCTP_TCB_SEND_LOCK(stcb);
7436 if (sp->data == NULL) {
7437 /* unsteal the data */
7438 sp->data = chk->data;
7439 sp->tail_mbuf = chk->last_mbuf;
7443 /* reassemble the data */
7445 sp->data = chk->data;
7446 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7448 sp->some_taken = some_taken;
7449 atomic_add_int(&sp->length, to_move);
7452 sctp_free_a_chunk(stcb, chk, so_locked);
7456 SCTP_BUF_LEN(m) = 0;
7457 SCTP_BUF_NEXT(m) = chk->data;
7459 M_ALIGN(chk->data, 4);
7462 if (stcb->asoc.idata_supported == 0) {
7463 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
7465 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_idata_chunk), M_NOWAIT);
7467 if (chk->data == NULL) {
7468 /* HELP, TSNH since we assured it would not above? */
7470 panic("prepend fails HELP?");
7472 SCTP_PRINTF("prepend fails HELP?\n");
7473 sctp_free_a_chunk(stcb, chk, so_locked);
7479 if (stcb->asoc.idata_supported == 0) {
7480 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7481 chk->book_size = chk->send_size = (uint16_t) (to_move + sizeof(struct sctp_data_chunk));
7483 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_idata_chunk));
7484 chk->book_size = chk->send_size = (uint16_t) (to_move + sizeof(struct sctp_idata_chunk));
7486 chk->book_size_scale = 0;
7487 chk->sent = SCTP_DATAGRAM_UNSENT;
7490 chk->asoc = &stcb->asoc;
7491 chk->pad_inplace = 0;
7492 chk->no_fr_allowed = 0;
7493 if (stcb->asoc.idata_supported == 0) {
7494 if (rcv_flags & SCTP_DATA_UNORDERED) {
7495 /* Just use 0. The receiver ignores the values. */
7496 chk->rec.data.stream_seq = 0;
7498 chk->rec.data.stream_seq = strq->next_mid_ordered;
7499 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7500 strq->next_mid_ordered++;
7504 if (rcv_flags & SCTP_DATA_UNORDERED) {
7505 chk->rec.data.stream_seq = strq->next_mid_unordered;
7506 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7507 strq->next_mid_unordered++;
7510 chk->rec.data.stream_seq = strq->next_mid_ordered;
7511 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7512 strq->next_mid_ordered++;
7516 chk->rec.data.stream_number = sp->stream;
7517 chk->rec.data.payloadtype = sp->ppid;
7518 chk->rec.data.context = sp->context;
7519 chk->rec.data.doing_fast_retransmit = 0;
7521 chk->rec.data.timetodrop = sp->ts;
7522 chk->flags = sp->act_flags;
7525 chk->whoTo = sp->net;
7526 atomic_add_int(&chk->whoTo->ref_count, 1);
7530 if (sp->holds_key_ref) {
7531 chk->auth_keyid = sp->auth_keyid;
7532 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7533 chk->holds_key_ref = 1;
7535 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
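/*
 * atomic_fetchadd_int() returns the pre-increment value, so the chunk is
 * stamped with the current sending_seq while the association's next TSN
 * advances by one in the same atomic step.
 */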
7536 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7537 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7538 (uint32_t) (uintptr_t) stcb, sp->length,
7539 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7540 chk->rec.data.TSN_seq);
7542 if (stcb->asoc.idata_supported == 0) {
7543 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7545 ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
7548 * Put the rest of the things in place now. Size was done earlier in
7549 * previous loop prior to padding.
7552 #ifdef SCTP_ASOCLOG_OF_TSNS
7553 SCTP_TCB_LOCK_ASSERT(stcb);
7554 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7555 asoc->tsn_out_at = 0;
7556 asoc->tsn_out_wrapped = 1;
7558 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7559 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7560 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7561 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7562 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7563 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7564 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7565 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7568 if (stcb->asoc.idata_supported == 0) {
7569 dchkh->ch.chunk_type = SCTP_DATA;
7570 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7571 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7572 dchkh->dp.stream_id = htons((strq->stream_no & 0x0000ffff));
7573 dchkh->dp.stream_sequence = htons((uint16_t) chk->rec.data.stream_seq);
7574 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7575 dchkh->ch.chunk_length = htons(chk->send_size);
7577 ndchkh->ch.chunk_type = SCTP_IDATA;
7578 ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7579 ndchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7580 ndchkh->dp.stream_id = htons(strq->stream_no);
7581 ndchkh->dp.reserved = htons(0);
7582 ndchkh->dp.msg_id = htonl(chk->rec.data.stream_seq);
7584 ndchkh->dp.ppid_fsn.protocol_id = chk->rec.data.payloadtype;
7586 ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
7588 ndchkh->ch.chunk_length = htons(chk->send_size);
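/*
 * Header layout difference in a nutshell: a classic DATA chunk carries a
 * 16-bit stream sequence number, while the I-DATA chunk (RFC 8260)
 * replaces it with a 32-bit message identifier and, for all but the first
 * fragment, a fragment sequence number in place of the PPID.
 */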
7590 /* Now advance the chk->send_size by the actual pad needed. */
7591 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7596 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7597 lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
7599 chk->last_mbuf = lm;
7600 chk->pad_inplace = 1;
7602 chk->send_size += pads;
7604 if (PR_SCTP_ENABLED(chk->flags)) {
7605 asoc->pr_sctp_cnt++;
7607 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7608 /* All done pull and kill the message */
7609 if (sp->put_last_out == 0) {
7610 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7611 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7612 sp->sender_all_done,
7614 sp->msg_is_complete,
7618 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7619 SCTP_TCB_SEND_LOCK(stcb);
7622 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7623 TAILQ_REMOVE(&strq->outqueue, sp, next);
7624 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7625 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7626 (strq->chunks_on_queues == 0) &&
7627 TAILQ_EMPTY(&strq->outqueue)) {
7628 stcb->asoc.trigger_reset = 1;
7631 sctp_free_remote_addr(sp->net);
7635 sctp_m_freem(sp->data);
7638 sctp_free_a_strmoq(stcb, sp, so_locked);
7640 asoc->chunks_on_out_queue++;
7641 strq->chunks_on_queues++;
7642 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7643 asoc->send_queue_cnt++;
7646 SCTP_TCB_SEND_UNLOCK(stcb);
7653 sctp_fill_outqueue(struct sctp_tcb *stcb,
7654 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7655 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7660 struct sctp_association *asoc;
7661 struct sctp_stream_out *strq;
7662 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7665 SCTP_TCB_LOCK_ASSERT(stcb);
7667 switch (net->ro._l_addr.sa.sa_family) {
7670 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7675 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7680 goal_mtu = net->mtu;
7683 /* Need an allowance for the data chunk header too */
7684 if (stcb->asoc.idata_supported == 0) {
7685 goal_mtu -= sizeof(struct sctp_data_chunk);
7687 goal_mtu -= sizeof(struct sctp_idata_chunk);
7690 /* must make even word boundary */
7691 goal_mtu &= 0xfffffffc;
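/*
 * Masking with 0xfffffffc rounds the remaining budget down to a 4-byte
 * boundary, e.g. a goal_mtu of 1467 becomes 1464.
 */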
7692 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7693 while ((goal_mtu > 0) && strq) {
7696 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point,
7697 &giveup, eeor_mode, &bail, so_locked);
7698 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7700 if ((giveup) || bail) {
7703 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7707 total_moved += moved_how_much;
7708 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7709 goal_mtu &= 0xfffffffc;
7714 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7716 if (total_moved == 0) {
7717 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7718 (net == stcb->asoc.primary_destination)) {
7719 /* ran dry for primary network net */
7720 SCTP_STAT_INCR(sctps_primary_randry);
7721 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7722 /* ran dry with CMT on */
7723 SCTP_STAT_INCR(sctps_cmt_randry);
7729 sctp_fix_ecn_echo(struct sctp_association *asoc)
7731 struct sctp_tmit_chunk *chk;
7733 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7734 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7735 chk->sent = SCTP_DATAGRAM_UNSENT;
7741 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7743 struct sctp_association *asoc;
7744 struct sctp_tmit_chunk *chk;
7745 struct sctp_stream_queue_pending *sp;
7752 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7753 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7754 if (sp->net == net) {
7755 sctp_free_remote_addr(sp->net);
7760 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7761 if (chk->whoTo == net) {
7762 sctp_free_remote_addr(chk->whoTo);
7769 sctp_med_chunk_output(struct sctp_inpcb *inp,
7770 struct sctp_tcb *stcb,
7771 struct sctp_association *asoc,
7774 int control_only, int from_where,
7775 struct timeval *now, int *now_filled, int frag_point, int so_locked
7776 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7782 * Ok this is the generic chunk service queue. we must do the
7784 * - Service the stream queue that is next, moving any
7785 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7786 * LAST to the out queue in one pass) and assigning TSN's. This
7787 * only applies though if the peer does not support NDATA. For NDATA
7788 * chunks it's ok to not send the entire message ;-)
7789 * - Check to see if the cwnd/rwnd allows any output, if so we go ahead and
7790 * formulate and send the low level chunks. Making sure to combine
7791 * any control in the control chunk queue also.
7793 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7794 struct mbuf *outchain, *endoutchain;
7795 struct sctp_tmit_chunk *chk, *nchk;
7797 /* temp arrays for unlinking */
7798 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7799 int no_fragmentflg, error;
7800 unsigned int max_rwnd_per_dest, max_send_per_dest;
7801 int one_chunk, hbflag, skip_data_for_this_net;
7802 int asconf, cookie, no_out_cnt;
7803 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7804 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7806 uint32_t auth_offset = 0;
7807 struct sctp_auth_chunk *auth = NULL;
7808 uint16_t auth_keyid;
7809 int override_ok = 1;
7810 int skip_fill_up = 0;
7811 int data_auth_reqd = 0;
7814 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7821 auth_keyid = stcb->asoc.authinfo.active_keyid;
7822 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7823 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7824 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7829 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7831 * First lets prime the pump. For each destination, if there is room
7832 * in the flight size, attempt to pull an MTU's worth out of the
7833 * stream queues into the general send_queue
7835 #ifdef SCTP_AUDITING_ENABLED
7836 sctp_audit_log(0xC2, 2);
7838 SCTP_TCB_LOCK_ASSERT(stcb);
7845 /* Nothing possible to send? */
7846 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7847 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7848 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7849 TAILQ_EMPTY(&asoc->send_queue) &&
7850 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
7855 if (asoc->peers_rwnd == 0) {
7856 /* No room in peers rwnd */
7858 if (asoc->total_flight > 0) {
7859 /* we are allowed one chunk in flight */
7863 if (stcb->asoc.ecn_echo_cnt_onq) {
7864 /* Record where a sack goes, if any */
7865 if (no_data_chunks &&
7866 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7867 /* Nothing but ECNe to send - we don't do that */
7868 goto nothing_to_send;
7870 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7871 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7872 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7873 sack_goes_to = chk->whoTo;
7878 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
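/*
 * Spread the peer's window (plus what is already in flight) evenly over
 * the destinations, e.g. peers_rwnd = 20000, total_flight = 4000 and two
 * nets gives max_rwnd_per_dest = 12000.
 */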
7879 if (stcb->sctp_socket)
7880 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7882 max_send_per_dest = 0;
7883 if (no_data_chunks == 0) {
7884 /* How many non-directed chunks are there? */
7885 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7886 if (chk->whoTo == NULL) {
7888 * We already have non-directed chunks on
7889 * the queue, no need to do a fill-up.
7897 if ((no_data_chunks == 0) &&
7898 (skip_fill_up == 0) &&
7899 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7900 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7902 * This for loop we are in takes in each net, if
7903 * it's got space in cwnd and has data sent to it
7904 * (when CMT is off) then it calls
7905 * sctp_fill_outqueue for the net. This gets data on
7906 * the send queue for that network.
7908 * In sctp_fill_outqueue TSN's are assigned and data
7909 * is copied out of the stream buffers. Note mostly
7910 * copy by reference (we hope).
7912 net->window_probe = 0;
7913 if ((net != stcb->asoc.alternate) &&
7914 ((net->dest_state & SCTP_ADDR_PF) ||
7915 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7916 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7917 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7918 sctp_log_cwnd(stcb, net, 1,
7919 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7923 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7924 (net->flight_size == 0)) {
7925 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7927 if (net->flight_size >= net->cwnd) {
7928 /* skip this network, no room - can't fill */
7929 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7930 sctp_log_cwnd(stcb, net, 3,
7931 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7935 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7936 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7938 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7940 /* memory alloc failure */
7946 /* now service each destination and send out what we can for it */
7947 /* Nothing to send? */
7948 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7949 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7950 TAILQ_EMPTY(&asoc->send_queue)) {
7954 if (asoc->sctp_cmt_on_off > 0) {
7955 /* get the last start point */
7956 start_at = asoc->last_net_cmt_send_started;
7957 if (start_at == NULL) {
7958 /* null so to beginning */
7959 start_at = TAILQ_FIRST(&asoc->nets);
7961 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7962 if (start_at == NULL) {
7963 start_at = TAILQ_FIRST(&asoc->nets);
7966 asoc->last_net_cmt_send_started = start_at;
7968 start_at = TAILQ_FIRST(&asoc->nets);
7970 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7971 if (chk->whoTo == NULL) {
7972 if (asoc->alternate) {
7973 chk->whoTo = asoc->alternate;
7975 chk->whoTo = asoc->primary_destination;
7977 atomic_add_int(&chk->whoTo->ref_count, 1);
7980 old_start_at = NULL;
7981 again_one_more_time:
7982 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7983 /* how much can we send? */
7984 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7985 if (old_start_at && (old_start_at == net)) {
7986 /* through list completely. */
7990 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7991 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7992 (net->flight_size >= net->cwnd)) {
7994 * Nothing on control or asconf and flight is full,
7995 * we can skip even in the CMT case.
8000 endoutchain = outchain = NULL;
8003 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8004 skip_data_for_this_net = 1;
8006 skip_data_for_this_net = 0;
8008 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8011 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8016 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8026 if (mtu > asoc->peers_rwnd) {
8027 if (asoc->total_flight > 0) {
8028 /* We have a packet in flight somewhere */
8029 r_mtu = asoc->peers_rwnd;
8031 /* We are always allowed to send one MTU out */
8039 /************************/
8040 /* ASCONF transmission */
8041 /************************/
8042 /* Now first lets go through the asconf queue */
8043 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8044 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8047 if (chk->whoTo == NULL) {
8048 if (asoc->alternate == NULL) {
8049 if (asoc->primary_destination != net) {
8053 if (asoc->alternate != net) {
8058 if (chk->whoTo != net) {
8062 if (chk->data == NULL) {
8065 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8066 chk->sent != SCTP_DATAGRAM_RESEND) {
8070 * if no AUTH is yet included and this chunk
8071 * requires it, make sure to account for it. We
8072 * don't apply the size until the AUTH chunk is
8073 * actually added below in case there is no room for
8074 * this chunk. NOTE: we overload the use of "omtu"
8077 if ((auth == NULL) &&
8078 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8079 stcb->asoc.peer_auth_chunks)) {
8080 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8083 /* Here we do NOT factor the r_mtu */
8084 if ((chk->send_size < (int)(mtu - omtu)) ||
8085 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8087 * We probably should glom the mbuf chain
8088 * from the chk->data for control but the
8089 * problem is it becomes yet one more level
8090 * of tracking to do if for some reason
8091 * output fails. Then I have got to
8092 * reconstruct the merged control chain.. el
8093 * yucko.. for now we take the easy way and
8097 * Add an AUTH chunk, if chunk requires it
8098 * save the offset into the chain for AUTH
8100 if ((auth == NULL) &&
8101 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8102 stcb->asoc.peer_auth_chunks))) {
8103 outchain = sctp_add_auth_chunk(outchain,
8108 chk->rec.chunk_id.id);
8109 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8111 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8112 (int)chk->rec.chunk_id.can_take_data,
8113 chk->send_size, chk->copy_by_ref);
8114 if (outchain == NULL) {
8116 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8119 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8120 /* update our MTU size */
8121 if (mtu > (chk->send_size + omtu))
8122 mtu -= (chk->send_size + omtu);
8125 to_out += (chk->send_size + omtu);
8126 /* Do clear IP_DF ? */
8127 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8130 if (chk->rec.chunk_id.can_take_data)
8133 * set hb flag since we can use these for
8139 * should sysctl this: don't bundle data
8140 * with ASCONF since it requires AUTH
8143 chk->sent = SCTP_DATAGRAM_SENT;
8144 if (chk->whoTo == NULL) {
8146 atomic_add_int(&net->ref_count, 1);
8151 * Ok we are out of room but we can
8152 * output without affecting the
8153 * flight size since this little guy
8154 * is a control only packet.
8156 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8158 * do NOT clear the asconf flag as
8159 * it is used to do appropriate
8160 * source address selection.
8162 if (*now_filled == 0) {
8163 (void)SCTP_GETTIME_TIMEVAL(now);
8166 net->last_sent_time = *now;
8168 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8169 (struct sockaddr *)&net->ro._l_addr,
8170 outchain, auth_offset, auth,
8171 stcb->asoc.authinfo.active_keyid,
8172 no_fragmentflg, 0, asconf,
8173 inp->sctp_lport, stcb->rport,
8174 htonl(stcb->asoc.peer_vtag),
8178 /* error, we could not
8180 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8181 if (from_where == 0) {
8182 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8184 if (error == ENOBUFS) {
8185 asoc->ifp_had_enobuf = 1;
8186 SCTP_STAT_INCR(sctps_lowlevelerr);
8188 /* error, could not output */
8189 if (error == EHOSTUNREACH) {
8195 sctp_move_chunks_from_net(stcb, net);
8200 asoc->ifp_had_enobuf = 0;
8203 * increase the number we sent, if a
8204 * cookie is sent we don't tell them
8207 outchain = endoutchain = NULL;
8211 *num_out += ctl_cnt;
8212 /* recalc a clean slate and setup */
8213 switch (net->ro._l_addr.sa.sa_family) {
8216 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8221 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8238 /************************/
8239 /* Control transmission */
8240 /************************/
8241 /* Now first let's go through the control queue */
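/*
 * Each queued control chunk is checked below for: a suitable
 * destination (ECN-ECHOs are steered away from where the SACK goes),
 * the UNSENT state, any AUTH chunk that must precede it, and whether
 * it still fits in the remaining MTU before it is copied onto the
 * outgoing chain.
 */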
8242 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8243 if ((sack_goes_to) &&
8244 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8245 (chk->whoTo != sack_goes_to)) {
8247 * if we have a sack in queue, and we are
8248 * looking at an ecn echo that is NOT queued
8249 * to where the sack is going..
8251 if (chk->whoTo == net) {
8252 /* Don't transmit it to where it's
8253 * going (current net) */
8255 } else if (sack_goes_to == net) {
8256 /* But do transmit it to this
8258 goto skip_net_check;
8261 if (chk->whoTo == NULL) {
8262 if (asoc->alternate == NULL) {
8263 if (asoc->primary_destination != net) {
8267 if (asoc->alternate != net) {
8272 if (chk->whoTo != net) {
8277 if (chk->data == NULL) {
8280 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8282 * It must be unsent. Cookies and ASCONF's
8283 * hang around, but their timers will force
8284 * a send when marked for resend.
8289 * if no AUTH is yet included and this chunk
8290 * requires it, make sure to account for it. We
8291 * don't apply the size until the AUTH chunk is
8292 * actually added below in case there is no room for
8293 * this chunk. NOTE: we overload the use of "omtu"
8296 if ((auth == NULL) &&
8297 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8298 stcb->asoc.peer_auth_chunks)) {
8299 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8302 /* Here we do NOT factor the r_mtu */
8303 if ((chk->send_size <= (int)(mtu - omtu)) ||
8304 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8306 * We probably should glom the mbuf chain
8307 * from the chk->data for control but the
8308 * problem is it becomes yet one more level
8309 * of tracking to do if for some reason
8310 * output fails. Then I have got to
8311 * reconstruct the merged control chain.. el
8312 * yucko.. for now we take the easy way and
8316 * Add an AUTH chunk, if chunk requires it
8317 * save the offset into the chain for AUTH
8319 if ((auth == NULL) &&
8320 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8321 stcb->asoc.peer_auth_chunks))) {
8322 outchain = sctp_add_auth_chunk(outchain,
8327 chk->rec.chunk_id.id);
8328 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8330 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8331 (int)chk->rec.chunk_id.can_take_data,
8332 chk->send_size, chk->copy_by_ref);
8333 if (outchain == NULL) {
8335 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8338 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8339 /* update our MTU size */
8340 if (mtu > (chk->send_size + omtu))
8341 mtu -= (chk->send_size + omtu);
8344 to_out += (chk->send_size + omtu);
8345 /* Do clear IP_DF ? */
8346 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8349 if (chk->rec.chunk_id.can_take_data)
8351 /* Mark things to be removed, if needed */
8352 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8353 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8354 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8355 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8356 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8357 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8358 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8359 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8360 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8361 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8362 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8363 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8366 /* remove these chunks at the end */
8367 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8368 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8369 /* turn off the timer */
8370 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8371 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8373 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8379 * Other chunks, since they have
8380 * timers running (i.e. COOKIE) we
8381 * just "trust" that it gets sent or
8385 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8388 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8390 * Increment the ECNE send count
8391 * here; this means we may be
8392 * over-zealous in our
8393 * counting if the send
8394 * fails, but it's the best
8395 * place to do it (we used
8396 * to do it in the queue of
8397 * the chunk, but that did
8398 * not tell how many times
8401 SCTP_STAT_INCR(sctps_sendecne);
8403 chk->sent = SCTP_DATAGRAM_SENT;
8404 if (chk->whoTo == NULL) {
8406 atomic_add_int(&net->ref_count, 1);
8412 * Ok we are out of room but we can
8413 * output without affecting the
8414 * flight size since this little guy
8415 * is a control only packet.
8418 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8420 * do NOT clear the asconf
8421 * flag as it is used to do
8422 * appropriate source
8423 * address selection.
8427 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8430 /* Only HB or ASCONF advances time */
8432 if (*now_filled == 0) {
8433 (void)SCTP_GETTIME_TIMEVAL(now);
8436 net->last_sent_time = *now;
8439 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8440 (struct sockaddr *)&net->ro._l_addr,
8443 stcb->asoc.authinfo.active_keyid,
8444 no_fragmentflg, 0, asconf,
8445 inp->sctp_lport, stcb->rport,
8446 htonl(stcb->asoc.peer_vtag),
8450 /* error, we could not
8452 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8453 if (from_where == 0) {
8454 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8456 if (error == ENOBUFS) {
8457 asoc->ifp_had_enobuf = 1;
8458 SCTP_STAT_INCR(sctps_lowlevelerr);
8460 if (error == EHOSTUNREACH) {
8466 sctp_move_chunks_from_net(stcb, net);
8471 asoc->ifp_had_enobuf = 0;
8474 * increase the number we sent, if a
8475 * cookie is sent we don't tell them
8478 outchain = endoutchain = NULL;
8482 *num_out += ctl_cnt;
8483 /* recalc a clean slate and setup */
8484 switch (net->ro._l_addr.sa.sa_family) {
8487 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8492 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8509 /* JRI: if dest is in PF state, do not send data to it */
8510 if ((asoc->sctp_cmt_on_off > 0) &&
8511 (net != stcb->asoc.alternate) &&
8512 (net->dest_state & SCTP_ADDR_PF)) {
8515 if (net->flight_size >= net->cwnd) {
8518 if ((asoc->sctp_cmt_on_off > 0) &&
8519 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8520 (net->flight_size > max_rwnd_per_dest)) {
8524 * We need a specific accounting for the usage of the send
8525 * buffer. We also need to check the number of messages per
8526 * net. For now, this is better than nothing and it is disabled
8529 if ((asoc->sctp_cmt_on_off > 0) &&
8530 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8531 (max_send_per_dest > 0) &&
8532 (net->flight_size > max_send_per_dest)) {
8535 /*********************/
8536 /* Data transmission */
8537 /*********************/
8539 * if AUTH for DATA is required and no AUTH has been added
8540 * yet, account for this in the mtu now... if no data can be
8541 * bundled, this adjustment won't matter anyway since the
8542 * packet will be going out...
8544 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8545 stcb->asoc.peer_auth_chunks);
8546 if (data_auth_reqd && (auth == NULL)) {
8547 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8549 /* now let's add any data within the MTU constraints */
8550 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8553 if (net->mtu > SCTP_MIN_V4_OVERHEAD)
8554 omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8561 if (net->mtu > SCTP_MIN_OVERHEAD)
8562 omtu = net->mtu - SCTP_MIN_OVERHEAD;
8572 if ((((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
8573 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
8574 (skip_data_for_this_net == 0)) ||
8576 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8577 if (no_data_chunks) {
8578 /* let only control go out */
8582 if (net->flight_size >= net->cwnd) {
8583 /* skip this net, no room for data */
8587 if ((chk->whoTo != NULL) &&
8588 (chk->whoTo != net)) {
8589 /* Don't send the chunk on this net */
8592 if (asoc->sctp_cmt_on_off == 0) {
8593 if ((asoc->alternate) &&
8594 (asoc->alternate != net) &&
8595 (chk->whoTo == NULL)) {
8597 } else if ((net != asoc->primary_destination) &&
8598 (asoc->alternate == NULL) &&
8599 (chk->whoTo == NULL)) {
8603 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8605 * strange, we have a chunk that is
8606 * too big for its destination and
8607 * yet no fragment ok flag.
8608 * Something went wrong when the
8609 * PMTU changed...we did not mark
8610 * this chunk for some reason?? I
8611 * will fix it here by letting IP
8612 * fragment it for now and printing
8613 * a warning. This really should not
8616 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8617 chk->send_size, mtu);
8618 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8620 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8621 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8622 struct sctp_data_chunk *dchkh;
8624 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8625 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
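/*
 * SCTP_DATA_SACK_IMMEDIATELY sets the I-bit of the DATA chunk
 * (RFC 7053), asking the receiver to SACK right away instead of
 * delaying; useful here since the association is shutting down.
 */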
8627 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8628 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8629 /* ok we will add this one */
8632 * Add an AUTH chunk, if chunk
8633 * requires it, save the offset into
8634 * the chain for AUTH
8636 if (data_auth_reqd) {
8638 outchain = sctp_add_auth_chunk(outchain,
8644 auth_keyid = chk->auth_keyid;
8646 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8647 } else if (override_ok) {
8650 auth_keyid = chk->auth_keyid;
8652 } else if (auth_keyid != chk->auth_keyid) {
8654 * so done bundling */
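/*
 * A packet carries at most one AUTH chunk, computed with a single
 * shared key. Once auth_keyid is fixed for this packet, a DATA chunk
 * that wants a different key cannot be bundled and must wait for the
 * next packet.
 */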
8658 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8659 chk->send_size, chk->copy_by_ref);
8660 if (outchain == NULL) {
8661 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8662 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8663 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8666 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8669 /* update our MTU size */
8670 /* Do clear IP_DF ? */
8671 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8674 /* unsigned subtraction of mtu */
8675 if (mtu > chk->send_size)
8676 mtu -= chk->send_size;
8679 /* unsigned subtraction of r_mtu */
8680 if (r_mtu > chk->send_size)
8681 r_mtu -= chk->send_size;
8685 to_out += chk->send_size;
8686 if ((to_out > mx_mtu) && no_fragmentflg) {
8688 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8690 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8694 chk->window_probe = 0;
8695 data_list[bundle_at++] = chk;
8696 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8699 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8700 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8701 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8703 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8705 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8706 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8716 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8718 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8719 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8720 data_list[0]->window_probe = 1;
8721 net->window_probe = 1;
8727 * Must be sent in order of the
8728 * TSN's (on a network)
8732 } /* for (chunk gather loop for this net) */
8733 } /* if asoc.state OPEN */
no_data_fill:
8734 /* Is there something to send for this destination? */
8736 /* We may need to start a control timer or two */
8738 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8741 * do NOT clear the asconf flag as it is
8742 * used to do appropriate source address
8747 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8750 /* must start a send timer if data is being sent */
8751 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8753 * no timer running on this destination
8756 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8758 if (bundle_at || hbflag) {
8759 /* For data/asconf and hb set time */
8760 if (*now_filled == 0) {
8761 (void)SCTP_GETTIME_TIMEVAL(now);
8764 net->last_sent_time = *now;
8766 /* Now send it, if there is anything to send :> */
8767 if ((error = sctp_lowlevel_chunk_output(inp,
8770 (struct sockaddr *)&net->ro._l_addr,
8778 inp->sctp_lport, stcb->rport,
8779 htonl(stcb->asoc.peer_vtag),
8783 /* error, we could not output */
8784 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8785 if (from_where == 0) {
8786 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8788 if (error == ENOBUFS) {
8789 SCTP_STAT_INCR(sctps_lowlevelerr);
8790 asoc->ifp_had_enobuf = 1;
8792 if (error == EHOSTUNREACH) {
8794 * Destination went unreachable
8797 sctp_move_chunks_from_net(stcb, net);
8801 * I add this line to be paranoid. As far as
8802 * I can tell the continue, takes us back to
8803 * the top of the for, but just to make sure
8804 * I will reset these again here.
8806 ctl_cnt = bundle_at = 0;
8807 continue; /* This takes us back to the
8808 * for() for the nets. */
8810 asoc->ifp_had_enobuf = 0;
8816 *num_out += (ctl_cnt + bundle_at);
8819 /* setup for a RTO measurement */
8820 tsns_sent = data_list[0]->rec.data.TSN_seq;
8821 /* fill time if not already filled */
8822 if (*now_filled == 0) {
8823 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8825 *now = asoc->time_last_sent;
8827 asoc->time_last_sent = *now;
8829 if (net->rto_needed) {
8830 data_list[0]->do_rtt = 1;
8831 net->rto_needed = 0;
8833 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8834 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8840 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8841 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8844 if (old_start_at == NULL) {
8845 old_start_at = start_at;
8846 start_at = TAILQ_FIRST(&asoc->nets);
8848 goto again_one_more_time;
8851 * At the end there should be no NON timed chunks hanging on this
8854 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8855 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8857 if ((*num_out == 0) && (*reason_code == 0)) {
8862 sctp_clean_up_ctl(stcb, asoc, so_locked);
8867 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8870 * Prepend an OPERATION_ERROR chunk header and put the chunk on the end
8871 * of the control chunk queue.
8873 struct sctp_chunkhdr *hdr;
8874 struct sctp_tmit_chunk *chk;
8875 struct mbuf *mat, *last_mbuf;
8876 uint32_t chunk_length;
8877 uint16_t padding_length;
8879 SCTP_TCB_LOCK_ASSERT(stcb);
8880 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
8881 if (op_err == NULL) {
8886 for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
8887 chunk_length += SCTP_BUF_LEN(mat);
8888 if (SCTP_BUF_NEXT(mat) == NULL) {
8892 if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
8893 sctp_m_freem(op_err);
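/*
 * Chunks must end on a 4-byte boundary (RFC 4960, section 3.2), so
 * compute how many pad bytes to append: e.g. a chunk_length of 23
 * gives padding_length = 4 - (23 % 4) = 1.
 */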
8896 padding_length = chunk_length % 4;
8897 if (padding_length != 0) {
8898 padding_length = 4 - padding_length;
8900 if (padding_length != 0) {
8901 if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
8902 sctp_m_freem(op_err);
8906 sctp_alloc_a_chunk(stcb, chk);
8909 sctp_m_freem(op_err);
8912 chk->copy_by_ref = 0;
8913 chk->send_size = (uint16_t) chunk_length;
8914 chk->sent = SCTP_DATAGRAM_UNSENT;
8916 chk->asoc = &stcb->asoc;
8919 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8920 chk->rec.chunk_id.can_take_data = 0;
8921 hdr = mtod(op_err, struct sctp_chunkhdr *);
8922 hdr->chunk_type = SCTP_OPERATION_ERROR;
8923 hdr->chunk_flags = 0;
8924 hdr->chunk_length = htons(chk->send_size);
8925 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8926 chk->asoc->ctrl_queue_cnt++;
8930 sctp_send_cookie_echo(struct mbuf *m,
8932 struct sctp_tcb *stcb,
8933 struct sctp_nets *net)
8936 * pull out the cookie and put it at the front of the control chunk
8940 struct mbuf *cookie;
8941 struct sctp_paramhdr parm, *phdr;
8942 struct sctp_chunkhdr *hdr;
8943 struct sctp_tmit_chunk *chk;
8944 uint16_t ptype, plen;
8946 SCTP_TCB_LOCK_ASSERT(stcb);
8947 /* First find the cookie in the param area */
8949 at = offset + sizeof(struct sctp_init_chunk);
8951 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8955 ptype = ntohs(phdr->param_type);
8956 plen = ntohs(phdr->param_length);
8957 if (ptype == SCTP_STATE_COOKIE) {
8960 /* found the cookie */
8961 if ((pad = (plen % 4))) {
8964 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
8965 if (cookie == NULL) {
8969 #ifdef SCTP_MBUF_LOGGING
8970 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8971 sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
8976 at += SCTP_SIZE32(plen);
8978 /* ok, we got the cookie lets change it into a cookie echo chunk */
8979 /* first the change from param to cookie */
8980 hdr = mtod(cookie, struct sctp_chunkhdr *);
8981 hdr->chunk_type = SCTP_COOKIE_ECHO;
8982 hdr->chunk_flags = 0;
8983 /* get the chunk stuff now and place it in the FRONT of the queue */
8984 sctp_alloc_a_chunk(stcb, chk);
8987 sctp_m_freem(cookie);
8990 chk->copy_by_ref = 0;
8991 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8992 chk->rec.chunk_id.can_take_data = 0;
8993 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8994 chk->send_size = plen;
8995 chk->sent = SCTP_DATAGRAM_UNSENT;
8997 chk->asoc = &stcb->asoc;
9000 atomic_add_int(&chk->whoTo->ref_count, 1);
9001 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9002 chk->asoc->ctrl_queue_cnt++;
9007 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9011 struct sctp_nets *net)
9014 * take an HB request and make it into an HB ACK and send it.
9016 struct mbuf *outchain;
9017 struct sctp_chunkhdr *chdr;
9018 struct sctp_tmit_chunk *chk;
9022 /* must have a net pointer */
9025 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9026 if (outchain == NULL) {
9027 /* gak out of memory */
9030 #ifdef SCTP_MBUF_LOGGING
9031 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9032 sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9035 chdr = mtod(outchain, struct sctp_chunkhdr *);
9036 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9037 chdr->chunk_flags = 0;
9038 if (chk_length % 4) {
9040 uint32_t cpthis = 0;
9043 padlen = 4 - (chk_length % 4);
9044 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
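/*
 * m_copyback() appends "padlen" bytes taken from the zeroed "cpthis"
 * word, padding the copied HEARTBEAT-ACK out to a 4-byte boundary.
 */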
9046 sctp_alloc_a_chunk(stcb, chk);
9049 sctp_m_freem(outchain);
9052 chk->copy_by_ref = 0;
9053 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9054 chk->rec.chunk_id.can_take_data = 1;
9056 chk->send_size = chk_length;
9057 chk->sent = SCTP_DATAGRAM_UNSENT;
9059 chk->asoc = &stcb->asoc;
9060 chk->data = outchain;
9062 atomic_add_int(&chk->whoTo->ref_count, 1);
9063 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9064 chk->asoc->ctrl_queue_cnt++;
9068 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9070 /* formulate and queue a cookie-ack back to sender */
9071 struct mbuf *cookie_ack;
9072 struct sctp_chunkhdr *hdr;
9073 struct sctp_tmit_chunk *chk;
9075 SCTP_TCB_LOCK_ASSERT(stcb);
9077 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9078 if (cookie_ack == NULL) {
9082 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9083 sctp_alloc_a_chunk(stcb, chk);
9086 sctp_m_freem(cookie_ack);
9089 chk->copy_by_ref = 0;
9090 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9091 chk->rec.chunk_id.can_take_data = 1;
9093 chk->send_size = sizeof(struct sctp_chunkhdr);
9094 chk->sent = SCTP_DATAGRAM_UNSENT;
9096 chk->asoc = &stcb->asoc;
9097 chk->data = cookie_ack;
9098 if (chk->asoc->last_control_chunk_from != NULL) {
9099 chk->whoTo = chk->asoc->last_control_chunk_from;
9100 atomic_add_int(&chk->whoTo->ref_count, 1);
9104 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9105 hdr->chunk_type = SCTP_COOKIE_ACK;
9106 hdr->chunk_flags = 0;
9107 hdr->chunk_length = htons(chk->send_size);
9108 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9109 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9110 chk->asoc->ctrl_queue_cnt++;
9116 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9118 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9119 struct mbuf *m_shutdown_ack;
9120 struct sctp_shutdown_ack_chunk *ack_cp;
9121 struct sctp_tmit_chunk *chk;
9123 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9124 if (m_shutdown_ack == NULL) {
9128 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9129 sctp_alloc_a_chunk(stcb, chk);
9132 sctp_m_freem(m_shutdown_ack);
9135 chk->copy_by_ref = 0;
9136 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9137 chk->rec.chunk_id.can_take_data = 1;
9139 chk->send_size = sizeof(struct sctp_chunkhdr);
9140 chk->sent = SCTP_DATAGRAM_UNSENT;
9143 chk->asoc = &stcb->asoc;
9144 chk->data = m_shutdown_ack;
9147 atomic_add_int(&chk->whoTo->ref_count, 1);
9149 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9150 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9151 ack_cp->ch.chunk_flags = 0;
9152 ack_cp->ch.chunk_length = htons(chk->send_size);
9153 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9154 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9155 chk->asoc->ctrl_queue_cnt++;
9160 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9162 /* formulate and queue a SHUTDOWN to the sender */
9163 struct mbuf *m_shutdown;
9164 struct sctp_shutdown_chunk *shutdown_cp;
9165 struct sctp_tmit_chunk *chk;
9167 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9168 if (m_shutdown == NULL) {
9172 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9173 sctp_alloc_a_chunk(stcb, chk);
9176 sctp_m_freem(m_shutdown);
9179 chk->copy_by_ref = 0;
9180 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9181 chk->rec.chunk_id.can_take_data = 1;
9183 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9184 chk->sent = SCTP_DATAGRAM_UNSENT;
9187 chk->asoc = &stcb->asoc;
9188 chk->data = m_shutdown;
9191 atomic_add_int(&chk->whoTo->ref_count, 1);
9193 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9194 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9195 shutdown_cp->ch.chunk_flags = 0;
9196 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9197 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
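/*
 * The SHUTDOWN chunk carries the current cumulative TSN ack
 * (RFC 4960, section 3.3.8), so the peer learns how much of its data
 * has been received even though we have no more to send.
 */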
9198 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9199 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9200 chk->asoc->ctrl_queue_cnt++;
9205 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9208 * formulate and queue an ASCONF to the peer. ASCONF parameters
9209 * should be queued on the assoc queue.
9211 struct sctp_tmit_chunk *chk;
9212 struct mbuf *m_asconf;
9215 SCTP_TCB_LOCK_ASSERT(stcb);
9217 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9218 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9219 /* can't send a new one if there is one in flight already */
9222 /* compose an ASCONF chunk, maximum length is PMTU */
9223 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9224 if (m_asconf == NULL) {
9227 sctp_alloc_a_chunk(stcb, chk);
9230 sctp_m_freem(m_asconf);
9233 chk->copy_by_ref = 0;
9234 chk->rec.chunk_id.id = SCTP_ASCONF;
9235 chk->rec.chunk_id.can_take_data = 0;
9236 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9237 chk->data = m_asconf;
9238 chk->send_size = len;
9239 chk->sent = SCTP_DATAGRAM_UNSENT;
9241 chk->asoc = &stcb->asoc;
9244 atomic_add_int(&chk->whoTo->ref_count, 1);
9246 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9247 chk->asoc->ctrl_queue_cnt++;
9252 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9255 * formulate and queue an ASCONF-ACK back to the sender. The ASCONF-ACK
9256 * must be stored in the tcb.
9258 struct sctp_tmit_chunk *chk;
9259 struct sctp_asconf_ack *ack, *latest_ack;
9261 struct sctp_nets *net = NULL;
9263 SCTP_TCB_LOCK_ASSERT(stcb);
9264 /* Get the latest ASCONF-ACK */
9265 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9266 if (latest_ack == NULL) {
9269 if (latest_ack->last_sent_to != NULL &&
9270 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9271 /* we're doing a retransmission */
9272 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9275 if (stcb->asoc.last_control_chunk_from == NULL) {
9276 if (stcb->asoc.alternate) {
9277 net = stcb->asoc.alternate;
9279 net = stcb->asoc.primary_destination;
9282 net = stcb->asoc.last_control_chunk_from;
9287 if (stcb->asoc.last_control_chunk_from == NULL) {
9288 if (stcb->asoc.alternate) {
9289 net = stcb->asoc.alternate;
9291 net = stcb->asoc.primary_destination;
9294 net = stcb->asoc.last_control_chunk_from;
9297 latest_ack->last_sent_to = net;
9299 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9300 if (ack->data == NULL) {
9303 /* copy the asconf_ack */
9304 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9305 if (m_ack == NULL) {
9306 /* couldn't copy it */
9309 #ifdef SCTP_MBUF_LOGGING
9310 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9311 sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9315 sctp_alloc_a_chunk(stcb, chk);
9319 sctp_m_freem(m_ack);
9322 chk->copy_by_ref = 0;
9323 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9324 chk->rec.chunk_id.can_take_data = 1;
9325 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9328 atomic_add_int(&chk->whoTo->ref_count, 1);
9331 chk->send_size = ack->len;
9332 chk->sent = SCTP_DATAGRAM_UNSENT;
9334 chk->asoc = &stcb->asoc;
9336 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9337 chk->asoc->ctrl_queue_cnt++;
9344 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9345 struct sctp_tcb *stcb,
9346 struct sctp_association *asoc,
9347 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9348 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9354 * send out one MTU of retransmission. If fast_retransmit is
9355 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9356 * rwnd. For a Cookie or Asconf in the control chunk queue we
9357 * retransmit them by themselves.
9359 * For data chunks we will pick out the lowest TSNs in the sent_queue
9360 * marked for resend and bundle them all together (up to a MTU of
9361 * destination). The address to send to should have been
9362 * selected/changed where the retransmission was marked (i.e. in FR
9363 * or t3-timeout routines).
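/*
 * Return values, as used below: SCTP_RETRAN_DONE when the sent queue
 * is empty (nothing left to retransmit) and SCTP_RETRAN_EXIT when the
 * association had to be aborted because a chunk exceeded the
 * retransmission limit; otherwise the number of chunks handled is
 * reported through *cnt_out.
 */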
9365 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9366 struct sctp_tmit_chunk *chk, *fwd;
9367 struct mbuf *m, *endofchain;
9368 struct sctp_nets *net = NULL;
9369 uint32_t tsns_sent = 0;
9370 int no_fragmentflg, bundle_at, cnt_thru;
9372 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9373 struct sctp_auth_chunk *auth = NULL;
9374 uint32_t auth_offset = 0;
9375 uint16_t auth_keyid;
9376 int override_ok = 1;
9377 int data_auth_reqd = 0;
9380 SCTP_TCB_LOCK_ASSERT(stcb);
9381 tmr_started = ctl_cnt = bundle_at = error = 0;
9386 endofchain = m = NULL;
9387 auth_keyid = stcb->asoc.authinfo.active_keyid;
9388 #ifdef SCTP_AUDITING_ENABLED
9389 sctp_audit_log(0xC3, 1);
9391 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9392 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9393 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9394 asoc->sent_queue_retran_cnt);
9395 asoc->sent_queue_cnt = 0;
9396 asoc->sent_queue_cnt_removeable = 0;
9397 /* send back 0/0 so we enter normal transmission */
9401 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9402 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9403 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9404 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9405 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9408 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9409 if (chk != asoc->str_reset) {
9411 * not eligible for retran if its
9418 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9422 * Add an AUTH chunk, if chunk requires it save the
9423 * offset into the chain for AUTH
9425 if ((auth == NULL) &&
9426 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9427 stcb->asoc.peer_auth_chunks))) {
9428 m = sctp_add_auth_chunk(m, &endofchain,
9429 &auth, &auth_offset,
9431 chk->rec.chunk_id.id);
9432 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9434 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9440 /* do we have control chunks to retransmit? */
9442 /* Start a timer no matter if we succeed or fail */
9443 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9444 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9445 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9446 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9447 chk->snd_count++; /* update our count */
9448 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9449 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9450 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9451 no_fragmentflg, 0, 0,
9452 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9453 chk->whoTo->port, NULL,
9456 SCTP_STAT_INCR(sctps_lowlevelerr);
9463 * We don't want to mark the net->sent time here, since we
9464 * use this for HB and a retransmission cannot measure RTT
9466 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9468 chk->sent = SCTP_DATAGRAM_SENT;
9469 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9473 /* Clean up the fwd-tsn list */
9474 sctp_clean_up_ctl(stcb, asoc, so_locked);
9479 * Ok, it is just data retransmission we need to do or that and a
9480 * fwd-tsn with it all.
9482 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9483 return (SCTP_RETRAN_DONE);
9485 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9486 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9487 /* not yet open, resend the cookie and that is it */
9490 #ifdef SCTP_AUDITING_ENABLED
9491 sctp_auditing(20, inp, stcb, NULL);
9493 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9494 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9495 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9496 /* No, not sent to this net or not ready for rtx */
9499 if (chk->data == NULL) {
9500 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9501 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9504 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9505 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9506 struct mbuf *op_err;
9507 char msg[SCTP_DIAG_INFO_LEN];
9509 snprintf(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
9510 chk->rec.data.TSN_seq, chk->snd_count);
9511 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
9513 atomic_add_int(&stcb->asoc.refcnt, 1);
9514 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
9516 SCTP_TCB_LOCK(stcb);
9517 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9518 return (SCTP_RETRAN_EXIT);
9520 /* pick up the net */
9522 switch (net->ro._l_addr.sa.sa_family) {
9525 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9530 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9539 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9540 /* No room in peers rwnd */
9543 tsn = asoc->last_acked_seq + 1;
9544 if (tsn == chk->rec.data.TSN_seq) {
9546 * we make a special exception for this
9547 * case. The peer has no rwnd but is missing
9548 * the lowest chunk.. which is probably what
9549 * is holding up the rwnd.
9551 goto one_chunk_around;
9556 if (asoc->peers_rwnd < mtu) {
9558 if ((asoc->peers_rwnd == 0) &&
9559 (asoc->total_flight == 0)) {
9560 chk->window_probe = 1;
9561 chk->whoTo->window_probe = 1;
9564 #ifdef SCTP_AUDITING_ENABLED
9565 sctp_audit_log(0xC3, 2);
9569 net->fast_retran_ip = 0;
9570 if (chk->rec.data.doing_fast_retransmit == 0) {
9572 * if no FR in progress skip destination that have
9573 * flight_size > cwnd.
9575 if (net->flight_size >= net->cwnd) {
9580 * Mark the destination net to have FR recovery
9584 net->fast_retran_ip = 1;
9588 * if no AUTH is yet included and this chunk requires it,
9589 * make sure to account for it. We don't apply the size
9590 * until the AUTH chunk is actually added below in case
9591 * there is no room for this chunk.
9593 if (data_auth_reqd && (auth == NULL)) {
9594 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9598 if ((chk->send_size <= (mtu - dmtu)) ||
9599 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9600 /* ok we will add this one */
9601 if (data_auth_reqd) {
9603 m = sctp_add_auth_chunk(m,
9609 auth_keyid = chk->auth_keyid;
9611 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9612 } else if (override_ok) {
9613 auth_keyid = chk->auth_keyid;
9615 } else if (chk->auth_keyid != auth_keyid) {
9616 /* different keyid, so done bundling */
9620 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9622 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9625 /* Do clear IP_DF ? */
9626 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9629 /* update our MTU size */
9630 if (mtu > (chk->send_size + dmtu))
9631 mtu -= (chk->send_size + dmtu);
9634 data_list[bundle_at++] = chk;
9635 if (one_chunk && (asoc->total_flight <= 0)) {
9636 SCTP_STAT_INCR(sctps_windowprobed);
9639 if (one_chunk == 0) {
9641 * now, are there any more forward from chk to pick
9644 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9645 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9646 /* Nope, not for retran */
9649 if (fwd->whoTo != net) {
9650 /* Nope, not the net in question */
9653 if (data_auth_reqd && (auth == NULL)) {
9654 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9657 if (fwd->send_size <= (mtu - dmtu)) {
9658 if (data_auth_reqd) {
9660 m = sctp_add_auth_chunk(m,
9666 auth_keyid = fwd->auth_keyid;
9668 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9669 } else if (override_ok) {
9670 auth_keyid = fwd->auth_keyid;
9672 } else if (fwd->auth_keyid != auth_keyid) {
9674 * so done bundling */
9678 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9680 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9683 /* Do clear IP_DF ? */
9684 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9687 /* update our MTU size */
9688 if (mtu > (fwd->send_size + dmtu))
9689 mtu -= (fwd->send_size + dmtu);
9692 data_list[bundle_at++] = fwd;
9693 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9697 /* can't fit so we are done */
9702 /* Is there something to send for this destination? */
9705 * No matter if we fail or succeed we should start a
9706 * timer. A failure is like a lost IP packet :-)
9708 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9710 * no timer running on this destination
9713 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9716 /* Now let's send it, if there is anything to send :> */
9717 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9718 (struct sockaddr *)&net->ro._l_addr, m,
9719 auth_offset, auth, auth_keyid,
9720 no_fragmentflg, 0, 0,
9721 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9725 /* error, we could not output */
9726 SCTP_STAT_INCR(sctps_lowlevelerr);
9734 * We don't want to mark the net->sent time here,
9735 * since we use this for HB and a retransmission cannot
9738 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9740 /* For auto-close */
9742 if (*now_filled == 0) {
9743 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9744 *now = asoc->time_last_sent;
9747 asoc->time_last_sent = *now;
9749 *cnt_out += bundle_at;
9750 #ifdef SCTP_AUDITING_ENABLED
9751 sctp_audit_log(0xC4, bundle_at);
9754 tsns_sent = data_list[0]->rec.data.TSN_seq;
9756 for (i = 0; i < bundle_at; i++) {
9757 SCTP_STAT_INCR(sctps_sendretransdata);
9758 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9760 * When we have revoked data and we
9761 * retransmit it, we clear the revoked
9762 * flag, since this flag dictates whether we
9763 * subtracted it from the flight size.
9765 if (data_list[i]->rec.data.chunk_was_revoked) {
9766 /* Deflate the cwnd */
9767 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9768 data_list[i]->rec.data.chunk_was_revoked = 0;
9770 data_list[i]->snd_count++;
9771 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9772 /* record the time */
9773 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9774 if (data_list[i]->book_size_scale) {
9776 * need to double the book size on
9779 data_list[i]->book_size_scale = 0;
9781 * Since we double the booksize, we
9782 * must also double the output queue
9783 * size, since this gets shrunk when
9784 * we free by this amount.
9786 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9787 data_list[i]->book_size *= 2;
9791 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9792 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9793 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9795 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9796 (uint32_t) (data_list[i]->send_size +
9797 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9800 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9801 data_list[i]->whoTo->flight_size,
9802 data_list[i]->book_size,
9803 (uint32_t) (uintptr_t) data_list[i]->whoTo,
9804 data_list[i]->rec.data.TSN_seq);
9806 sctp_flight_size_increase(data_list[i]);
9807 sctp_total_flight_increase(stcb, data_list[i]);
9808 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9809 /* SWS sender side engages */
9810 asoc->peers_rwnd = 0;
9813 (data_list[i]->rec.data.doing_fast_retransmit)) {
9814 SCTP_STAT_INCR(sctps_sendfastretrans);
9815 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9816 (tmr_started == 0)) {
9818 * ok we just fast-retrans'd
9819 * the lowest TSN, i.e. the
9820 * first on the list. In
9821 * this case we want to give
9822 * some more time to get a
9823 * SACK back without a
9826 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9827 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
9828 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9832 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9833 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9835 #ifdef SCTP_AUDITING_ENABLED
9836 sctp_auditing(21, inp, stcb, NULL);
9842 if (asoc->sent_queue_retran_cnt <= 0) {
9843 /* all done we have no more to retran */
9844 asoc->sent_queue_retran_cnt = 0;
9848 /* No more room in rwnd */
9851 /* stop the for loop here. we sent out a packet */
9858 sctp_timer_validation(struct sctp_inpcb *inp,
9859 struct sctp_tcb *stcb,
9860 struct sctp_association *asoc)
9862 struct sctp_nets *net;
9864 /* Validate that a timer is running somewhere */
9865 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9866 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9867 /* Here is a timer */
9871 SCTP_TCB_LOCK_ASSERT(stcb);
9872 /* Gak, we did not have a timer somewhere */
9873 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9874 if (asoc->alternate) {
9875 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9877 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9883 sctp_chunk_output(struct sctp_inpcb *inp,
9884 struct sctp_tcb *stcb,
9887 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9893 * Ok this is the generic chunk service queue. we must do the
9895 * - See if there are retransmits pending, if so we must
9897 * - Service the stream queue that is next, moving any
9898 * message (note I must get a complete message i.e.
9899 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9901 * - Check to see if the cwnd/rwnd allows any output, if so we
9902 * go ahead and formulate and send the low-level chunks, making sure
9903 * to combine any control in the control chunk queue also.
9905 struct sctp_association *asoc;
9906 struct sctp_nets *net;
9907 int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
9908 unsigned int burst_cnt = 0;
9912 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9915 unsigned int tot_frs = 0;
9919 /* The Nagle algorithm is only applied when handling a send call. */
9920 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9921 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9929 SCTP_TCB_LOCK_ASSERT(stcb);
9931 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
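/*
 * un_sent is the amount of queued user data not yet in flight; if it
 * is zero and no control, ASCONF, or retransmission work is pending,
 * there is nothing for this call to do.
 */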
9933 if ((un_sent <= 0) &&
9934 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9935 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9936 (asoc->sent_queue_retran_cnt == 0) &&
9937 (asoc->trigger_reset == 0)) {
9938 /* Nothing to do unless there is something to be sent left */
9942 * If we have something to send (data or control) AND a sack timer
9943 * is running, piggy-back the sack.
9945 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9946 sctp_send_sack(stcb, so_locked);
9947 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
9949 while (asoc->sent_queue_retran_cnt) {
9951 * Ok, it is retransmission time only, we send out only ONE
9952 * packet with a single call off to the retran code.
9954 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9956 * Special hook for handling cookies discarded
9957 * by the peer that carried data. Send the cookie-ack only
9958 * and then the next call will get the retrans.
9960 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9962 &now, &now_filled, frag_point, so_locked);
9964 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9965 /* if its not from a HB then do it */
9967 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9973 * its from any other place, we don't allow retran
9974 * output (only control)
9979 /* Can't send anymore */
9981 * now lets push out control by calling med-level
9982 * output once. this assures that we WILL send HB's
9985 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9987 &now, &now_filled, frag_point, so_locked);
9988 #ifdef SCTP_AUDITING_ENABLED
9989 sctp_auditing(8, inp, stcb, NULL);
9991 sctp_timer_validation(inp, stcb, asoc);
9996 * The count was off.. retran is not happening so do
9997 * the normal retransmission.
9999 #ifdef SCTP_AUDITING_ENABLED
10000 sctp_auditing(9, inp, stcb, NULL);
10002 if (ret == SCTP_RETRAN_EXIT) {
10007 if (from_where == SCTP_OUTPUT_FROM_T3) {
10008 /* Only one transmission allowed out of a timeout */
10009 #ifdef SCTP_AUDITING_ENABLED
10010 sctp_auditing(10, inp, stcb, NULL);
10012 /* Push out any control */
10013 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10014 &now, &now_filled, frag_point, so_locked);
10017 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10018 /* Hit FR burst limit */
10021 if ((num_out == 0) && (ret == 0)) {
10022 /* No more retrans to send */
10026 #ifdef SCTP_AUDITING_ENABLED
10027 sctp_auditing(12, inp, stcb, NULL);
10029 /* Check for bad destinations, if they exist move chunks around. */
10030 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10031 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10033 * if possible move things off of this address we
10034 * still may send below due to the dormant state but
10035 * we try to find an alternate address to send to
10036 * and if we have one we move all queued data on the
10037 * out wheel to this alternate address.
10039 if (net->ref_count > 1)
10040 sctp_move_chunks_from_net(stcb, net);
10043 * if ((asoc->sat_network) || (net->addr_is_local))
10044 * { burst_limit = asoc->max_burst *
10045 * SCTP_SAT_NETWORK_BURST_INCR; }
10047 if (asoc->max_burst > 0) {
10048 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10049 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10050 /* JRS - Use the congestion
10051 * control given in the
10052 * congestion control module */
10053 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10054 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10055 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10057 SCTP_STAT_INCR(sctps_maxburstqueued);
10059 net->fast_retran_ip = 0;
10061 if (net->flight_size == 0) {
10062 /* Should be decaying the
10073 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10074 &reason_code, 0, from_where,
10075 &now, &now_filled, frag_point, so_locked);
10077 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10078 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10079 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10081 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10082 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10083 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10087 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10089 tot_out += num_out;
10091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10092 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10093 if (num_out == 0) {
10094 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10099 * When the Nagle algorithm is used, look at how
10100 * much is unsent, then if it's smaller than an MTU
10101 * and we have data in flight we stop, except if we
10102 * are handling a fragmented user message.
10104 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10105 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
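/*
 * A rough sketch of the Nagle test below: with a smallest_mtu of,
 * say, 1500, having fewer than roughly 1500 - SCTP_MIN_OVERHEAD
 * unsent bytes while data is still in flight makes us hold off and
 * wait for a SACK rather than emit a small packet.
 */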
10106 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10107 (stcb->asoc.total_flight > 0)) {
10108 /* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10112 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10113 TAILQ_EMPTY(&asoc->send_queue) &&
10114 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10115 /* Nothing left to send */
10118 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10119 /* Nothing left to send */
10122 } while (num_out &&
10123 ((asoc->max_burst == 0) ||
10124 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10125 (burst_cnt < asoc->max_burst)));
10127 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10128 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10129 SCTP_STAT_INCR(sctps_maxburstqueued);
10130 asoc->burst_limit_applied = 1;
10131 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10132 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10135 asoc->burst_limit_applied = 0;
10138 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10139 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10141 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10145 * Now we need to clean up the control chunk chain if a ECNE is on
10146 * it. It must be marked as UNSENT again so next call will continue
10147 * to send it until such time that we get a CWR, to remove it.
10149 if (stcb->asoc.ecn_echo_cnt_onq)
10150 sctp_fix_ecn_echo(asoc);
10152 if (stcb->asoc.trigger_reset) {
10153 if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) {
10163 struct sctp_inpcb *inp,
10165 struct sockaddr *addr,
10166 struct mbuf *control,
10171 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10174 if (inp->sctp_socket == NULL) {
10175 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10178 return (sctp_sosend(inp->sctp_socket,
10180 (struct uio *)NULL,
10188 send_forward_tsn(struct sctp_tcb *stcb,
10189 struct sctp_association *asoc)
10191 struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10192 struct sctp_forward_tsn_chunk *fwdtsn;
10193 struct sctp_strseq *strseq;
10194 struct sctp_strseq_mid *strseq_m;
10195 uint32_t advance_peer_ack_point;
10196 unsigned int cnt_of_space, i, ovh;
10197 unsigned int space_needed;
10198 unsigned int cnt_of_skipped = 0;
10201 if (asoc->idata_supported) {
10206 SCTP_TCB_LOCK_ASSERT(stcb);
10207 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10208 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10209 /* mark it to unsent */
10210 chk->sent = SCTP_DATAGRAM_UNSENT;
10211 chk->snd_count = 0;
10212 /* Do we correct its output location? */
10214 sctp_free_remote_addr(chk->whoTo);
10217 goto sctp_fill_in_rest;
10220 /* Ok if we reach here we must build one */
10221 sctp_alloc_a_chunk(stcb, chk);
10225 asoc->fwd_tsn_cnt++;
10226 chk->copy_by_ref = 0;
10228 * We don't do the old thing here since this is not used on-wire
10229 * but to tell the stack that we are sending a fwd-tsn during
10230 * output. And whether it is an IFORWARD or a FORWARD, it is a fwd-tsn.
10232 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10233 chk->rec.chunk_id.can_take_data = 0;
10237 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10238 if (chk->data == NULL) {
10239 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10242 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10243 chk->sent = SCTP_DATAGRAM_UNSENT;
10244 chk->snd_count = 0;
10245 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10246 asoc->ctrl_queue_cnt++;
10249 * Here we go through and fill out the part that deals with
10250 * stream/seq of the ones we skip.
10252 SCTP_BUF_LEN(chk->data) = 0;
10253 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10254 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10255 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10256 /* no more to look at */
10259 if (old && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10260 /* We don't report these */
10266 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10267 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10269 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10270 (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10272 cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
10274 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10275 ovh = SCTP_MIN_OVERHEAD;
10277 ovh = SCTP_MIN_V4_OVERHEAD;
10279 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10280 /* trim to a mtu size */
10281 cnt_of_space = asoc->smallest_mtu - ovh;
10283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10284 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10285 0xff, 0, cnt_of_skipped,
10286 asoc->advanced_peer_ack_point);
10288 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10289 if (cnt_of_space < space_needed) {
10291 * ok we must trim down the chunk by lowering the
10292 * advance peer ack point.
10294 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10295 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10296 0xff, 0xff, cnt_of_space,
10300 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10301 cnt_of_skipped /= sizeof(struct sctp_strseq);
10303 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10304 cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10307 * Go through and find the TSN that will be the one
10310 at = TAILQ_FIRST(&asoc->sent_queue);
10312 for (i = 0; i < cnt_of_skipped; i++) {
10313 tp1 = TAILQ_NEXT(at, sctp_next);
10320 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10321 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10322 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10323 asoc->advanced_peer_ack_point);
10327 * last now points to last one I can report, update
10331 advance_peer_ack_point = last->rec.data.TSN_seq;
10334 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10335 cnt_of_skipped * sizeof(struct sctp_strseq);
10337 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10338 cnt_of_skipped * sizeof(struct sctp_strseq_mid);
10341 chk->send_size = space_needed;
10342 /* Setup the chunk */
10343 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10344 fwdtsn->ch.chunk_length = htons(chk->send_size);
10345 fwdtsn->ch.chunk_flags = 0;
10347 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10349 fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
10351 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10352 SCTP_BUF_LEN(chk->data) = chk->send_size;
10355 * Move pointer to after the fwdtsn and transfer to the
10359 strseq = (struct sctp_strseq *)fwdtsn;
10361 strseq_m = (struct sctp_strseq_mid *)fwdtsn;
10364 * Now populate the strseq list. This is done blindly
10365 * without pulling out duplicate stream info. This is
10366 * inefficient but won't harm the process since the peer will
10367 * look at these in sequence and will thus release anything.
10368 * It could mean we exceed the PMTU and chop off some that
10369 * we could have included.. but this is unlikely (aka 1432/4
10370 * would mean 300+ stream seq's would have to be reported in
10371 * one FWD-TSN. With a bit of work we can later FIX this to
10372 * optimize and pull out duplicates.. but it does add more
10373 * overhead. So for now... not!
10376 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10377 if (i >= cnt_of_skipped) {
10380 if (old && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10381 /* We don't report these */
10384 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10385 at->rec.data.fwd_tsn_cnt = 0;
10388 strseq->stream = htons(at->rec.data.stream_number);
10389 strseq->sequence = htons((uint16_t) at->rec.data.stream_seq);
10392 strseq_m->stream = htons(at->rec.data.stream_number);
10393 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10394 strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
10396 strseq_m->flags = 0;
10398 strseq_m->msg_id = htonl(at->rec.data.stream_seq);
10407 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10408 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10414 * Queue up a SACK or NR-SACK in the control queue.
10415 * We must first check to see if a SACK or NR-SACK is
10416 * somehow on the control queue.
10417 * If so, we will take and remove the old one.
10419 struct sctp_association *asoc;
10420 struct sctp_tmit_chunk *chk, *a_chk;
10421 struct sctp_sack_chunk *sack;
10422 struct sctp_nr_sack_chunk *nr_sack;
10423 struct sctp_gap_ack_block *gap_descriptor;
10424 const struct sack_track *selector;
10429 int limit_reached = 0;
10430 unsigned int i, siz, j;
10431 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10434 uint32_t highest_tsn;
10439 if (stcb->asoc.nrsack_supported == 1) {
10440 type = SCTP_NR_SELECTIVE_ACK;
10442 type = SCTP_SELECTIVE_ACK;
10445 asoc = &stcb->asoc;
10446 SCTP_TCB_LOCK_ASSERT(stcb);
10447 if (asoc->last_data_chunk_from == NULL) {
10448 /* Hmm we never received anything */
10451 sctp_slide_mapping_arrays(stcb);
10452 sctp_set_rwnd(stcb, asoc);
10453 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10454 if (chk->rec.chunk_id.id == type) {
10455 /* Hmm, found a sack already on queue, remove it */
10456 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10457 asoc->ctrl_queue_cnt--;
10460 sctp_m_freem(a_chk->data);
10461 a_chk->data = NULL;
10463 if (a_chk->whoTo) {
10464 sctp_free_remote_addr(a_chk->whoTo);
10465 a_chk->whoTo = NULL;
10470 if (a_chk == NULL) {
10471 sctp_alloc_a_chunk(stcb, a_chk);
10472 if (a_chk == NULL) {
10473 /* No memory so we drop the idea, and set a timer */
10474 if (stcb->asoc.delayed_ack) {
10475 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10476 stcb->sctp_ep, stcb, NULL,
10477 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
10478 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10479 stcb->sctp_ep, stcb, NULL);
10481 stcb->asoc.send_sack = 1;
10485 a_chk->copy_by_ref = 0;
10486 a_chk->rec.chunk_id.id = type;
10487 a_chk->rec.chunk_id.can_take_data = 1;
10489 /* Clear our pkt counts */
10490 asoc->data_pkts_seen = 0;
10493 a_chk->asoc = asoc;
10494 a_chk->snd_count = 0;
10495 a_chk->send_size = 0; /* fill in later */
10496 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10497 a_chk->whoTo = NULL;
10499 if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
10501 * Ok, the destination for the SACK is unreachable, let's see if
10502 * we can select an alternate to asoc->last_data_chunk_from
10504 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10505 if (a_chk->whoTo == NULL) {
10506 /* Nope, no alternate */
10507 a_chk->whoTo = asoc->last_data_chunk_from;
10510 a_chk->whoTo = asoc->last_data_chunk_from;
10512 if (a_chk->whoTo) {
10513 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10515 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10516 highest_tsn = asoc->highest_tsn_inside_map;
10518 highest_tsn = asoc->highest_tsn_inside_nr_map;
10520 if (highest_tsn == asoc->cumulative_tsn) {
10522 if (type == SCTP_SELECTIVE_ACK) {
10523 space_req = sizeof(struct sctp_sack_chunk);
10525 space_req = sizeof(struct sctp_nr_sack_chunk);
10528 /* gaps get a cluster */
10529 space_req = MCLBYTES;
10531 /* Ok, now let's formulate an MBUF with our SACK */
10532 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
10533 if ((a_chk->data == NULL) ||
10534 (a_chk->whoTo == NULL)) {
10535 /* rats, no mbuf memory */
10537 /* was a problem with the destination */
10538 sctp_m_freem(a_chk->data);
10539 a_chk->data = NULL;
10541 sctp_free_a_chunk(stcb, a_chk, so_locked);
10542 /* sa_ignore NO_NULL_CHK */
10543 if (stcb->asoc.delayed_ack) {
10544 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10545 stcb->sctp_ep, stcb, NULL,
10546 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
10547 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10548 stcb->sctp_ep, stcb, NULL);
10550 stcb->asoc.send_sack = 1;
10554 /* ok, let's go through and fill it in */
10555 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10556 space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
10557 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10558 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10560 limit = mtod(a_chk->data, caddr_t);
10565 if ((asoc->sctp_cmt_on_off > 0) &&
10566 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10568 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
10569 * received, then set the high bit to 1, else 0. Reset
10572 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10573 asoc->cmt_dac_pkts_rcvd = 0;
10575 #ifdef SCTP_ASOCLOG_OF_TSNS
10576 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10577 stcb->asoc.cumack_log_atsnt++;
10578 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10579 stcb->asoc.cumack_log_atsnt = 0;
10582 /* reset the reader's interpretation */
10583 stcb->freed_by_sorcv_sincelast = 0;
10585 if (type == SCTP_SELECTIVE_ACK) {
10586 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10588 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10589 if (highest_tsn > asoc->mapping_array_base_tsn) {
10590 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10592 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10596 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10597 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10598 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10599 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10601 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10605 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10608 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10610 if (((type == SCTP_SELECTIVE_ACK) &&
10611 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10612 ((type == SCTP_NR_SELECTIVE_ACK) &&
10613 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10614 /* we have a gap .. maybe */
10615 for (i = 0; i < siz; i++) {
10616 tsn_map = asoc->mapping_array[i];
10617 if (type == SCTP_SELECTIVE_ACK) {
10618 tsn_map |= asoc->nr_mapping_array[i];
10622 * Clear all bits corresponding to TSNs
10623 * smaller than or equal to the cumulative TSN.
10625 tsn_map &= (~0U << (1 - offset));
10627 selector = &sack_array[tsn_map];
10628 if (mergeable && selector->right_edge) {
10630 * Backup, left and right edges were ok to
10636 if (selector->num_entries == 0)
10639 for (j = 0; j < selector->num_entries; j++) {
10640 if (mergeable && selector->right_edge) {
10642 * do a merge by NOT setting
10648 * no merge, set the left
10652 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10654 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10657 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10663 if (selector->left_edge) {
10667 if (limit_reached) {
10668 /* Reached the limit stop */
10674 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10675 (limit_reached == 0)) {
10679 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10680 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10682 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10685 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10688 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10690 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10691 /* we have a gap .. maybe */
10692 for (i = 0; i < siz; i++) {
10693 tsn_map = asoc->nr_mapping_array[i];
10696 * Clear all bits corresponding to
10697 * TSNs smaller than or equal to the
10700 tsn_map &= (~0U << (1 - offset));
10702 selector = &sack_array[tsn_map];
10703 if (mergeable && selector->right_edge) {
10705 * Backup, left and right edges were
10708 num_nr_gap_blocks--;
10711 if (selector->num_entries == 0)
10714 for (j = 0; j < selector->num_entries; j++) {
10715 if (mergeable && selector->right_edge) {
10717 * do a merge by NOT
10724 * no merge, set the
10728 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10730 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10731 num_nr_gap_blocks++;
10733 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10739 if (selector->left_edge) {
10743 if (limit_reached) {
10744 /* Reached the limit stop */
10751 /* now we must add any dups we are going to report. */
10752 if ((limit_reached == 0) && (asoc->numduptsns)) {
10753 dup = (uint32_t *) gap_descriptor;
10754 for (i = 0; i < asoc->numduptsns; i++) {
10755 *dup = htonl(asoc->dup_tsns[i]);
10758 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10763 asoc->numduptsns = 0;
10766 * now that the chunk is prepared, queue it to the control chunk
10769 if (type == SCTP_SELECTIVE_ACK) {
10770 a_chk->send_size = (uint16_t) (sizeof(struct sctp_sack_chunk) +
10771 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10772 num_dups * sizeof(int32_t));
10773 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10774 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10775 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10776 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10777 sack->sack.num_dup_tsns = htons(num_dups);
10778 sack->ch.chunk_type = type;
10779 sack->ch.chunk_flags = flags;
10780 sack->ch.chunk_length = htons(a_chk->send_size);
10782 a_chk->send_size = (uint16_t) (sizeof(struct sctp_nr_sack_chunk) +
10783 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10784 num_dups * sizeof(int32_t));
10785 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10786 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10787 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10788 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10789 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10790 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10791 nr_sack->nr_sack.reserved = 0;
10792 nr_sack->ch.chunk_type = type;
10793 nr_sack->ch.chunk_flags = flags;
10794 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10796 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10797 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10798 asoc->ctrl_queue_cnt++;
10799 asoc->send_sack = 0;
10800 SCTP_STAT_INCR(sctps_sendsacks);
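/*
 * Illustrative sketch (not part of the build): the table-driven loop above
 * turns the receive bitmap into SACK gap ack blocks whose start/end are
 * offsets relative to the cumulative TSN.  A simplified, non-table version
 * of the same idea is shown below; it ignores the mergeable-edge bookkeeping
 * and the SACK/NR-SACK split handled above, and the names (bitmap, nbits,
 * report_gap) are hypothetical.
 */
#if 0
static void
example_gaps_from_bitmap(const uint8_t *bitmap, unsigned int nbits,
    void (*report_gap)(uint16_t start, uint16_t end))
{
	unsigned int i;
	int in_block = 0;
	uint16_t start = 0;

	for (i = 0; i < nbits; i++) {
		/* bit i represents cumulative TSN + 1 + i, LSB first per byte */
		int received = (bitmap[i / 8] >> (i % 8)) & 1;

		if (received && !in_block) {
			start = (uint16_t)(i + 1);	/* offset from cum-ack */
			in_block = 1;
		} else if (!received && in_block) {
			report_gap(start, (uint16_t)i);	/* last received bit was i - 1 */
			in_block = 0;
		}
	}
	if (in_block) {
		report_gap(start, (uint16_t)nbits);
	}
}
#endif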
10805 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10806 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10811 struct mbuf *m_abort, *m, *m_last;
10812 struct mbuf *m_out, *m_end = NULL;
10813 struct sctp_abort_chunk *abort;
10814 struct sctp_auth_chunk *auth = NULL;
10815 struct sctp_nets *net;
10817 uint32_t auth_offset = 0;
10818 uint16_t cause_len, chunk_len, padding_len;
10820 SCTP_TCB_LOCK_ASSERT(stcb);
10822 * Add an AUTH chunk, if chunk requires it and save the offset into
10823 * the chain for AUTH
10825 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10826 stcb->asoc.peer_auth_chunks)) {
10827 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10828 stcb, SCTP_ABORT_ASSOCIATION);
10829 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10833 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
10834 if (m_abort == NULL) {
10836 sctp_m_freem(m_out);
10839 sctp_m_freem(operr);
10843 /* link in any error */
10844 SCTP_BUF_NEXT(m_abort) = operr;
10847 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10848 cause_len += (uint16_t) SCTP_BUF_LEN(m);
10849 if (SCTP_BUF_NEXT(m) == NULL) {
10853 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10854 chunk_len = (uint16_t) sizeof(struct sctp_abort_chunk) + cause_len;
10855 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
10856 if (m_out == NULL) {
10857 /* NO Auth chunk prepended, so reserve space in front */
10858 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10861 /* Put AUTH chunk at the front of the chain */
10862 SCTP_BUF_NEXT(m_end) = m_abort;
10864 if (stcb->asoc.alternate) {
10865 net = stcb->asoc.alternate;
10867 net = stcb->asoc.primary_destination;
10869 /* Fill in the ABORT chunk header. */
10870 abort = mtod(m_abort, struct sctp_abort_chunk *);
10871 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10872 if (stcb->asoc.peer_vtag == 0) {
10873 /* This happens iff the assoc is in COOKIE-WAIT state. */
10874 vtag = stcb->asoc.my_vtag;
10875 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
10877 vtag = stcb->asoc.peer_vtag;
10878 abort->ch.chunk_flags = 0;
10880 abort->ch.chunk_length = htons(chunk_len);
10881 /* Add padding, if necessary. */
10882 if (padding_len > 0) {
10883 if ((m_last == NULL) ||
10884 (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
10885 sctp_m_freem(m_out);
10889 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10890 (struct sockaddr *)&net->ro._l_addr,
10891 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10892 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
10893 stcb->asoc.primary_destination->port, NULL,
10896 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
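/*
 * Illustrative sketch (not part of the build): the chunk_length written into
 * the ABORT above is the unpadded length, while the chunk on the wire is
 * padded out to a 4-byte boundary; that is what the SCTP_SIZE32()/padding_len
 * computation does, assuming SCTP_SIZE32() rounds up to a multiple of 4.
 */
#if 0
static uint16_t
example_pad_needed(uint16_t chunk_len)
{
	/* round up to the next multiple of 4, then subtract */
	uint16_t padded = (uint16_t)((chunk_len + 3) & ~3);

	return ((uint16_t)(padded - chunk_len));	/* e.g. 22 -> 2, 24 -> 0 */
}
#endif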
10900 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10901 struct sctp_nets *net,
10904 /* formulate and SEND a SHUTDOWN-COMPLETE */
10905 struct mbuf *m_shutdown_comp;
10906 struct sctp_shutdown_complete_chunk *shutdown_complete;
10910 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
10911 if (m_shutdown_comp == NULL) {
10915 if (reflect_vtag) {
10916 flags = SCTP_HAD_NO_TCB;
10917 vtag = stcb->asoc.my_vtag;
10920 vtag = stcb->asoc.peer_vtag;
10922 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10923 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10924 shutdown_complete->ch.chunk_flags = flags;
10925 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10926 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10927 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10928 (struct sockaddr *)&net->ro._l_addr,
10929 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
10930 stcb->sctp_ep->sctp_lport, stcb->rport,
10934 SCTP_SO_NOT_LOCKED);
10935 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10940 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
10941 struct sctphdr *sh, uint32_t vtag,
10942 uint8_t type, struct mbuf *cause,
10943 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
10944 uint32_t vrf_id, uint16_t port)
10946 struct mbuf *o_pak;
10948 struct sctphdr *shout;
10949 struct sctp_chunkhdr *ch;
10950 #if defined(INET) || defined(INET6)
10951 struct udphdr *udp;
10954 int len, cause_len, padding_len;
10956 struct sockaddr_in *src_sin, *dst_sin;
10960 struct sockaddr_in6 *src_sin6, *dst_sin6;
10961 struct ip6_hdr *ip6;
10964 /* Compute the length of the cause and add final padding. */
10966 if (cause != NULL) {
10967 struct mbuf *m_at, *m_last = NULL;
10969 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
10970 if (SCTP_BUF_NEXT(m_at) == NULL)
10972 cause_len += SCTP_BUF_LEN(m_at);
10974 padding_len = cause_len % 4;
10975 if (padding_len != 0) {
10976 padding_len = 4 - padding_len;
10978 if (padding_len != 0) {
10979 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
10980 sctp_m_freem(cause);
10987 /* Get an mbuf for the header. */
10988 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
10989 switch (dst->sa_family) {
10992 len += sizeof(struct ip);
10997 len += sizeof(struct ip6_hdr);
11003 #if defined(INET) || defined(INET6)
11005 len += sizeof(struct udphdr);
11008 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11009 if (mout == NULL) {
11011 sctp_m_freem(cause);
11015 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11016 SCTP_BUF_LEN(mout) = len;
11017 SCTP_BUF_NEXT(mout) = cause;
11018 M_SETFIB(mout, fibnum);
11019 mout->m_pkthdr.flowid = mflowid;
11020 M_HASHTYPE_SET(mout, mflowtype);
11027 switch (dst->sa_family) {
11030 src_sin = (struct sockaddr_in *)src;
11031 dst_sin = (struct sockaddr_in *)dst;
11032 ip = mtod(mout, struct ip *);
11033 ip->ip_v = IPVERSION;
11034 ip->ip_hl = (sizeof(struct ip) >> 2);
11038 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11040 ip->ip_p = IPPROTO_UDP;
11042 ip->ip_p = IPPROTO_SCTP;
11044 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11045 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11047 len = sizeof(struct ip);
11048 shout = (struct sctphdr *)((caddr_t)ip + len);
11053 src_sin6 = (struct sockaddr_in6 *)src;
11054 dst_sin6 = (struct sockaddr_in6 *)dst;
11055 ip6 = mtod(mout, struct ip6_hdr *);
11056 ip6->ip6_flow = htonl(0x60000000);
11057 if (V_ip6_auto_flowlabel) {
11058 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11060 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11062 ip6->ip6_nxt = IPPROTO_UDP;
11064 ip6->ip6_nxt = IPPROTO_SCTP;
11066 ip6->ip6_src = dst_sin6->sin6_addr;
11067 ip6->ip6_dst = src_sin6->sin6_addr;
11068 len = sizeof(struct ip6_hdr);
11069 shout = (struct sctphdr *)((caddr_t)ip6 + len);
11074 shout = mtod(mout, struct sctphdr *);
11077 #if defined(INET) || defined(INET6)
11079 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11080 sctp_m_freem(mout);
11083 udp = (struct udphdr *)shout;
11084 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11085 udp->uh_dport = port;
11087 udp->uh_ulen = htons((uint16_t) (sizeof(struct udphdr) +
11088 sizeof(struct sctphdr) +
11089 sizeof(struct sctp_chunkhdr) +
11090 cause_len + padding_len));
11091 len += sizeof(struct udphdr);
11092 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11097 shout->src_port = sh->dest_port;
11098 shout->dest_port = sh->src_port;
11099 shout->checksum = 0;
11101 shout->v_tag = htonl(vtag);
11103 shout->v_tag = sh->v_tag;
11105 len += sizeof(struct sctphdr);
11106 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11107 ch->chunk_type = type;
11109 ch->chunk_flags = 0;
11111 ch->chunk_flags = SCTP_HAD_NO_TCB;
11113 ch->chunk_length = htons((uint16_t) (sizeof(struct sctp_chunkhdr) + cause_len));
11114 len += sizeof(struct sctp_chunkhdr);
11115 len += cause_len + padding_len;
11117 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11118 sctp_m_freem(mout);
11121 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11122 switch (dst->sa_family) {
11127 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11132 ip->ip_len = htons(len);
11134 #if defined(SCTP_WITH_NO_CSUM)
11135 SCTP_STAT_INCR(sctps_sendnocrc);
11137 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11138 SCTP_STAT_INCR(sctps_sendswcrc);
11141 SCTP_ENABLE_UDP_CSUM(o_pak);
11144 #if defined(SCTP_WITH_NO_CSUM)
11145 SCTP_STAT_INCR(sctps_sendnocrc);
11147 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11148 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11149 SCTP_STAT_INCR(sctps_sendhwcrc);
11152 #ifdef SCTP_PACKET_LOGGING
11153 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11154 sctp_packet_log(o_pak);
11157 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11162 ip6->ip6_plen = (uint16_t) (len - sizeof(struct ip6_hdr));
11164 #if defined(SCTP_WITH_NO_CSUM)
11165 SCTP_STAT_INCR(sctps_sendnocrc);
11167 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11168 SCTP_STAT_INCR(sctps_sendswcrc);
11170 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11171 udp->uh_sum = 0xffff;
11174 #if defined(SCTP_WITH_NO_CSUM)
11175 SCTP_STAT_INCR(sctps_sendnocrc);
11177 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11178 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11179 SCTP_STAT_INCR(sctps_sendhwcrc);
11182 #ifdef SCTP_PACKET_LOGGING
11183 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11184 sctp_packet_log(o_pak);
11187 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11191 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11193 sctp_m_freem(mout);
11194 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11197 SCTP_STAT_INCR(sctps_sendpackets);
11198 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11199 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
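/*
 * Illustrative sketch (not part of the build): the out-of-the-blue response
 * built above stacks IP (or IPv6), an optional UDP encapsulation header, the
 * SCTP common header, a chunk header, the cause chain and final padding, with
 * the ports and verification tag of the incoming packet reflected back.  The
 * hypothetical helper below only redoes the length arithmetic for the IPv4 +
 * UDP-encapsulated case.
 */
#if 0
static int
example_resp_len(int cause_len)
{
	int padding_len = (4 - (cause_len % 4)) % 4;

	return ((int)(sizeof(struct ip) + sizeof(struct udphdr) +
	    sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) +
	    cause_len + padding_len));
}
#endif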
11204 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11205 struct sctphdr *sh,
11206 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11207 uint32_t vrf_id, uint16_t port)
11209 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11210 mflowtype, mflowid, fibnum,
11215 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11216 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11221 struct sctp_tmit_chunk *chk;
11222 struct sctp_heartbeat_chunk *hb;
11223 struct timeval now;
11225 SCTP_TCB_LOCK_ASSERT(stcb);
11229 (void)SCTP_GETTIME_TIMEVAL(&now);
11230 switch (net->ro._l_addr.sa.sa_family) {
11242 sctp_alloc_a_chunk(stcb, chk);
11244 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11247 chk->copy_by_ref = 0;
11248 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11249 chk->rec.chunk_id.can_take_data = 1;
11251 chk->asoc = &stcb->asoc;
11252 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11254 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11255 if (chk->data == NULL) {
11256 sctp_free_a_chunk(stcb, chk, so_locked);
11259 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11260 SCTP_BUF_LEN(chk->data) = chk->send_size;
11261 chk->sent = SCTP_DATAGRAM_UNSENT;
11262 chk->snd_count = 0;
11264 atomic_add_int(&chk->whoTo->ref_count, 1);
11265 /* Now we have a mbuf that we can fill in with the details */
11266 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11267 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11268 /* fill out chunk header */
11269 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11270 hb->ch.chunk_flags = 0;
11271 hb->ch.chunk_length = htons(chk->send_size);
11272 /* Fill out hb parameter */
11273 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11274 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11275 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11276 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
11277 /* Did our user request this one? Put it in. */
11278 hb->heartbeat.hb_info.addr_family = (uint8_t) net->ro._l_addr.sa.sa_family;
11279 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11280 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11282 * we only take from the entropy pool if the address is not
11285 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11286 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11288 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11289 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11291 switch (net->ro._l_addr.sa.sa_family) {
11294 memcpy(hb->heartbeat.hb_info.address,
11295 &net->ro._l_addr.sin.sin_addr,
11296 sizeof(net->ro._l_addr.sin.sin_addr));
11301 memcpy(hb->heartbeat.hb_info.address,
11302 &net->ro._l_addr.sin6.sin6_addr,
11303 sizeof(net->ro._l_addr.sin6.sin6_addr));
11308 sctp_m_freem(chk->data);
11311 sctp_free_a_chunk(stcb, chk, so_locked);
11315 net->hb_responded = 0;
11316 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11317 stcb->asoc.ctrl_queue_cnt++;
11318 SCTP_STAT_INCR(sctps_sendheartbeat);
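/*
 * Illustrative sketch (not part of the build): the hb_info parameter filled
 * in above carries the send time in time_value_1/time_value_2.  A
 * HEARTBEAT-ACK echoes the parameter back unchanged, so the elapsed time can
 * be recovered by subtracting the echoed timestamp from the current time;
 * a minimal version of that timeval arithmetic:
 */
#if 0
static void
example_hb_elapsed(const struct timeval *sent, const struct timeval *now,
    struct timeval *elapsed)
{
	elapsed->tv_sec = now->tv_sec - sent->tv_sec;
	elapsed->tv_usec = now->tv_usec - sent->tv_usec;
	if (elapsed->tv_usec < 0) {
		elapsed->tv_sec--;
		elapsed->tv_usec += 1000000;
	}
}
#endif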
11323 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11326 struct sctp_association *asoc;
11327 struct sctp_ecne_chunk *ecne;
11328 struct sctp_tmit_chunk *chk;
11333 asoc = &stcb->asoc;
11334 SCTP_TCB_LOCK_ASSERT(stcb);
11335 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11336 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11337 /* found a previous ECN_ECHO; update it if needed */
11338 uint32_t cnt, ctsn;
11340 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11341 ctsn = ntohl(ecne->tsn);
11342 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11343 ecne->tsn = htonl(high_tsn);
11344 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11346 cnt = ntohl(ecne->num_pkts_since_cwr);
11348 ecne->num_pkts_since_cwr = htonl(cnt);
11352 /* nope, could not find one to update, so we must build one */
11353 sctp_alloc_a_chunk(stcb, chk);
11357 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11358 chk->copy_by_ref = 0;
11359 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11360 chk->rec.chunk_id.can_take_data = 0;
11362 chk->asoc = &stcb->asoc;
11363 chk->send_size = sizeof(struct sctp_ecne_chunk);
11364 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11365 if (chk->data == NULL) {
11366 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11369 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11370 SCTP_BUF_LEN(chk->data) = chk->send_size;
11371 chk->sent = SCTP_DATAGRAM_UNSENT;
11372 chk->snd_count = 0;
11374 atomic_add_int(&chk->whoTo->ref_count, 1);
11376 stcb->asoc.ecn_echo_cnt_onq++;
11377 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11378 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11379 ecne->ch.chunk_flags = 0;
11380 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11381 ecne->tsn = htonl(high_tsn);
11382 ecne->num_pkts_since_cwr = htonl(1);
11383 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11384 asoc->ctrl_queue_cnt++;
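/*
 * Illustrative sketch (not part of the build): SCTP_TSN_GT() used above
 * compares TSNs in serial-number (mod 2^32) arithmetic so that the ECNE
 * update keeps working across TSN wraparound.  One common way to express the
 * same comparison (not necessarily the macro's exact definition):
 */
#if 0
static int
example_tsn_gt(uint32_t a, uint32_t b)
{
	/* "a is newer than b" if the signed 32-bit difference is positive */
	return ((int32_t)(a - b) > 0);
}
#endif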
11388 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11389 struct mbuf *m, int len, int iphlen, int bad_crc)
11391 struct sctp_association *asoc;
11392 struct sctp_pktdrop_chunk *drp;
11393 struct sctp_tmit_chunk *chk;
11399 struct sctp_chunkhdr *ch, chunk_buf;
11400 unsigned int chk_length;
11405 asoc = &stcb->asoc;
11406 SCTP_TCB_LOCK_ASSERT(stcb);
11407 if (asoc->pktdrop_supported == 0) {
11409 * peer must declare support before I send one.
11413 if (stcb->sctp_socket == NULL) {
11416 sctp_alloc_a_chunk(stcb, chk);
11420 chk->copy_by_ref = 0;
11421 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11422 chk->rec.chunk_id.can_take_data = 1;
11425 chk->send_size = len;
11426 /* Validate that we do not have an ABORT in here. */
11427 offset = iphlen + sizeof(struct sctphdr);
11428 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11429 sizeof(*ch), (uint8_t *) & chunk_buf);
11430 while (ch != NULL) {
11431 chk_length = ntohs(ch->chunk_length);
11432 if (chk_length < sizeof(*ch)) {
11433 /* break to abort land */
11436 switch (ch->chunk_type) {
11437 case SCTP_PACKET_DROPPED:
11438 case SCTP_ABORT_ASSOCIATION:
11439 case SCTP_INITIATION_ACK:
11441 * We don't respond with a PKT-DROP to an ABORT
11442 * or PKT-DROP. We also do not respond to an
11443 * INIT-ACK, because we can't know if the initiation
11444 * tag is correct or not.
11446 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11451 offset += SCTP_SIZE32(chk_length);
11452 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11453 sizeof(*ch), (uint8_t *) & chunk_buf);
11456 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11457 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11459 * only send 1 MTU's worth; trim off the excess on the end.
11462 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11465 chk->asoc = &stcb->asoc;
11466 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11467 if (chk->data == NULL) {
11469 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11472 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11473 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11475 sctp_m_freem(chk->data);
11479 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11480 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11481 chk->book_size_scale = 0;
11483 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11484 drp->trunc_len = htons(fullsz);
11486 * Len is already adjusted to size minus overhead above; take
11487 * the pkt_drop chunk itself out of it.
11489 chk->send_size = (uint16_t) (len - sizeof(struct sctp_pktdrop_chunk));
11490 len = chk->send_size;
11492 /* no truncation needed */
11493 drp->ch.chunk_flags = 0;
11494 drp->trunc_len = htons(0);
11497 drp->ch.chunk_flags |= SCTP_BADCRC;
11499 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11500 SCTP_BUF_LEN(chk->data) = chk->send_size;
11501 chk->sent = SCTP_DATAGRAM_UNSENT;
11502 chk->snd_count = 0;
11504 /* we should hit here */
11506 atomic_add_int(&chk->whoTo->ref_count, 1);
11510 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11511 drp->ch.chunk_length = htons(chk->send_size);
11512 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11516 drp->bottle_bw = htonl(spc);
11517 if (asoc->my_rwnd) {
11518 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11519 asoc->size_on_all_streams +
11520 asoc->my_rwnd_control_len +
11521 stcb->sctp_socket->so_rcv.sb_cc);
11524 * If my rwnd is 0, possibly from mbuf depletion as well as
11525 * space used, tell the peer there is NO space aka onq == bw
11527 drp->current_onq = htonl(spc);
11531 m_copydata(m, iphlen, len, (caddr_t)datap);
11532 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11533 asoc->ctrl_queue_cnt++;
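/*
 * Illustrative sketch (not part of the build): when the dropped packet plus
 * overhead does not fit in one MTU (the check above), only the leading part
 * is copied into the PACKET-DROPPED report and trunc_len keeps the original
 * size.  The hypothetical helper below restates how much of the dropped
 * packet gets reported, using the same constants as above.
 */
#if 0
static int
example_pktdrop_report_len(int dropped_len, int smallest_mtu)
{
	int limit = (smallest_mtu < MCLBYTES) ? smallest_mtu : MCLBYTES;

	if (dropped_len + SCTP_MAX_OVERHEAD +
	    (int)sizeof(struct sctp_pktdrop_chunk) > limit) {
		/* truncated: one MTU worth minus overhead and the chunk itself */
		return (limit - SCTP_MAX_OVERHEAD -
		    (int)sizeof(struct sctp_pktdrop_chunk));
	}
	return (dropped_len);
}
#endif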
11537 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11539 struct sctp_association *asoc;
11540 struct sctp_cwr_chunk *cwr;
11541 struct sctp_tmit_chunk *chk;
11543 SCTP_TCB_LOCK_ASSERT(stcb);
11547 asoc = &stcb->asoc;
11548 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11549 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11550 /* found a previous CWR queued to the same destination;
11551 * update it if needed */
11554 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11555 ctsn = ntohl(cwr->tsn);
11556 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11557 cwr->tsn = htonl(high_tsn);
11559 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11560 /* Make sure override is carried */
11561 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11566 sctp_alloc_a_chunk(stcb, chk);
11570 chk->copy_by_ref = 0;
11571 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11572 chk->rec.chunk_id.can_take_data = 1;
11574 chk->asoc = &stcb->asoc;
11575 chk->send_size = sizeof(struct sctp_cwr_chunk);
11576 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11577 if (chk->data == NULL) {
11578 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11581 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11582 SCTP_BUF_LEN(chk->data) = chk->send_size;
11583 chk->sent = SCTP_DATAGRAM_UNSENT;
11584 chk->snd_count = 0;
11586 atomic_add_int(&chk->whoTo->ref_count, 1);
11587 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11588 cwr->ch.chunk_type = SCTP_ECN_CWR;
11589 cwr->ch.chunk_flags = override;
11590 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11591 cwr->tsn = htonl(high_tsn);
11592 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11593 asoc->ctrl_queue_cnt++;
11597 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
11598 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11600 uint16_t len, old_len, i;
11601 struct sctp_stream_reset_out_request *req_out;
11602 struct sctp_chunkhdr *ch;
11604 int number_entries = 0;
11606 ch = mtod(chk->data, struct sctp_chunkhdr *);
11607 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11608 /* get to new offset for the param. */
11609 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11610 /* now how long will this param be? */
11611 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11612 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11613 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11614 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11618 if (number_entries == 0) {
11621 if (number_entries == stcb->asoc.streamoutcnt) {
11622 number_entries = 0;
11624 if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
11625 number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
11627 len = (uint16_t) (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11628 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11629 req_out->ph.param_length = htons(len);
11630 req_out->request_seq = htonl(seq);
11631 req_out->response_seq = htonl(resp_seq);
11632 req_out->send_reset_at_tsn = htonl(last_sent);
11634 if (number_entries) {
11635 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11636 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11637 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11638 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11639 req_out->list_of_streams[at] = htons(i);
11641 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11642 if (at >= number_entries) {
11648 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11649 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11652 if (SCTP_SIZE32(len) > len) {
11654 * Need to worry about the pad we may end up adding to the
11655 * end. This is easy since the struct is either aligned to 4
11656 * bytes or 2 bytes off.
11658 req_out->list_of_streams[number_entries] = 0;
11660 /* now fix the chunk length */
11661 ch->chunk_length = htons(len + old_len);
11662 chk->book_size = len + old_len;
11663 chk->book_size_scale = 0;
11664 chk->send_size = SCTP_SIZE32(chk->book_size);
11665 SCTP_BUF_LEN(chk->data) = chk->send_size;
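/*
 * Illustrative sketch (not part of the build): the ph.param_length written
 * above is the unpadded parameter length (fixed request header plus one
 * 16-bit id per stream); the mbuf length/send_size is then rounded up to a
 * 4-byte boundary, and an odd entry count gets a zero stream id written into
 * the 2-byte pad.  The length part, spelled out with a hypothetical helper:
 */
#if 0
static uint16_t
example_reset_out_param_len(uint16_t number_entries)
{
	return ((uint16_t)(sizeof(struct sctp_stream_reset_out_request) +
	    number_entries * sizeof(uint16_t)));
}
#endif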
11670 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11671 int number_entries, uint16_t * list,
11674 uint16_t len, old_len, i;
11675 struct sctp_stream_reset_in_request *req_in;
11676 struct sctp_chunkhdr *ch;
11678 ch = mtod(chk->data, struct sctp_chunkhdr *);
11679 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11681 /* get to new offset for the param. */
11682 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11683 /* now how long will this param be? */
11684 len = (uint16_t) (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11685 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11686 req_in->ph.param_length = htons(len);
11687 req_in->request_seq = htonl(seq);
11688 if (number_entries) {
11689 for (i = 0; i < number_entries; i++) {
11690 req_in->list_of_streams[i] = htons(list[i]);
11693 if (SCTP_SIZE32(len) > len) {
11695 * Need to worry about the pad we may end up adding to the
11696 * end. This is easy since the struct is either aligned to 4
11697 * bytes or 2 bytes off.
11699 req_in->list_of_streams[number_entries] = 0;
11701 /* now fix the chunk length */
11702 ch->chunk_length = htons(len + old_len);
11703 chk->book_size = len + old_len;
11704 chk->book_size_scale = 0;
11705 chk->send_size = SCTP_SIZE32(chk->book_size);
11706 SCTP_BUF_LEN(chk->data) = chk->send_size;
11711 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11714 uint16_t len, old_len;
11715 struct sctp_stream_reset_tsn_request *req_tsn;
11716 struct sctp_chunkhdr *ch;
11718 ch = mtod(chk->data, struct sctp_chunkhdr *);
11719 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11721 /* get to new offset for the param. */
11722 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11723 /* now how long will this param be? */
11724 len = sizeof(struct sctp_stream_reset_tsn_request);
11725 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11726 req_tsn->ph.param_length = htons(len);
11727 req_tsn->request_seq = htonl(seq);
11729 /* now fix the chunk length */
11730 ch->chunk_length = htons(len + old_len);
11731 chk->send_size = len + old_len;
11732 chk->book_size = SCTP_SIZE32(chk->send_size);
11733 chk->book_size_scale = 0;
11734 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11739 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11740 uint32_t resp_seq, uint32_t result)
11742 uint16_t len, old_len;
11743 struct sctp_stream_reset_response *resp;
11744 struct sctp_chunkhdr *ch;
11746 ch = mtod(chk->data, struct sctp_chunkhdr *);
11747 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11749 /* get to new offset for the param. */
11750 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11751 /* now how long will this param be? */
11752 len = sizeof(struct sctp_stream_reset_response);
11753 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11754 resp->ph.param_length = htons(len);
11755 resp->response_seq = htonl(resp_seq);
11756 resp->result = ntohl(result);
11758 /* now fix the chunk length */
11759 ch->chunk_length = htons(len + old_len);
11760 chk->book_size = len + old_len;
11761 chk->book_size_scale = 0;
11762 chk->send_size = SCTP_SIZE32(chk->book_size);
11763 SCTP_BUF_LEN(chk->data) = chk->send_size;
11768 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
11769 struct sctp_stream_reset_list *ent,
11772 struct sctp_association *asoc;
11773 struct sctp_tmit_chunk *chk;
11774 struct sctp_chunkhdr *ch;
11776 asoc = &stcb->asoc;
11779 * Reset our last reset action to the new response
11780 * (probably PERFORMED). This assures that if we fail to send, a
11781 * retransmission from the peer will get the new response.
11783 asoc->last_reset_action[0] = response;
11784 if (asoc->stream_reset_outstanding) {
11787 sctp_alloc_a_chunk(stcb, chk);
11789 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11792 chk->copy_by_ref = 0;
11793 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11794 chk->rec.chunk_id.can_take_data = 0;
11796 chk->asoc = &stcb->asoc;
11797 chk->book_size = sizeof(struct sctp_chunkhdr);
11798 chk->send_size = SCTP_SIZE32(chk->book_size);
11799 chk->book_size_scale = 0;
11800 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11801 if (chk->data == NULL) {
11802 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11803 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11806 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11807 /* setup chunk parameters */
11808 chk->sent = SCTP_DATAGRAM_UNSENT;
11809 chk->snd_count = 0;
11810 if (stcb->asoc.alternate) {
11811 chk->whoTo = stcb->asoc.alternate;
11813 chk->whoTo = stcb->asoc.primary_destination;
11815 ch = mtod(chk->data, struct sctp_chunkhdr *);
11816 ch->chunk_type = SCTP_STREAM_RESET;
11817 ch->chunk_flags = 0;
11818 ch->chunk_length = htons(chk->book_size);
11819 atomic_add_int(&chk->whoTo->ref_count, 1);
11820 SCTP_BUF_LEN(chk->data) = chk->send_size;
11821 sctp_add_stream_reset_result(chk, ent->seq, response);
11822 /* insert the chunk for sending */
11823 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11826 asoc->ctrl_queue_cnt++;
11830 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11831 uint32_t resp_seq, uint32_t result,
11832 uint32_t send_una, uint32_t recv_next)
11834 uint16_t len, old_len;
11835 struct sctp_stream_reset_response_tsn *resp;
11836 struct sctp_chunkhdr *ch;
11838 ch = mtod(chk->data, struct sctp_chunkhdr *);
11839 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11841 /* get to new offset for the param. */
11842 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11843 /* now how long will this param be? */
11844 len = sizeof(struct sctp_stream_reset_response_tsn);
11845 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11846 resp->ph.param_length = htons(len);
11847 resp->response_seq = htonl(resp_seq);
11848 resp->result = htonl(result);
11849 resp->senders_next_tsn = htonl(send_una);
11850 resp->receivers_next_tsn = htonl(recv_next);
11852 /* now fix the chunk length */
11853 ch->chunk_length = htons(len + old_len);
11854 chk->book_size = len + old_len;
11855 chk->send_size = SCTP_SIZE32(chk->book_size);
11856 chk->book_size_scale = 0;
11857 SCTP_BUF_LEN(chk->data) = chk->send_size;
11862 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11866 uint16_t len, old_len;
11867 struct sctp_chunkhdr *ch;
11868 struct sctp_stream_reset_add_strm *addstr;
11870 ch = mtod(chk->data, struct sctp_chunkhdr *);
11871 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11873 /* get to new offset for the param. */
11874 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11875 /* now how long will this param be? */
11876 len = sizeof(struct sctp_stream_reset_add_strm);
11879 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11880 addstr->ph.param_length = htons(len);
11881 addstr->request_seq = htonl(seq);
11882 addstr->number_of_streams = htons(adding);
11883 addstr->reserved = 0;
11885 /* now fix the chunk length */
11886 ch->chunk_length = htons(len + old_len);
11887 chk->send_size = len + old_len;
11888 chk->book_size = SCTP_SIZE32(chk->send_size);
11889 chk->book_size_scale = 0;
11890 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11895 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
11899 uint16_t len, old_len;
11900 struct sctp_chunkhdr *ch;
11901 struct sctp_stream_reset_add_strm *addstr;
11903 ch = mtod(chk->data, struct sctp_chunkhdr *);
11904 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11906 /* get to new offset for the param. */
11907 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11908 /* now how long will this param be? */
11909 len = sizeof(struct sctp_stream_reset_add_strm);
11911 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
11912 addstr->ph.param_length = htons(len);
11913 addstr->request_seq = htonl(seq);
11914 addstr->number_of_streams = htons(adding);
11915 addstr->reserved = 0;
11917 /* now fix the chunk length */
11918 ch->chunk_length = htons(len + old_len);
11919 chk->send_size = len + old_len;
11920 chk->book_size = SCTP_SIZE32(chk->send_size);
11921 chk->book_size_scale = 0;
11922 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11927 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
11929 struct sctp_association *asoc;
11930 struct sctp_tmit_chunk *chk;
11931 struct sctp_chunkhdr *ch;
11934 asoc = &stcb->asoc;
11935 asoc->trigger_reset = 0;
11936 if (asoc->stream_reset_outstanding) {
11939 sctp_alloc_a_chunk(stcb, chk);
11941 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11944 chk->copy_by_ref = 0;
11945 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11946 chk->rec.chunk_id.can_take_data = 0;
11948 chk->asoc = &stcb->asoc;
11949 chk->book_size = sizeof(struct sctp_chunkhdr);
11950 chk->send_size = SCTP_SIZE32(chk->book_size);
11951 chk->book_size_scale = 0;
11952 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11953 if (chk->data == NULL) {
11954 sctp_free_a_chunk(stcb, chk, so_locked);
11955 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11958 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11960 /* setup chunk parameters */
11961 chk->sent = SCTP_DATAGRAM_UNSENT;
11962 chk->snd_count = 0;
11963 if (stcb->asoc.alternate) {
11964 chk->whoTo = stcb->asoc.alternate;
11966 chk->whoTo = stcb->asoc.primary_destination;
11968 ch = mtod(chk->data, struct sctp_chunkhdr *);
11969 ch->chunk_type = SCTP_STREAM_RESET;
11970 ch->chunk_flags = 0;
11971 ch->chunk_length = htons(chk->book_size);
11972 atomic_add_int(&chk->whoTo->ref_count, 1);
11973 SCTP_BUF_LEN(chk->data) = chk->send_size;
11974 seq = stcb->asoc.str_reset_seq_out;
11975 if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
11977 asoc->stream_reset_outstanding++;
11979 m_freem(chk->data);
11981 sctp_free_a_chunk(stcb, chk, so_locked);
11984 asoc->str_reset = chk;
11985 /* insert the chunk for sending */
11986 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11989 asoc->ctrl_queue_cnt++;
11991 if (stcb->asoc.send_sack) {
11992 sctp_send_sack(stcb, so_locked);
11994 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
11999 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12000 uint16_t number_entries, uint16_t * list,
12001 uint8_t send_in_req,
12002 uint8_t send_tsn_req,
12003 uint8_t add_stream,
12005 uint16_t adding_i, uint8_t peer_asked)
12007 struct sctp_association *asoc;
12008 struct sctp_tmit_chunk *chk;
12009 struct sctp_chunkhdr *ch;
12010 int can_send_out_req = 0;
12013 asoc = &stcb->asoc;
12014 if (asoc->stream_reset_outstanding) {
12016 * Already one pending, must get ACK back to clear the flag.
12018 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12021 if ((send_in_req == 0) && (send_tsn_req == 0) &&
12022 (add_stream == 0)) {
12023 /* nothing to do */
12024 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12027 if (send_tsn_req && send_in_req) {
12028 /* error, can't do that */
12029 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12031 } else if (send_in_req) {
12032 can_send_out_req = 1;
12034 if (number_entries > (MCLBYTES -
12035 SCTP_MIN_OVERHEAD -
12036 sizeof(struct sctp_chunkhdr) -
12037 sizeof(struct sctp_stream_reset_out_request)) /
12038 sizeof(uint16_t)) {
12039 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12042 sctp_alloc_a_chunk(stcb, chk);
12044 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12047 chk->copy_by_ref = 0;
12048 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12049 chk->rec.chunk_id.can_take_data = 0;
12051 chk->asoc = &stcb->asoc;
12052 chk->book_size = sizeof(struct sctp_chunkhdr);
12053 chk->send_size = SCTP_SIZE32(chk->book_size);
12054 chk->book_size_scale = 0;
12056 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12057 if (chk->data == NULL) {
12058 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12059 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12062 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12064 /* setup chunk parameters */
12065 chk->sent = SCTP_DATAGRAM_UNSENT;
12066 chk->snd_count = 0;
12067 if (stcb->asoc.alternate) {
12068 chk->whoTo = stcb->asoc.alternate;
12070 chk->whoTo = stcb->asoc.primary_destination;
12072 atomic_add_int(&chk->whoTo->ref_count, 1);
12073 ch = mtod(chk->data, struct sctp_chunkhdr *);
12074 ch->chunk_type = SCTP_STREAM_RESET;
12075 ch->chunk_flags = 0;
12076 ch->chunk_length = htons(chk->book_size);
12077 SCTP_BUF_LEN(chk->data) = chk->send_size;
12079 seq = stcb->asoc.str_reset_seq_out;
12080 if (can_send_out_req) {
12083 ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12086 asoc->stream_reset_outstanding++;
12089 if ((add_stream & 1) &&
12090 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12091 /* Need to allocate more */
12092 struct sctp_stream_out *oldstream;
12093 struct sctp_stream_queue_pending *sp, *nsp;
12095 #if defined(SCTP_DETAILED_STR_STATS)
12099 oldstream = stcb->asoc.strmout;
12100 /* get some more */
12101 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12102 (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12104 if (stcb->asoc.strmout == NULL) {
12107 stcb->asoc.strmout = oldstream;
12108 /* Turn off the bit */
12109 x = add_stream & 0xfe;
12114 * Ok now we proceed with copying the old out stuff and
12115 * initializing the new stuff.
12117 SCTP_TCB_SEND_LOCK(stcb);
12118 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12119 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12120 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12121 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12122 stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
12123 stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
12124 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12125 stcb->asoc.strmout[i].stream_no = i;
12126 stcb->asoc.strmout[i].state = oldstream[i].state;
12127 /* FIX ME FIX ME */
12128 /* This should be a SS_COPY operation FIX ME STREAM
12129 * SCHEDULER EXPERT */
12130 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
12131 /* now anything on those queues? */
12132 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12133 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12134 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12138 /* now the new streams */
12139 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
12140 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12141 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12142 stcb->asoc.strmout[i].chunks_on_queues = 0;
12143 #if defined(SCTP_DETAILED_STR_STATS)
12144 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12145 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12146 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12149 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12150 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12152 stcb->asoc.strmout[i].next_mid_ordered = 0;
12153 stcb->asoc.strmout[i].next_mid_unordered = 0;
12154 stcb->asoc.strmout[i].stream_no = i;
12155 stcb->asoc.strmout[i].last_msg_incomplete = 0;
12156 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
12157 stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
12159 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
12160 SCTP_FREE(oldstream, SCTP_M_STRMO);
12161 SCTP_TCB_SEND_UNLOCK(stcb);
12164 if ((add_stream & 1) && (adding_o > 0)) {
12165 asoc->strm_pending_add_size = adding_o;
12166 asoc->peer_req_out = peer_asked;
12167 sctp_add_an_out_stream(chk, seq, adding_o);
12169 asoc->stream_reset_outstanding++;
12171 if ((add_stream & 2) && (adding_i > 0)) {
12172 sctp_add_an_in_stream(chk, seq, adding_i);
12174 asoc->stream_reset_outstanding++;
12177 sctp_add_stream_reset_in(chk, number_entries, list, seq);
12179 asoc->stream_reset_outstanding++;
12181 if (send_tsn_req) {
12182 sctp_add_stream_reset_tsn(chk, seq);
12183 asoc->stream_reset_outstanding++;
12185 asoc->str_reset = chk;
12186 /* insert the chunk for sending */
12187 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12190 asoc->ctrl_queue_cnt++;
12191 if (stcb->asoc.send_sack) {
12192 sctp_send_sack(stcb, SCTP_SO_LOCKED);
12194 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
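/*
 * Illustrative sketch (not part of the build): the number_entries check near
 * the top of sctp_send_str_reset_req() bounds how many 16-bit stream ids fit
 * into one MCLBYTES request mbuf once the reserved overhead, the chunk header
 * and the fixed part of the OUT request are subtracted.  The same bound,
 * spelled out with a hypothetical helper:
 */
#if 0
static unsigned int
example_max_reset_entries(void)
{
	return ((unsigned int)((MCLBYTES -
	    SCTP_MIN_OVERHEAD -
	    sizeof(struct sctp_chunkhdr) -
	    sizeof(struct sctp_stream_reset_out_request)) /
	    sizeof(uint16_t)));
}
#endif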
12199 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
12200 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12201 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12202 uint32_t vrf_id, uint16_t port)
12204 /* Don't respond to an ABORT with an ABORT. */
12205 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
12207 sctp_m_freem(cause);
12210 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
12211 mflowtype, mflowid, fibnum,
12217 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
12218 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12219 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12220 uint32_t vrf_id, uint16_t port)
12222 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
12223 mflowtype, mflowid, fibnum,
12228 static struct mbuf *
12229 sctp_copy_resume(struct uio *uio,
12231 int user_marks_eor,
12234 struct mbuf **new_tail)
12238 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12239 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12241 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12244 *sndout = m_length(m, NULL);
12245 *new_tail = m_last(m);
12251 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12255 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12257 if (sp->data == NULL) {
12258 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12261 sp->tail_mbuf = m_last(sp->data);
12267 static struct sctp_stream_queue_pending *
12268 sctp_copy_it_in(struct sctp_tcb *stcb,
12269 struct sctp_association *asoc,
12270 struct sctp_sndrcvinfo *srcv,
12272 struct sctp_nets *net,
12274 int user_marks_eor,
12278 * This routine must be very careful in its work. Protocol
12279 * processing is up and running, so care must be taken to spl...()
12280 * when you need to do something that may affect the stcb/asoc. The
12281 * sb is locked however. When data is copied the protocol processing
12282 * should be enabled since this is a slower operation...
12284 struct sctp_stream_queue_pending *sp = NULL;
12288 /* Now can we send this? */
12289 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12290 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12291 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12292 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12293 /* got data while shutting down */
12294 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12295 *error = ECONNRESET;
12298 sctp_alloc_a_strmoq(stcb, sp);
12300 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12305 sp->sender_all_done = 0;
12306 sp->sinfo_flags = srcv->sinfo_flags;
12307 sp->timetolive = srcv->sinfo_timetolive;
12308 sp->ppid = srcv->sinfo_ppid;
12309 sp->context = srcv->sinfo_context;
12311 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12313 sp->stream = srcv->sinfo_stream;
12314 sp->length = (uint32_t) min(uio->uio_resid, max_send_len);
12315 if ((sp->length == (uint32_t) uio->uio_resid) &&
12316 ((user_marks_eor == 0) ||
12317 (srcv->sinfo_flags & SCTP_EOF) ||
12318 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12319 sp->msg_is_complete = 1;
12321 sp->msg_is_complete = 0;
12323 sp->sender_all_done = 0;
12324 sp->some_taken = 0;
12325 sp->put_last_out = 0;
12326 resv_in_first = sizeof(struct sctp_data_chunk);
12327 sp->data = sp->tail_mbuf = NULL;
12328 if (sp->length == 0) {
12332 if (srcv->sinfo_keynumber_valid) {
12333 sp->auth_keyid = srcv->sinfo_keynumber;
12335 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12337 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12338 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12339 sp->holds_key_ref = 1;
12341 *error = sctp_copy_one(sp, uio, resv_in_first);
12344 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12347 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12349 atomic_add_int(&sp->net->ref_count, 1);
12353 sctp_set_prsctp_policy(sp);
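/*
 * Illustrative sketch (not part of the build): sp->msg_is_complete above is
 * set when this copy consumes the whole uio and either explicit EOR is off or
 * the caller marked the message with SCTP_EOF/SCTP_EOR.  A stand-alone
 * restatement of that predicate (names are hypothetical):
 */
#if 0
static int
example_msg_is_complete(uint32_t copied, uint32_t resid,
    int user_marks_eor, uint16_t sinfo_flags)
{
	return ((copied == resid) &&
	    ((user_marks_eor == 0) ||
	     (sinfo_flags & SCTP_EOF) ||
	     (user_marks_eor && (sinfo_flags & SCTP_EOR))));
}
#endif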
12361 sctp_sosend(struct socket *so,
12362 struct sockaddr *addr,
12365 struct mbuf *control,
12370 int error, use_sndinfo = 0;
12371 struct sctp_sndrcvinfo sndrcvninfo;
12372 struct sockaddr *addr_to_use;
12373 #if defined(INET) && defined(INET6)
12374 struct sockaddr_in sin;
12378 /* process cmsg snd/rcv info (maybe an assoc-id) */
12379 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12380 sizeof(sndrcvninfo))) {
12385 addr_to_use = addr;
12386 #if defined(INET) && defined(INET6)
12387 if ((addr) && (addr->sa_family == AF_INET6)) {
12388 struct sockaddr_in6 *sin6;
12390 sin6 = (struct sockaddr_in6 *)addr;
12391 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12392 in6_sin6_2_sin(&sin, sin6);
12393 addr_to_use = (struct sockaddr *)&sin;
12397 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12400 use_sndinfo ? &sndrcvninfo : NULL
12408 sctp_lower_sosend(struct socket *so,
12409 struct sockaddr *addr,
12411 struct mbuf *i_pak,
12412 struct mbuf *control,
12414 struct sctp_sndrcvinfo *srcv
12419 unsigned int sndlen = 0, max_len;
12421 struct mbuf *top = NULL;
12422 int queue_only = 0, queue_only_for_init = 0;
12423 int free_cnt_applied = 0;
12425 int now_filled = 0;
12426 unsigned int inqueue_bytes = 0;
12427 struct sctp_block_entry be;
12428 struct sctp_inpcb *inp;
12429 struct sctp_tcb *stcb = NULL;
12430 struct timeval now;
12431 struct sctp_nets *net;
12432 struct sctp_association *asoc;
12433 struct sctp_inpcb *t_inp;
12434 int user_marks_eor;
12435 int create_lock_applied = 0;
12436 int nagle_applies = 0;
12437 int some_on_control = 0;
12438 int got_all_of_the_send = 0;
12439 int hold_tcblock = 0;
12440 int non_blocking = 0;
12441 uint32_t local_add_more, local_soresv = 0;
12443 uint16_t sinfo_flags;
12444 sctp_assoc_t sinfo_assoc_id;
12451 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12453 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12456 SCTP_RELEASE_PKT(i_pak);
12460 if ((uio == NULL) && (i_pak == NULL)) {
12461 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12464 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12465 atomic_add_int(&inp->total_sends, 1);
12467 if (uio->uio_resid < 0) {
12468 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12471 sndlen = (unsigned int)uio->uio_resid;
12473 top = SCTP_HEADER_TO_CHAIN(i_pak);
12474 sndlen = SCTP_HEADER_LEN(i_pak);
12476 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12479 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12480 (inp->sctp_socket->so_qlimit)) {
12481 /* The listener can NOT send */
12482 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12487 * Pre-screen address; if one is given, the sin-len
12488 * must be set correctly!
12491 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12493 switch (raddr->sa.sa_family) {
12496 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12497 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12501 port = raddr->sin.sin_port;
12506 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12507 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12511 port = raddr->sin6.sin6_port;
12515 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12516 error = EAFNOSUPPORT;
12523 sinfo_flags = srcv->sinfo_flags;
12524 sinfo_assoc_id = srcv->sinfo_assoc_id;
12525 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12526 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12527 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12531 if (srcv->sinfo_flags)
12532 SCTP_STAT_INCR(sctps_sends_with_flags);
12534 sinfo_flags = inp->def_send.sinfo_flags;
12535 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12537 if (sinfo_flags & SCTP_SENDALL) {
12538 /* it's a sendall */
12539 error = sctp_sendall(inp, uio, top, srcv);
12543 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12544 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12548 /* now we must find the assoc */
12549 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12550 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12551 SCTP_INP_RLOCK(inp);
12552 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12554 SCTP_TCB_LOCK(stcb);
12557 SCTP_INP_RUNLOCK(inp);
12558 } else if (sinfo_assoc_id) {
12559 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
12560 if (stcb != NULL) {
12565 * Since we did not use findep we must
12566 * increment it, and if we don't find a tcb
12569 SCTP_INP_WLOCK(inp);
12570 SCTP_INP_INCR_REF(inp);
12571 SCTP_INP_WUNLOCK(inp);
12572 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12573 if (stcb == NULL) {
12574 SCTP_INP_WLOCK(inp);
12575 SCTP_INP_DECR_REF(inp);
12576 SCTP_INP_WUNLOCK(inp);
12581 if ((stcb == NULL) && (addr)) {
12582 /* Possible implicit send? */
12583 SCTP_ASOC_CREATE_LOCK(inp);
12584 create_lock_applied = 1;
12585 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12586 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12587 /* Should I really unlock? */
12588 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12593 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12594 (addr->sa_family == AF_INET6)) {
12595 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12599 SCTP_INP_WLOCK(inp);
12600 SCTP_INP_INCR_REF(inp);
12601 SCTP_INP_WUNLOCK(inp);
12602 /* With the lock applied look again */
12603 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12604 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12605 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12607 if (stcb == NULL) {
12608 SCTP_INP_WLOCK(inp);
12609 SCTP_INP_DECR_REF(inp);
12610 SCTP_INP_WUNLOCK(inp);
12617 if (t_inp != inp) {
12618 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12623 if (stcb == NULL) {
12624 if (addr == NULL) {
12625 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12629 /* We must go ahead and start the INIT process */
12632 if ((sinfo_flags & SCTP_ABORT) ||
12633 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12635 * User asks to abort a non-existent assoc,
12636 * or EOF a non-existent assoc with no data
12638 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12642 /* get an asoc/stcb struct */
12643 vrf_id = inp->def_vrf_id;
12645 if (create_lock_applied == 0) {
12646 panic("Error, should hold create lock and I don't?");
12649 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12650 inp->sctp_ep.pre_open_stream_count,
12653 if (stcb == NULL) {
12654 /* Error is setup for us in the call */
12657 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12658 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12659 /* Set the connected flag so we can queue data */
12661 soisconnecting(so);
12664 if (create_lock_applied) {
12665 SCTP_ASOC_CREATE_UNLOCK(inp);
12666 create_lock_applied = 0;
12668 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12670 /* Turn on queue only flag to prevent data from being sent */
12673 asoc = &stcb->asoc;
12674 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12675 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12677 /* initialize authentication params for the assoc */
12678 sctp_initialize_auth_params(inp, stcb);
12681 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12682 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
12683 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
12689 /* out with the INIT */
12690 queue_only_for_init = 1;
12692 * we may want to dig in after this call and adjust the MTU
12693 * value. It defaulted to 1500 (constant) but the ro
12694 * structure may now have an update and thus we may need to
12695 * change it BEFORE we append the message.
12699 asoc = &stcb->asoc;
12701 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12702 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12704 net = sctp_findnet(stcb, addr);
12707 if ((net == NULL) ||
12708 ((port != 0) && (port != stcb->rport))) {
12709 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12714 if (stcb->asoc.alternate) {
12715 net = stcb->asoc.alternate;
12717 net = stcb->asoc.primary_destination;
12720 atomic_add_int(&stcb->total_sends, 1);
12721 /* Keep the stcb from being freed under our feet */
12722 atomic_add_int(&asoc->refcnt, 1);
12723 free_cnt_applied = 1;
12725 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12726 if (sndlen > asoc->smallest_mtu) {
12727 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
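/*
 * Decide whether this send may block: a non-blocking socket or an MSG_NBIO
 * flag on the call selects the non-blocking path checked just below.
 */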
12732 if (SCTP_SO_IS_NBIO(so)
12733 || (flags & MSG_NBIO)
12737 /* would we block? */
12738 if (non_blocking) {
12739 if (hold_tcblock == 0) {
12740 SCTP_TCB_LOCK(stcb);
12743 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12744 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12745 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12746 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12747 if (sndlen > SCTP_SB_LIMIT_SND(so))
12750 error = EWOULDBLOCK;
12753 stcb->asoc.sb_send_resv += sndlen;
12754 SCTP_TCB_UNLOCK(stcb);
12757 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12759 local_soresv = sndlen;
12760 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12761 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12762 error = ECONNRESET;
12765 if (create_lock_applied) {
12766 SCTP_ASOC_CREATE_UNLOCK(inp);
12767 create_lock_applied = 0;
12769 /* Is the stream no. valid? */
12770 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12771 /* Invalid stream number */
12772 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12776 if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
12777 (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
12779 * Can't queue any data while stream reset is underway.
12781 if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
12786 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
12789 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12790 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12793 /* we are now done with all control */
12795 sctp_m_freem(control);
12798 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12799 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12800 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12801 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12802 if (srcv->sinfo_flags & SCTP_ABORT) {
12805 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12806 error = ECONNRESET;
12810 /* Ok, we will attempt a msgsnd :> */
12812 p->td_ru.ru_msgsnd++;
12814 /* Are we aborting? */
12815 if (srcv->sinfo_flags & SCTP_ABORT) {
12817 int tot_demand, tot_out = 0, max_out;
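/*
 * User-initiated ABORT: any user data supplied with the send becomes the
 * payload of a SCTP_CAUSE_USER_INITIATED_ABT error cause, so the whole
 * cause must fit in a single MTU.  For illustration only (a user-space
 * view, not part of this file), such a send would typically look like:
 *     sctp_sendmsg(fd, reason, reason_len, (struct sockaddr *)&peer,
 *                  sizeof(peer), 0, SCTP_ABORT, 0, 0, 0);
 */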
12819 SCTP_STAT_INCR(sctps_sends_with_abort);
12820 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12821 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12822 /* It has to be up before we abort */
12823 /* how big is the user initiated abort? */
12824 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12828 if (hold_tcblock) {
12829 SCTP_TCB_UNLOCK(stcb);
12833 struct mbuf *cntm = NULL;
12835 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
12837 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12838 tot_out += SCTP_BUF_LEN(cntm);
12842 /* Must fit in an MTU */
12844 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12845 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12847 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12851 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
12854 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12858 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12859 max_out -= sizeof(struct sctp_abort_msg);
12860 if (tot_out > max_out) {
12864 struct sctp_paramhdr *ph;
12866 /* now move forward the data pointer */
12867 ph = mtod(mm, struct sctp_paramhdr *);
12868 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12869 ph->param_length = htons((uint16_t) (sizeof(struct sctp_paramhdr) + tot_out));
12871 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12873 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12876 * Here, if we can't get the user's data we
12877 * still abort; we just don't get to
12878 * send the user's note :-0
12885 SCTP_BUF_NEXT(mm) = top;
12889 if (hold_tcblock == 0) {
12890 SCTP_TCB_LOCK(stcb);
12892 atomic_add_int(&stcb->asoc.refcnt, -1);
12893 free_cnt_applied = 0;
12894 /* release this lock, otherwise we hang on ourselves */
12895 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
12896 /* now relock the stcb so everything is sane */
12900 * In this case top is already chained to mm; avoid a double
12901 * free, since we free it below if top != NULL and the driver
12902 * would free it after sending the packet out.
12909 /* Calculate the maximum we can send */
12910 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12911 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12912 if (non_blocking) {
12913 /* we already checked for non-blocking above. */
12916 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12921 if (hold_tcblock) {
12922 SCTP_TCB_UNLOCK(stcb);
12925 if (asoc->strmout == NULL) {
12926 /* huh? software error */
12927 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12931 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
12932 if ((user_marks_eor == 0) &&
12933 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
12934 /* It will NEVER fit */
12935 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12939 if ((uio == NULL) && user_marks_eor) {
12941 * We do not support eeor mode for
12942 * sending with mbuf chains (like sendfile).
12944 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12948 if (user_marks_eor) {
12949 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
12952 * For non-eeor the whole message must fit in
12953 * the socket send buffer.
12955 local_add_more = sndlen;
12958 if (non_blocking) {
12959 goto skip_preblock;
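/*
 * Pre-block: for blocking sends, sleep on the send socket buffer (sbwait)
 * until there is room for at least local_add_more bytes and the queued
 * chunk count is under sctp_max_chunks_on_queue, rechecking the limits
 * after every wakeup.
 */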
12961 if (((max_len <= local_add_more) &&
12962 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
12964 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12965 /* No room right now ! */
12966 SOCKBUF_LOCK(&so->so_snd);
12967 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12968 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
12969 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12970 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
12971 (unsigned int)SCTP_SB_LIMIT_SND(so),
12974 stcb->asoc.stream_queue_cnt,
12975 stcb->asoc.chunks_on_out_queue,
12976 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
12977 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12978 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
12981 stcb->block_entry = &be;
12982 error = sbwait(&so->so_snd);
12983 stcb->block_entry = NULL;
12984 if (error || so->so_error || be.error) {
12987 error = so->so_error;
12992 SOCKBUF_UNLOCK(&so->so_snd);
12995 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12996 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
12997 asoc, stcb->asoc.total_output_queue_size);
12999 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13000 SOCKBUF_UNLOCK(&so->so_snd);
13003 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13005 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13006 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13010 SOCKBUF_UNLOCK(&so->so_snd);
13013 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13017 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
13018 * case. NOTE: uio will be NULL when top (an mbuf chain) is passed.
13021 if (srcv->sinfo_flags & SCTP_EOF) {
13022 got_all_of_the_send = 1;
13025 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13031 struct sctp_stream_queue_pending *sp;
13032 struct sctp_stream_out *strm;
13035 SCTP_TCB_SEND_LOCK(stcb);
13036 if ((asoc->stream_locked) &&
13037 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13038 SCTP_TCB_SEND_UNLOCK(stcb);
13039 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13043 SCTP_TCB_SEND_UNLOCK(stcb);
13045 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
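/*
 * Either start a brand new stream_queue_pending entry for this stream, or
 * (further below) continue filling the last, still-incomplete message this
 * sender is locked onto.
 */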
13046 if (strm->last_msg_incomplete == 0) {
13048 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13049 if ((sp == NULL) || (error)) {
13052 SCTP_TCB_SEND_LOCK(stcb);
13053 if (sp->msg_is_complete) {
13054 strm->last_msg_incomplete = 0;
13055 asoc->stream_locked = 0;
13058 * Just got locked to this guy in case of an interrupt.
13061 strm->last_msg_incomplete = 1;
13062 if (stcb->asoc.idata_supported == 0) {
13063 asoc->stream_locked = 1;
13064 asoc->stream_locked_on = srcv->sinfo_stream;
13066 sp->sender_all_done = 0;
13068 sctp_snd_sb_alloc(stcb, sp->length);
13069 atomic_add_int(&asoc->stream_queue_cnt, 1);
13070 if (srcv->sinfo_flags & SCTP_UNORDERED) {
13071 SCTP_STAT_INCR(sctps_sends_with_unord);
13073 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13074 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13075 SCTP_TCB_SEND_UNLOCK(stcb);
13077 SCTP_TCB_SEND_LOCK(stcb);
13078 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13079 SCTP_TCB_SEND_UNLOCK(stcb);
13081 /* ???? Huh ??? last msg is gone */
13083 panic("Warning: Last msg marked incomplete, yet nothing left?");
13085 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13086 strm->last_msg_incomplete = 0;
13092 while (uio->uio_resid > 0) {
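/*
 * Copy loop: take up to max_len bytes at a time from the uio into an mbuf
 * chain (sctp_copy_resume), append it to sp, and recompute the remaining
 * send-buffer room on each pass.
 */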
13093 /* How much room do we have? */
13094 struct mbuf *new_tail, *mm;
13096 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13097 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
13101 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13102 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13103 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13106 if (hold_tcblock) {
13107 SCTP_TCB_UNLOCK(stcb);
13110 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
13111 if ((mm == NULL) || error) {
13117 /* Update the mbuf and count */
13118 SCTP_TCB_SEND_LOCK(stcb);
13119 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13121 * we need to get out. Peer probably aborted.
13125 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
13126 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13127 error = ECONNRESET;
13129 SCTP_TCB_SEND_UNLOCK(stcb);
13132 if (sp->tail_mbuf) {
13133 /* tack it to the end */
13134 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13135 sp->tail_mbuf = new_tail;
13137 /* A stolen mbuf */
13139 sp->tail_mbuf = new_tail;
13141 sctp_snd_sb_alloc(stcb, sndout);
13142 atomic_add_int(&sp->length, sndout);
13144 if (srcv->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
13145 sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
13147 /* Did we reach EOR? */
13148 if ((uio->uio_resid == 0) &&
13149 ((user_marks_eor == 0) ||
13150 (srcv->sinfo_flags & SCTP_EOF) ||
13151 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13152 sp->msg_is_complete = 1;
13154 sp->msg_is_complete = 0;
13156 SCTP_TCB_SEND_UNLOCK(stcb);
13158 if (uio->uio_resid == 0) {
13163 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
13164 /* This is ugly but we must assure locking order */
13166 if (hold_tcblock == 0) {
13167 SCTP_TCB_LOCK(stcb);
13170 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
13171 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13172 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13173 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13179 SCTP_TCB_UNLOCK(stcb);
13182 /* wait for space now */
13183 if (non_blocking) {
13184 /* Non-blocking io in place out */
13187 /* What about the INIT, send it maybe */
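/*
 * The assoc was created implicitly above, so its INIT has not gone out yet:
 * unless a COOKIE collision already moved the state to OPEN, send the INIT
 * now and enter COOKIE-WAIT.
 */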
13188 if (queue_only_for_init) {
13189 if (hold_tcblock == 0) {
13190 SCTP_TCB_LOCK(stcb);
13193 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13194 /* a collision took us forward? */
13197 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13198 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13202 if ((net->flight_size > net->cwnd) &&
13203 (asoc->sctp_cmt_on_off == 0)) {
13204 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13206 } else if (asoc->ifp_had_enobuf) {
13207 SCTP_STAT_INCR(sctps_ifnomemqueued);
13208 if (net->flight_size > (2 * net->mtu)) {
13211 asoc->ifp_had_enobuf = 0;
13213 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13214 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
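/*
 * un_sent approximates the queued-but-unsent bytes: total queued output
 * minus bytes already in flight, plus per-message DATA chunk header
 * overhead; it feeds the Nagle test below.
 */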
13215 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13216 (stcb->asoc.total_flight > 0) &&
13217 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13218 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13221 * Ok, Nagle is set on and we have data outstanding.
13222 * Don't send anything and let SACKs drive out the
13223 * data unless we have a "full" segment to send.
13225 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13226 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13228 SCTP_STAT_INCR(sctps_naglequeued);
13231 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13232 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13233 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13235 SCTP_STAT_INCR(sctps_naglesent);
13238 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13240 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13241 nagle_applies, un_sent);
13242 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13243 stcb->asoc.total_flight,
13244 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13246 if (queue_only_for_init)
13247 queue_only_for_init = 0;
13248 if ((queue_only == 0) && (nagle_applies == 0)) {
13250 * need to start chunk output
13251 * before blocking.. note that if
13252 * a lock is already applied, then
13253 * the input via the net is happening
13254 * and I don't need to start output :-D
13256 if (hold_tcblock == 0) {
13257 if (SCTP_TCB_TRYLOCK(stcb)) {
13259 sctp_chunk_output(inp,
13261 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13264 sctp_chunk_output(inp,
13266 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13268 if (hold_tcblock == 1) {
13269 SCTP_TCB_UNLOCK(stcb);
13273 SOCKBUF_LOCK(&so->so_snd);
13275 * This is a bit strange, but I think it will
13276 * work. The total_output_queue_size is locked and
13277 * protected by the TCB_LOCK, which we just released.
13278 * There is a race that can occur between releasing it
13279 * above, and me getting the socket lock, where sacks
13280 * come in but we have not put the SB_WAIT on the
13281 * so_snd buffer to get the wakeup. After the LOCK
13282 * is applied the sack_processing will also need to
13283 * LOCK the so->so_snd to do the actual sowwakeup(). So
13284 * once we have the socket buffer lock if we recheck the
13285 * size we KNOW we will get to sleep safely with the
13286 * wakeup flag in place.
13288 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
13289 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13290 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13291 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13292 asoc, (size_t)uio->uio_resid);
13295 stcb->block_entry = &be;
13296 error = sbwait(&so->so_snd);
13297 stcb->block_entry = NULL;
13299 if (error || so->so_error || be.error) {
13302 error = so->so_error;
13307 SOCKBUF_UNLOCK(&so->so_snd);
13310 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13311 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13312 asoc, stcb->asoc.total_output_queue_size);
13315 SOCKBUF_UNLOCK(&so->so_snd);
13316 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13320 SCTP_TCB_SEND_LOCK(stcb);
13321 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13322 SCTP_TCB_SEND_UNLOCK(stcb);
13326 if (sp->msg_is_complete == 0) {
13327 strm->last_msg_incomplete = 1;
13328 if (stcb->asoc.idata_supported == 0) {
13329 asoc->stream_locked = 1;
13330 asoc->stream_locked_on = srcv->sinfo_stream;
13333 sp->sender_all_done = 1;
13334 strm->last_msg_incomplete = 0;
13335 asoc->stream_locked = 0;
13338 SCTP_PRINTF("Huh no sp TSNH?\n");
13339 strm->last_msg_incomplete = 0;
13340 asoc->stream_locked = 0;
13342 SCTP_TCB_SEND_UNLOCK(stcb);
13343 if (uio->uio_resid == 0) {
13344 got_all_of_the_send = 1;
13347 /* We send in a 0, since we do NOT have any locks */
13348 error = sctp_msg_append(stcb, net, top, srcv, 0);
13350 if (srcv->sinfo_flags & SCTP_EOF) {
13352 * This should only happen for Panda for the mbuf
13353 * send case, which does NOT yet support EEOR mode.
13354 * Thus, we can just set this flag to do the proper EOF handling.
13357 got_all_of_the_send = 1;
13365 if ((srcv->sinfo_flags & SCTP_EOF) &&
13366 (got_all_of_the_send == 1)) {
13367 SCTP_STAT_INCR(sctps_sends_with_eof);
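/*
 * SCTP_EOF with all user data accepted: if nothing remains queued, move the
 * association to SHUTDOWN-SENT and emit a SHUTDOWN now; otherwise mark
 * SHUTDOWN_PENDING (or PARTIAL_MSG_LEFT) so the shutdown proceeds once the
 * queues drain.  For illustration only, a graceful close via the sendmsg
 * path would look roughly like:
 *     sctp_sendmsg(fd, NULL, 0, NULL, 0, 0, SCTP_EOF, 0, 0, 0);
 */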
13369 if (hold_tcblock == 0) {
13370 SCTP_TCB_LOCK(stcb);
13373 if (TAILQ_EMPTY(&asoc->send_queue) &&
13374 TAILQ_EMPTY(&asoc->sent_queue) &&
13375 sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
13376 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13379 /* there is nothing queued to send, so I'm done... */
13380 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13381 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13382 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13383 struct sctp_nets *netp;
13385 /* only send SHUTDOWN the first time through */
13386 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
13387 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13389 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
13390 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
13391 sctp_stop_timers_for_shutdown(stcb);
13392 if (stcb->asoc.alternate) {
13393 netp = stcb->asoc.alternate;
13395 netp = stcb->asoc.primary_destination;
13397 sctp_send_shutdown(stcb, netp);
13398 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13400 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13401 asoc->primary_destination);
13405 * we still got (or just got) data to send, so set SHUTDOWN_PENDING.
13409 * XXX sockets draft says that SCTP_EOF should be
13410 * sent with no data. Currently, we will allow user
13411 * data to be sent first and move to SHUTDOWN-PENDING.
13414 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13415 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13416 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13417 if (hold_tcblock == 0) {
13418 SCTP_TCB_LOCK(stcb);
13421 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13422 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
13424 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
13425 if (TAILQ_EMPTY(&asoc->send_queue) &&
13426 TAILQ_EMPTY(&asoc->sent_queue) &&
13427 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13428 struct mbuf *op_err;
13429 char msg[SCTP_DIAG_INFO_LEN];
13432 if (free_cnt_applied) {
13433 atomic_add_int(&stcb->asoc.refcnt, -1);
13434 free_cnt_applied = 0;
13436 snprintf(msg, sizeof(msg),
13437 "%s:%d at %s", __FILE__, __LINE__, __func__);
13438 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
13440 sctp_abort_an_association(stcb->sctp_ep, stcb,
13441 op_err, SCTP_SO_LOCKED);
13442 /* now relock the stcb so everything is sane */
13448 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13449 asoc->primary_destination);
13450 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13455 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13456 some_on_control = 1;
13458 if (queue_only_for_init) {
13459 if (hold_tcblock == 0) {
13460 SCTP_TCB_LOCK(stcb);
13463 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13464 /* a collision took us forward? */
13467 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13468 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
13472 if ((net->flight_size > net->cwnd) &&
13473 (stcb->asoc.sctp_cmt_on_off == 0)) {
13474 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13476 } else if (asoc->ifp_had_enobuf) {
13477 SCTP_STAT_INCR(sctps_ifnomemqueued);
13478 if (net->flight_size > (2 * net->mtu)) {
13481 asoc->ifp_had_enobuf = 0;
13483 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13484 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
13485 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13486 (stcb->asoc.total_flight > 0) &&
13487 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13488 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13490 * Ok, Nagle is set on and we have data outstanding.
13491 * Don't send anything and let SACKs drive out the
13492 * data unless we have a "full" segment to send.
13494 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13495 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13497 SCTP_STAT_INCR(sctps_naglequeued);
13500 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13501 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13502 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13504 SCTP_STAT_INCR(sctps_naglesent);
13507 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13508 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13509 nagle_applies, un_sent);
13510 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13511 stcb->asoc.total_flight,
13512 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13514 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13515 /* we can attempt to send too. */
13516 if (hold_tcblock == 0) {
13517 /* If there is activity recv'ing sacks no need to send */
13519 if (SCTP_TCB_TRYLOCK(stcb)) {
13520 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13524 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13526 } else if ((queue_only == 0) &&
13527 (stcb->asoc.peers_rwnd == 0) &&
13528 (stcb->asoc.total_flight == 0)) {
13529 /* We get to have a probe outstanding */
13530 if (hold_tcblock == 0) {
13532 SCTP_TCB_LOCK(stcb);
13534 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13535 } else if (some_on_control) {
13536 int num_out, reason, frag_point;
13538 /* Here we do control only */
13539 if (hold_tcblock == 0) {
13541 SCTP_TCB_LOCK(stcb);
13543 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13544 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13545 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13547 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13548 queue_only, stcb->asoc.peers_rwnd, un_sent,
13549 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13550 stcb->asoc.total_output_queue_size, error);
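/*
 * Common exit path: give back any send-buffer reservation taken above, drop
 * the asoc-create lock, the TCB lock and the TCB refcount if still held,
 * and free the control mbuf (and, presumably, any leftover top chain)
 * handed in by the caller.
 */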
13555 if (local_soresv && stcb) {
13556 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13558 if (create_lock_applied) {
13559 SCTP_ASOC_CREATE_UNLOCK(inp);
13561 if ((stcb) && hold_tcblock) {
13562 SCTP_TCB_UNLOCK(stcb);
13564 if (stcb && free_cnt_applied) {
13565 atomic_add_int(&stcb->asoc.refcnt, -1);
13569 if (mtx_owned(&stcb->tcb_mtx)) {
13570 panic("Leaving with tcb mtx owned?");
13572 if (mtx_owned(&stcb->tcb_send_mtx)) {
13573 panic("Leaving with tcb send mtx owned?");
13581 sctp_m_freem(control);
13588 * generate an AUTHentication chunk, if required
13591 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13592 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
13593 struct sctp_tcb *stcb, uint8_t chunk)
13595 struct mbuf *m_auth;
13596 struct sctp_auth_chunk *auth;
13600 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13604 if (stcb->asoc.auth_supported == 0) {
13607 /* does the requested chunk require auth? */
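/*
 * If not, no AUTH chunk is inserted; otherwise allocate an mbuf for the
 * AUTH chunk whose chunk_length below includes the digest length for the
 * peer's chosen HMAC id, with the digest itself filled in at send time.
 */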
13608 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13611 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
13612 if (m_auth == NULL) {
13616 /* reserve some space if this will be the first mbuf */
13618 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13619 /* fill in the AUTH chunk details */
13620 auth = mtod(m_auth, struct sctp_auth_chunk *);
13621 bzero(auth, sizeof(*auth));
13622 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13623 auth->ch.chunk_flags = 0;
13624 chunk_len = sizeof(*auth) +
13625 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13626 auth->ch.chunk_length = htons(chunk_len);
13627 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13628 /* key id and hmac digest will be computed and filled in upon send */
13630 /* save the offset where the auth was inserted into the chain */
13632 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13633 *offset += SCTP_BUF_LEN(cn);
13636 /* update length and return pointer to the auth chunk */
13637 SCTP_BUF_LEN(m_auth) = chunk_len;
13638 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13639 if (auth_ret != NULL)
13647 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
13649 struct nd_prefix *pfx = NULL;
13650 struct nd_pfxrouter *pfxrtr = NULL;
13651 struct sockaddr_in6 gw6;
13653 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13656 /* get prefix entry of address */
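/*
 * Walk the kernel's ND prefix list for an on-link prefix that covers src6
 * (detached prefixes are skipped); without one we cannot verify the next hop.
 */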
13658 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13659 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13661 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13662 &src6->sin6_addr, &pfx->ndpr_mask))
13665 /* no prefix entry in the prefix list */
13668 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13669 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13672 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13673 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13675 /* search installed gateway from prefix entry */
13676 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13677 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13678 gw6.sin6_family = AF_INET6;
13679 gw6.sin6_len = sizeof(struct sockaddr_in6);
13680 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13681 sizeof(struct in6_addr));
13682 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13683 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13684 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13685 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13686 if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
13688 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13693 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
13699 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
13702 struct sockaddr_in *sin, *mask;
13703 struct ifaddr *ifa;
13704 struct in_addr srcnetaddr, gwnetaddr;
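/*
 * Compare the source address and the route's gateway under the source
 * interface's netmask: if both fall in the same subnet, the gateway is
 * directly reachable from this source address.
 */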
13706 if (ro == NULL || ro->ro_rt == NULL ||
13707 sifa->address.sa.sa_family != AF_INET) {
13710 ifa = (struct ifaddr *)sifa->ifa;
13711 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13712 sin = &sifa->address.sin;
13713 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13714 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13715 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13716 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13718 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13719 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13720 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13721 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13722 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13723 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {