2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_auth.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_asconf.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_bsd_addr.h>
51 #include <netinet/sctp_input.h>
52 #include <netinet/sctp_crc32.h>
53 #if defined(INET) || defined(INET6)
54 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <machine/in_cksum.h>
61 #define SCTP_MAX_GAPS_INARRAY 4
63 uint8_t right_edge; /* mergeable on the right edge */
64 uint8_t left_edge; /* mergeable on the left edge */
67 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
70 const struct sack_track sack_array[256] = {
71 {0, 0, 0, 0, /* 0x00 */
78 {1, 0, 1, 0, /* 0x01 */
85 {0, 0, 1, 0, /* 0x02 */
92 {1, 0, 1, 0, /* 0x03 */
99 {0, 0, 1, 0, /* 0x04 */
106 {1, 0, 2, 0, /* 0x05 */
113 {0, 0, 1, 0, /* 0x06 */
120 {1, 0, 1, 0, /* 0x07 */
127 {0, 0, 1, 0, /* 0x08 */
134 {1, 0, 2, 0, /* 0x09 */
141 {0, 0, 2, 0, /* 0x0a */
148 {1, 0, 2, 0, /* 0x0b */
155 {0, 0, 1, 0, /* 0x0c */
162 {1, 0, 2, 0, /* 0x0d */
169 {0, 0, 1, 0, /* 0x0e */
176 {1, 0, 1, 0, /* 0x0f */
183 {0, 0, 1, 0, /* 0x10 */
190 {1, 0, 2, 0, /* 0x11 */
197 {0, 0, 2, 0, /* 0x12 */
204 {1, 0, 2, 0, /* 0x13 */
211 {0, 0, 2, 0, /* 0x14 */
218 {1, 0, 3, 0, /* 0x15 */
225 {0, 0, 2, 0, /* 0x16 */
232 {1, 0, 2, 0, /* 0x17 */
239 {0, 0, 1, 0, /* 0x18 */
246 {1, 0, 2, 0, /* 0x19 */
253 {0, 0, 2, 0, /* 0x1a */
260 {1, 0, 2, 0, /* 0x1b */
267 {0, 0, 1, 0, /* 0x1c */
274 {1, 0, 2, 0, /* 0x1d */
281 {0, 0, 1, 0, /* 0x1e */
288 {1, 0, 1, 0, /* 0x1f */
295 {0, 0, 1, 0, /* 0x20 */
302 {1, 0, 2, 0, /* 0x21 */
309 {0, 0, 2, 0, /* 0x22 */
316 {1, 0, 2, 0, /* 0x23 */
323 {0, 0, 2, 0, /* 0x24 */
330 {1, 0, 3, 0, /* 0x25 */
337 {0, 0, 2, 0, /* 0x26 */
344 {1, 0, 2, 0, /* 0x27 */
351 {0, 0, 2, 0, /* 0x28 */
358 {1, 0, 3, 0, /* 0x29 */
365 {0, 0, 3, 0, /* 0x2a */
372 {1, 0, 3, 0, /* 0x2b */
379 {0, 0, 2, 0, /* 0x2c */
386 {1, 0, 3, 0, /* 0x2d */
393 {0, 0, 2, 0, /* 0x2e */
400 {1, 0, 2, 0, /* 0x2f */
407 {0, 0, 1, 0, /* 0x30 */
414 {1, 0, 2, 0, /* 0x31 */
421 {0, 0, 2, 0, /* 0x32 */
428 {1, 0, 2, 0, /* 0x33 */
435 {0, 0, 2, 0, /* 0x34 */
442 {1, 0, 3, 0, /* 0x35 */
449 {0, 0, 2, 0, /* 0x36 */
456 {1, 0, 2, 0, /* 0x37 */
463 {0, 0, 1, 0, /* 0x38 */
470 {1, 0, 2, 0, /* 0x39 */
477 {0, 0, 2, 0, /* 0x3a */
484 {1, 0, 2, 0, /* 0x3b */
491 {0, 0, 1, 0, /* 0x3c */
498 {1, 0, 2, 0, /* 0x3d */
505 {0, 0, 1, 0, /* 0x3e */
512 {1, 0, 1, 0, /* 0x3f */
519 {0, 0, 1, 0, /* 0x40 */
526 {1, 0, 2, 0, /* 0x41 */
533 {0, 0, 2, 0, /* 0x42 */
540 {1, 0, 2, 0, /* 0x43 */
547 {0, 0, 2, 0, /* 0x44 */
554 {1, 0, 3, 0, /* 0x45 */
561 {0, 0, 2, 0, /* 0x46 */
568 {1, 0, 2, 0, /* 0x47 */
575 {0, 0, 2, 0, /* 0x48 */
582 {1, 0, 3, 0, /* 0x49 */
589 {0, 0, 3, 0, /* 0x4a */
596 {1, 0, 3, 0, /* 0x4b */
603 {0, 0, 2, 0, /* 0x4c */
610 {1, 0, 3, 0, /* 0x4d */
617 {0, 0, 2, 0, /* 0x4e */
624 {1, 0, 2, 0, /* 0x4f */
631 {0, 0, 2, 0, /* 0x50 */
638 {1, 0, 3, 0, /* 0x51 */
645 {0, 0, 3, 0, /* 0x52 */
652 {1, 0, 3, 0, /* 0x53 */
659 {0, 0, 3, 0, /* 0x54 */
666 {1, 0, 4, 0, /* 0x55 */
673 {0, 0, 3, 0, /* 0x56 */
680 {1, 0, 3, 0, /* 0x57 */
687 {0, 0, 2, 0, /* 0x58 */
694 {1, 0, 3, 0, /* 0x59 */
701 {0, 0, 3, 0, /* 0x5a */
708 {1, 0, 3, 0, /* 0x5b */
715 {0, 0, 2, 0, /* 0x5c */
722 {1, 0, 3, 0, /* 0x5d */
729 {0, 0, 2, 0, /* 0x5e */
736 {1, 0, 2, 0, /* 0x5f */
743 {0, 0, 1, 0, /* 0x60 */
750 {1, 0, 2, 0, /* 0x61 */
757 {0, 0, 2, 0, /* 0x62 */
764 {1, 0, 2, 0, /* 0x63 */
771 {0, 0, 2, 0, /* 0x64 */
778 {1, 0, 3, 0, /* 0x65 */
785 {0, 0, 2, 0, /* 0x66 */
792 {1, 0, 2, 0, /* 0x67 */
799 {0, 0, 2, 0, /* 0x68 */
806 {1, 0, 3, 0, /* 0x69 */
813 {0, 0, 3, 0, /* 0x6a */
820 {1, 0, 3, 0, /* 0x6b */
827 {0, 0, 2, 0, /* 0x6c */
834 {1, 0, 3, 0, /* 0x6d */
841 {0, 0, 2, 0, /* 0x6e */
848 {1, 0, 2, 0, /* 0x6f */
855 {0, 0, 1, 0, /* 0x70 */
862 {1, 0, 2, 0, /* 0x71 */
869 {0, 0, 2, 0, /* 0x72 */
876 {1, 0, 2, 0, /* 0x73 */
883 {0, 0, 2, 0, /* 0x74 */
890 {1, 0, 3, 0, /* 0x75 */
897 {0, 0, 2, 0, /* 0x76 */
904 {1, 0, 2, 0, /* 0x77 */
911 {0, 0, 1, 0, /* 0x78 */
918 {1, 0, 2, 0, /* 0x79 */
925 {0, 0, 2, 0, /* 0x7a */
932 {1, 0, 2, 0, /* 0x7b */
939 {0, 0, 1, 0, /* 0x7c */
946 {1, 0, 2, 0, /* 0x7d */
953 {0, 0, 1, 0, /* 0x7e */
960 {1, 0, 1, 0, /* 0x7f */
967 {0, 1, 1, 0, /* 0x80 */
974 {1, 1, 2, 0, /* 0x81 */
981 {0, 1, 2, 0, /* 0x82 */
988 {1, 1, 2, 0, /* 0x83 */
995 {0, 1, 2, 0, /* 0x84 */
1002 {1, 1, 3, 0, /* 0x85 */
1009 {0, 1, 2, 0, /* 0x86 */
1016 {1, 1, 2, 0, /* 0x87 */
1023 {0, 1, 2, 0, /* 0x88 */
1030 {1, 1, 3, 0, /* 0x89 */
1037 {0, 1, 3, 0, /* 0x8a */
1044 {1, 1, 3, 0, /* 0x8b */
1051 {0, 1, 2, 0, /* 0x8c */
1058 {1, 1, 3, 0, /* 0x8d */
1065 {0, 1, 2, 0, /* 0x8e */
1072 {1, 1, 2, 0, /* 0x8f */
1079 {0, 1, 2, 0, /* 0x90 */
1086 {1, 1, 3, 0, /* 0x91 */
1093 {0, 1, 3, 0, /* 0x92 */
1100 {1, 1, 3, 0, /* 0x93 */
1107 {0, 1, 3, 0, /* 0x94 */
1114 {1, 1, 4, 0, /* 0x95 */
1121 {0, 1, 3, 0, /* 0x96 */
1128 {1, 1, 3, 0, /* 0x97 */
1135 {0, 1, 2, 0, /* 0x98 */
1142 {1, 1, 3, 0, /* 0x99 */
1149 {0, 1, 3, 0, /* 0x9a */
1156 {1, 1, 3, 0, /* 0x9b */
1163 {0, 1, 2, 0, /* 0x9c */
1170 {1, 1, 3, 0, /* 0x9d */
1177 {0, 1, 2, 0, /* 0x9e */
1184 {1, 1, 2, 0, /* 0x9f */
1191 {0, 1, 2, 0, /* 0xa0 */
1198 {1, 1, 3, 0, /* 0xa1 */
1205 {0, 1, 3, 0, /* 0xa2 */
1212 {1, 1, 3, 0, /* 0xa3 */
1219 {0, 1, 3, 0, /* 0xa4 */
1226 {1, 1, 4, 0, /* 0xa5 */
1233 {0, 1, 3, 0, /* 0xa6 */
1240 {1, 1, 3, 0, /* 0xa7 */
1247 {0, 1, 3, 0, /* 0xa8 */
1254 {1, 1, 4, 0, /* 0xa9 */
1261 {0, 1, 4, 0, /* 0xaa */
1268 {1, 1, 4, 0, /* 0xab */
1275 {0, 1, 3, 0, /* 0xac */
1282 {1, 1, 4, 0, /* 0xad */
1289 {0, 1, 3, 0, /* 0xae */
1296 {1, 1, 3, 0, /* 0xaf */
1303 {0, 1, 2, 0, /* 0xb0 */
1310 {1, 1, 3, 0, /* 0xb1 */
1317 {0, 1, 3, 0, /* 0xb2 */
1324 {1, 1, 3, 0, /* 0xb3 */
1331 {0, 1, 3, 0, /* 0xb4 */
1338 {1, 1, 4, 0, /* 0xb5 */
1345 {0, 1, 3, 0, /* 0xb6 */
1352 {1, 1, 3, 0, /* 0xb7 */
1359 {0, 1, 2, 0, /* 0xb8 */
1366 {1, 1, 3, 0, /* 0xb9 */
1373 {0, 1, 3, 0, /* 0xba */
1380 {1, 1, 3, 0, /* 0xbb */
1387 {0, 1, 2, 0, /* 0xbc */
1394 {1, 1, 3, 0, /* 0xbd */
1401 {0, 1, 2, 0, /* 0xbe */
1408 {1, 1, 2, 0, /* 0xbf */
1415 {0, 1, 1, 0, /* 0xc0 */
1422 {1, 1, 2, 0, /* 0xc1 */
1429 {0, 1, 2, 0, /* 0xc2 */
1436 {1, 1, 2, 0, /* 0xc3 */
1443 {0, 1, 2, 0, /* 0xc4 */
1450 {1, 1, 3, 0, /* 0xc5 */
1457 {0, 1, 2, 0, /* 0xc6 */
1464 {1, 1, 2, 0, /* 0xc7 */
1471 {0, 1, 2, 0, /* 0xc8 */
1478 {1, 1, 3, 0, /* 0xc9 */
1485 {0, 1, 3, 0, /* 0xca */
1492 {1, 1, 3, 0, /* 0xcb */
1499 {0, 1, 2, 0, /* 0xcc */
1506 {1, 1, 3, 0, /* 0xcd */
1513 {0, 1, 2, 0, /* 0xce */
1520 {1, 1, 2, 0, /* 0xcf */
1527 {0, 1, 2, 0, /* 0xd0 */
1534 {1, 1, 3, 0, /* 0xd1 */
1541 {0, 1, 3, 0, /* 0xd2 */
1548 {1, 1, 3, 0, /* 0xd3 */
1555 {0, 1, 3, 0, /* 0xd4 */
1562 {1, 1, 4, 0, /* 0xd5 */
1569 {0, 1, 3, 0, /* 0xd6 */
1576 {1, 1, 3, 0, /* 0xd7 */
1583 {0, 1, 2, 0, /* 0xd8 */
1590 {1, 1, 3, 0, /* 0xd9 */
1597 {0, 1, 3, 0, /* 0xda */
1604 {1, 1, 3, 0, /* 0xdb */
1611 {0, 1, 2, 0, /* 0xdc */
1618 {1, 1, 3, 0, /* 0xdd */
1625 {0, 1, 2, 0, /* 0xde */
1632 {1, 1, 2, 0, /* 0xdf */
1639 {0, 1, 1, 0, /* 0xe0 */
1646 {1, 1, 2, 0, /* 0xe1 */
1653 {0, 1, 2, 0, /* 0xe2 */
1660 {1, 1, 2, 0, /* 0xe3 */
1667 {0, 1, 2, 0, /* 0xe4 */
1674 {1, 1, 3, 0, /* 0xe5 */
1681 {0, 1, 2, 0, /* 0xe6 */
1688 {1, 1, 2, 0, /* 0xe7 */
1695 {0, 1, 2, 0, /* 0xe8 */
1702 {1, 1, 3, 0, /* 0xe9 */
1709 {0, 1, 3, 0, /* 0xea */
1716 {1, 1, 3, 0, /* 0xeb */
1723 {0, 1, 2, 0, /* 0xec */
1730 {1, 1, 3, 0, /* 0xed */
1737 {0, 1, 2, 0, /* 0xee */
1744 {1, 1, 2, 0, /* 0xef */
1751 {0, 1, 1, 0, /* 0xf0 */
1758 {1, 1, 2, 0, /* 0xf1 */
1765 {0, 1, 2, 0, /* 0xf2 */
1772 {1, 1, 2, 0, /* 0xf3 */
1779 {0, 1, 2, 0, /* 0xf4 */
1786 {1, 1, 3, 0, /* 0xf5 */
1793 {0, 1, 2, 0, /* 0xf6 */
1800 {1, 1, 2, 0, /* 0xf7 */
1807 {0, 1, 1, 0, /* 0xf8 */
1814 {1, 1, 2, 0, /* 0xf9 */
1821 {0, 1, 2, 0, /* 0xfa */
1828 {1, 1, 2, 0, /* 0xfb */
1835 {0, 1, 1, 0, /* 0xfc */
1842 {1, 1, 2, 0, /* 0xfd */
1849 {0, 1, 1, 0, /* 0xfe */
1856 {1, 1, 1, 0, /* 0xff */
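/*
 * Note on the table above: sack_array is indexed by one byte of the
 * received-TSN mapping array when a SACK is built.  Each entry gives the
 * number of gap-ack blocks that byte contributes (num_entries), the bit
 * ranges of those blocks within the byte, and whether bit 0 / bit 7 are set
 * (right_edge / left_edge) so blocks can be merged with the neighbouring
 * bytes.  A minimal usage sketch (report_gap() and the byte's TSN offset
 * handling are hypothetical, not part of this file):
 *
 *	uint8_t byte = 0x05;                        // bits 0 and 2 set
 *	const struct sack_track *e = &sack_array[byte];
 *	for (int i = 0; i < e->num_entries; i++) {  // two blocks: 0..0 and 2..2
 *		report_gap(e->gaps[i].start, e->gaps[i].end);
 *	}
 */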
1867 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1868 struct sctp_scoping *scope,
1871 if ((scope->loopback_scope == 0) &&
1872 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1874 * skip loopback if not in scope
1878 switch (ifa->address.sa.sa_family) {
1881 if (scope->ipv4_addr_legal) {
1882 struct sockaddr_in *sin;
1884 sin = &ifa->address.sin;
1885 if (sin->sin_addr.s_addr == 0) {
1886 /* not in scope, unspecified */
1889 if ((scope->ipv4_local_scope == 0) &&
1890 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1891 /* private address not in scope */
1901 if (scope->ipv6_addr_legal) {
1902 struct sockaddr_in6 *sin6;
1905 * Must update the flags, bummer, which means any
1906 * IFA locks must now be applied HERE <->
1909 sctp_gather_internal_ifa_flags(ifa);
1911 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1914 /* ok to use deprecated addresses? */
1915 sin6 = &ifa->address.sin6;
1916 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1917 /* skip unspecified addresses */
1920 if ( /* (local_scope == 0) && */
1921 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1924 if ((scope->site_scope == 0) &&
1925 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1939 static struct mbuf *
1940 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t * len)
1942 #if defined(INET) || defined(INET6)
1943 struct sctp_paramhdr *parmh;
1949 switch (ifa->address.sa.sa_family) {
1952 plen = (uint16_t) sizeof(struct sctp_ipv4addr_param);
1957 plen = (uint16_t) sizeof(struct sctp_ipv6addr_param);
1963 #if defined(INET) || defined(INET6)
1964 if (M_TRAILINGSPACE(m) >= plen) {
1965 /* easy side we just drop it on the end */
1966 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1969 /* Need more space */
1971 while (SCTP_BUF_NEXT(mret) != NULL) {
1972 mret = SCTP_BUF_NEXT(mret);
1974 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
1975 if (SCTP_BUF_NEXT(mret) == NULL) {
1976 /* We are hosed, can't add more addresses */
1979 mret = SCTP_BUF_NEXT(mret);
1980 parmh = mtod(mret, struct sctp_paramhdr *);
1982 /* now add the parameter */
1983 switch (ifa->address.sa.sa_family) {
1987 struct sctp_ipv4addr_param *ipv4p;
1988 struct sockaddr_in *sin;
1990 sin = &ifa->address.sin;
1991 ipv4p = (struct sctp_ipv4addr_param *)parmh;
1992 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
1993 parmh->param_length = htons(plen);
1994 ipv4p->addr = sin->sin_addr.s_addr;
1995 SCTP_BUF_LEN(mret) += plen;
2002 struct sctp_ipv6addr_param *ipv6p;
2003 struct sockaddr_in6 *sin6;
2005 sin6 = &ifa->address.sin6;
2006 ipv6p = (struct sctp_ipv6addr_param *)parmh;
2007 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
2008 parmh->param_length = htons(plen);
2009 memcpy(ipv6p->addr, &sin6->sin6_addr,
2010 sizeof(ipv6p->addr));
2011 /* clear embedded scope in the address */
2012 in6_clearscope((struct in6_addr *)ipv6p->addr);
2013 SCTP_BUF_LEN(mret) += plen;
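/*
 * Sketch of the parameter appended above (values per RFC 4960; the layout is
 * illustrative only).  Both the type and length fields are 16-bit values in
 * network byte order, which is why the code stores them with htons():
 *
 *	IPv4 Address Parameter (8 bytes)       IPv6 Address Parameter (20 bytes)
 *	+--------+--------+--------------+     +--------+--------+----------------+
 *	| type=5 | len=8  | in_addr (4)  |     | type=6 | len=20 | in6_addr (16)  |
 *	+--------+--------+--------------+     +--------+--------+----------------+
 */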
2029 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2030 struct sctp_scoping *scope,
2031 struct mbuf *m_at, int cnt_inits_to,
2032 uint16_t * padding_len, uint16_t * chunk_len)
2034 struct sctp_vrf *vrf = NULL;
2035 int cnt, limit_out = 0, total_count;
2038 vrf_id = inp->def_vrf_id;
2039 SCTP_IPI_ADDR_RLOCK();
2040 vrf = sctp_find_vrf(vrf_id);
2042 SCTP_IPI_ADDR_RUNLOCK();
2045 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2046 struct sctp_ifa *sctp_ifap;
2047 struct sctp_ifn *sctp_ifnp;
2050 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2052 cnt = SCTP_ADDRESS_LIMIT;
2055 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2056 if ((scope->loopback_scope == 0) &&
2057 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2059 * Skip loopback devices if loopback_scope
2064 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2066 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2067 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2068 &sctp_ifap->address.sin.sin_addr) != 0)) {
2073 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2074 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2075 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2079 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2082 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2086 if (cnt > SCTP_ADDRESS_LIMIT) {
2090 if (cnt > SCTP_ADDRESS_LIMIT) {
2097 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2099 if ((scope->loopback_scope == 0) &&
2100 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2102 * Skip loopback devices if
2103 * loopback_scope not set
2107 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2109 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2110 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2111 &sctp_ifap->address.sin.sin_addr) != 0)) {
2116 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2117 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2118 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2122 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2125 if (sctp_is_address_in_scope(sctp_ifap,
2129 if ((chunk_len != NULL) &&
2130 (padding_len != NULL) &&
2131 (*padding_len > 0)) {
2132 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2133 SCTP_BUF_LEN(m_at) += *padding_len;
2134 *chunk_len += *padding_len;
2137 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2148 if (total_count > SCTP_ADDRESS_LIMIT) {
2149 /* No more addresses */
2157 struct sctp_laddr *laddr;
2160 /* First, how many ? */
2161 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2162 if (laddr->ifa == NULL) {
2165 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2167 * Address being deleted by the system, don't list.
2171 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2173 * Address being deleted on this ep, don't list.
2178 if (sctp_is_address_in_scope(laddr->ifa,
2185 * To get through a NAT we only list addresses if we have
2186 * more than one. That way if you just bind a single address
2187 * we let the source of the init dictate our address.
2191 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2192 if (laddr->ifa == NULL) {
2195 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2198 if (sctp_is_address_in_scope(laddr->ifa,
2202 if ((chunk_len != NULL) &&
2203 (padding_len != NULL) &&
2204 (*padding_len > 0)) {
2205 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2206 SCTP_BUF_LEN(m_at) += *padding_len;
2207 *chunk_len += *padding_len;
2210 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2212 if (cnt >= SCTP_ADDRESS_LIMIT) {
2218 SCTP_IPI_ADDR_RUNLOCK();
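/*
 * Note on the padding handled above: SCTP parameters must start on 4-byte
 * boundaries, so any padding owed from the previous parameter is written as
 * zero bytes before the next address parameter is appended.  A minimal sketch
 * of the same rule (hypothetical values):
 *
 *	if (*padding_len > 0) {                          // e.g. chunk ends at 30
 *		memset(ptr + *chunk_len, 0, *padding_len);   // write 2 zero bytes
 *		*chunk_len += *padding_len;                  // next param starts at 32
 *		*padding_len = 0;
 *	}
 */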
2222 static struct sctp_ifa *
2223 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2224 uint8_t dest_is_loop,
2225 uint8_t dest_is_priv,
2228 uint8_t dest_is_global = 0;
2230 /* dest_is_priv is true if destination is a private address */
2231 * dest_is_loop is true if destination is a loopback address */
2234 * Here we determine if it is a preferred address. A preferred address
2235 * means it is of the same or a higher scope than the destination.
2236 * L = loopback, P = private, G = global
2237 * -----------------------------------------
2238 * src    |  dest  | result
2239 * -----------------------------------------
2240 * L      |    L   | yes
2241 * -----------------------------------------
2242 * P      |    L   | yes-v4 no-v6
2243 * -----------------------------------------
2244 * G      |    L   | yes-v4 no-v6
2245 * -----------------------------------------
2246 * L      |    P   | no
2247 * -----------------------------------------
2248 * P      |    P   | yes
2249 * -----------------------------------------
2250 * G      |    P   | no
2251 * -----------------------------------------
2252 * L      |    G   | no
2253 * -----------------------------------------
2254 * P      |    G   | no
2255 * -----------------------------------------
2256 * G      |    G   | yes
2257 * -----------------------------------------
2260 if (ifa->address.sa.sa_family != fam) {
2261 /* forget mis-matched family */
2264 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2267 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2268 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2269 /* Ok the address may be ok */
2271 if (fam == AF_INET6) {
2272 /* ok to use deprecated addresses? no, let's not! */
2273 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2274 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2277 if (ifa->src_is_priv && !ifa->src_is_loop) {
2279 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2283 if (ifa->src_is_glob) {
2285 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2292 * Now that we know what is what, implement our table. This could in
2293 * theory be done slicker (it used to be), but this is
2294 * straightforward and easier to validate :-)
2296 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2297 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2298 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2299 dest_is_loop, dest_is_priv, dest_is_global);
2301 if ((ifa->src_is_loop) && (dest_is_priv)) {
2302 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2305 if ((ifa->src_is_glob) && (dest_is_priv)) {
2306 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2309 if ((ifa->src_is_loop) && (dest_is_global)) {
2310 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2313 if ((ifa->src_is_priv) && (dest_is_global)) {
2314 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2317 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2318 /* it's a preferred address */
2322 static struct sctp_ifa *
2323 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2324 uint8_t dest_is_loop,
2325 uint8_t dest_is_priv,
2328 uint8_t dest_is_global = 0;
2331 * Here we determine if it is an acceptable address. An acceptable
2332 * address means it is of the same or a higher scope, but we can
2333 * allow for NAT, which means it is ok to have a global dest and a
2334 * private src.
2336 * L = loopback, P = private, G = global
2337 * -----------------------------------------
2338 * src    |  dest  | result
2339 * -----------------------------------------
2340 * L      |    L   | yes
2341 * -----------------------------------------
2342 * P      |    L   | yes-v4 no-v6
2343 * -----------------------------------------
2344 * G      |    L   | yes
2345 * -----------------------------------------
2346 * L      |    P   | no
2347 * -----------------------------------------
2348 * P      |    P   | yes
2349 * -----------------------------------------
2350 * G      |    P   | yes - May not work
2351 * -----------------------------------------
2352 * L      |    G   | no
2353 * -----------------------------------------
2354 * P      |    G   | yes - May not work
2355 * -----------------------------------------
2356 * G      |    G   | yes
2357 * -----------------------------------------
2360 if (ifa->address.sa.sa_family != fam) {
2361 /* forget non-matching family */
2362 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2363 ifa->address.sa.sa_family, fam);
2366 /* Ok the address may be ok */
2367 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2368 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2369 dest_is_loop, dest_is_priv);
2370 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2374 if (fam == AF_INET6) {
2375 /* ok to use deprecated addresses? */
2376 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2379 if (ifa->src_is_priv) {
2380 /* Special case, linklocal to loop */
2387 * Now that we know what is what, implement our table. This could in
2388 * theory be done slicker (it used to be), but this is
2389 * straightforward and easier to validate :-)
2391 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2394 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2397 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2400 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2403 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2404 /* it's an acceptable address */
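/*
 * The two classifiers above differ only in how optimistic they are about
 * NAT.  For example (a sketch, assuming an interface holding 10.0.0.5 and a
 * destination of 203.0.113.1, i.e. private source, global destination):
 *
 *	sctp_is_ifa_addr_preferred(ifa, 0, 0, AF_INET)   -> NULL  (P -> G)
 *	sctp_is_ifa_addr_acceptable(ifa, 0, 0, AF_INET)  -> ifa   (may work via NAT)
 *
 * Callers therefore try "preferred" first and only fall back to "acceptable".
 */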
2409 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2411 struct sctp_laddr *laddr;
2414 /* There are no restrictions, no TCB :-) */
2417 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2418 if (laddr->ifa == NULL) {
2419 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2423 if (laddr->ifa == ifa) {
2424 /* Yes it is on the list */
2433 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2435 struct sctp_laddr *laddr;
2439 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2440 if (laddr->ifa == NULL) {
2441 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2445 if ((laddr->ifa == ifa) && laddr->action == 0)
2454 static struct sctp_ifa *
2455 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2458 int non_asoc_addr_ok,
2459 uint8_t dest_is_priv,
2460 uint8_t dest_is_loop,
2463 struct sctp_laddr *laddr, *starting_point;
2466 struct sctp_ifn *sctp_ifn;
2467 struct sctp_ifa *sctp_ifa, *sifa;
2468 struct sctp_vrf *vrf;
2471 vrf = sctp_find_vrf(vrf_id);
2475 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2476 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2477 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2479 * first question: is the ifn we will emit on in our list? If so, we
2480 * want such an address. Note that we first look for a preferred address.
2484 /* is a preferred one on the interface we route out? */
2485 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2487 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2488 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2489 &sctp_ifa->address.sin.sin_addr) != 0)) {
2494 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2495 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2496 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2500 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2501 (non_asoc_addr_ok == 0))
2503 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2508 if (sctp_is_addr_in_ep(inp, sifa)) {
2509 atomic_add_int(&sifa->refcount, 1);
2515 * ok, we now need to find one on the list of bound addresses. We
2516 * can't get one on the emitting interface, so let's first find a
2517 * preferred one. If not that, an acceptable one; otherwise... we
2520 starting_point = inp->next_addr_touse;
2522 if (inp->next_addr_touse == NULL) {
2523 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2526 for (laddr = inp->next_addr_touse; laddr;
2527 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2528 if (laddr->ifa == NULL) {
2529 /* address has been removed */
2532 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2533 /* address is being deleted */
2536 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2540 atomic_add_int(&sifa->refcount, 1);
2543 if (resettotop == 0) {
2544 inp->next_addr_touse = NULL;
2547 inp->next_addr_touse = starting_point;
2550 if (inp->next_addr_touse == NULL) {
2551 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2554 /* ok, what about an acceptable address in the inp */
2555 for (laddr = inp->next_addr_touse; laddr;
2556 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2557 if (laddr->ifa == NULL) {
2558 /* address has been removed */
2561 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2562 /* address is being deleted */
2565 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2569 atomic_add_int(&sifa->refcount, 1);
2572 if (resettotop == 0) {
2573 inp->next_addr_touse = NULL;
2574 goto once_again_too;
2577 * no address bound can be a source for the destination, we are in trouble.
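/*
 * Rotation sketch for the bound-specific (endpoint) case above: selection
 * resumes at inp->next_addr_touse and wraps to the list head once, so with
 * three bound addresses A, B, C successive calls consider them in the order
 * A, B, C, A, ...  (A minimal illustration, usable() is a hypothetical
 * predicate standing in for the preferred/acceptable checks.)
 *
 *	for (laddr = inp->next_addr_touse; laddr != NULL;
 *	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
 *		if (usable(laddr))
 *			return (laddr->ifa);
 *	}
 *	// then wrap around and scan from the list head up to the start point
 */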
2585 static struct sctp_ifa *
2586 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2587 struct sctp_tcb *stcb,
2590 uint8_t dest_is_priv,
2591 uint8_t dest_is_loop,
2592 int non_asoc_addr_ok,
2595 struct sctp_laddr *laddr, *starting_point;
2597 struct sctp_ifn *sctp_ifn;
2598 struct sctp_ifa *sctp_ifa, *sifa;
2599 uint8_t start_at_beginning = 0;
2600 struct sctp_vrf *vrf;
2604 * first question, is the ifn we will emit on in our list, if so, we
2607 vrf = sctp_find_vrf(vrf_id);
2611 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2612 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2613 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2616 * first question, is the ifn we will emit on in our list? If so,
2617 * we want that one. First we look for a preferred. Second, we go
2618 * for an acceptable.
2621 /* first try for a preferred address on the ep */
2622 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2624 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2625 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2626 &sctp_ifa->address.sin.sin_addr) != 0)) {
2631 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2632 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2633 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2637 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2639 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2640 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2643 if (((non_asoc_addr_ok == 0) &&
2644 (sctp_is_addr_restricted(stcb, sifa))) ||
2645 (non_asoc_addr_ok &&
2646 (sctp_is_addr_restricted(stcb, sifa)) &&
2647 (!sctp_is_addr_pending(stcb, sifa)))) {
2648 /* on the no-no list */
2651 atomic_add_int(&sifa->refcount, 1);
2655 /* next try for an acceptable address on the ep */
2656 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2658 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2659 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2660 &sctp_ifa->address.sin.sin_addr) != 0)) {
2665 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2666 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2667 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2671 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2673 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2674 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2677 if (((non_asoc_addr_ok == 0) &&
2678 (sctp_is_addr_restricted(stcb, sifa))) ||
2679 (non_asoc_addr_ok &&
2680 (sctp_is_addr_restricted(stcb, sifa)) &&
2681 (!sctp_is_addr_pending(stcb, sifa)))) {
2682 /* on the no-no list */
2685 atomic_add_int(&sifa->refcount, 1);
2692 * if we can't find one like that then we must look at all addresses
2693 * bound and pick one, first a preferred one, then an acceptable one.
2695 starting_point = stcb->asoc.last_used_address;
2697 if (stcb->asoc.last_used_address == NULL) {
2698 start_at_beginning = 1;
2699 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2701 /* search beginning with the last used address */
2702 for (laddr = stcb->asoc.last_used_address; laddr;
2703 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2704 if (laddr->ifa == NULL) {
2705 /* address has been removed */
2708 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2709 /* address is being deleted */
2712 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2715 if (((non_asoc_addr_ok == 0) &&
2716 (sctp_is_addr_restricted(stcb, sifa))) ||
2717 (non_asoc_addr_ok &&
2718 (sctp_is_addr_restricted(stcb, sifa)) &&
2719 (!sctp_is_addr_pending(stcb, sifa)))) {
2720 /* on the no-no list */
2723 stcb->asoc.last_used_address = laddr;
2724 atomic_add_int(&sifa->refcount, 1);
2727 if (start_at_beginning == 0) {
2728 stcb->asoc.last_used_address = NULL;
2729 goto sctp_from_the_top;
2731 /* now try for any higher scope than the destination */
2732 stcb->asoc.last_used_address = starting_point;
2733 start_at_beginning = 0;
2735 if (stcb->asoc.last_used_address == NULL) {
2736 start_at_beginning = 1;
2737 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2739 /* search beginning with the last used address */
2740 for (laddr = stcb->asoc.last_used_address; laddr;
2741 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2742 if (laddr->ifa == NULL) {
2743 /* address has been removed */
2746 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2747 /* address is being deleted */
2750 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2754 if (((non_asoc_addr_ok == 0) &&
2755 (sctp_is_addr_restricted(stcb, sifa))) ||
2756 (non_asoc_addr_ok &&
2757 (sctp_is_addr_restricted(stcb, sifa)) &&
2758 (!sctp_is_addr_pending(stcb, sifa)))) {
2759 /* on the no-no list */
2762 stcb->asoc.last_used_address = laddr;
2763 atomic_add_int(&sifa->refcount, 1);
2766 if (start_at_beginning == 0) {
2767 stcb->asoc.last_used_address = NULL;
2768 goto sctp_from_the_top2;
2773 static struct sctp_ifa *
2774 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2775 struct sctp_inpcb *inp,
2776 struct sctp_tcb *stcb,
2777 int non_asoc_addr_ok,
2778 uint8_t dest_is_loop,
2779 uint8_t dest_is_priv,
2785 struct sctp_ifa *ifa, *sifa;
2786 int num_eligible_addr = 0;
2789 struct sockaddr_in6 sin6, lsa6;
2791 if (fam == AF_INET6) {
2792 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2793 (void)sa6_recoverscope(&sin6);
2796 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2798 if ((ifa->address.sa.sa_family == AF_INET) &&
2799 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2800 &ifa->address.sin.sin_addr) != 0)) {
2805 if ((ifa->address.sa.sa_family == AF_INET6) &&
2806 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2807 &ifa->address.sin6.sin6_addr) != 0)) {
2811 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2812 (non_asoc_addr_ok == 0))
2814 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2819 if (fam == AF_INET6 &&
2821 sifa->src_is_loop && sifa->src_is_priv) {
2823 * don't allow fe80::1 to be a src on loop ::1, we
2824 * don't list it to the peer so we will get an
2829 if (fam == AF_INET6 &&
2830 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2831 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2833 * link-local <-> link-local must belong to the same
2836 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2837 (void)sa6_recoverscope(&lsa6);
2838 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2845 * Check if the IPv6 address matches to next-hop. In the
2846 * mobile case, old IPv6 address may be not deleted from the
2847 * interface. Then, the interface has previous and new
2848 * addresses. We should use one corresponding to the
2849 * next-hop. (by micchie)
2852 if (stcb && fam == AF_INET6 &&
2853 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2854 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2861 /* Avoid topologically incorrect IPv4 address */
2862 if (stcb && fam == AF_INET &&
2863 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2864 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2870 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2873 if (((non_asoc_addr_ok == 0) &&
2874 (sctp_is_addr_restricted(stcb, sifa))) ||
2875 (non_asoc_addr_ok &&
2876 (sctp_is_addr_restricted(stcb, sifa)) &&
2877 (!sctp_is_addr_pending(stcb, sifa)))) {
2879 * It is restricted for some reason..
2880 * probably not yet added.
2885 if (num_eligible_addr >= addr_wanted) {
2888 num_eligible_addr++;
2895 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2896 struct sctp_inpcb *inp,
2897 struct sctp_tcb *stcb,
2898 int non_asoc_addr_ok,
2899 uint8_t dest_is_loop,
2900 uint8_t dest_is_priv,
2903 struct sctp_ifa *ifa, *sifa;
2904 int num_eligible_addr = 0;
2906 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2908 if ((ifa->address.sa.sa_family == AF_INET) &&
2909 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2910 &ifa->address.sin.sin_addr) != 0)) {
2915 if ((ifa->address.sa.sa_family == AF_INET6) &&
2917 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2918 &ifa->address.sin6.sin6_addr) != 0)) {
2922 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2923 (non_asoc_addr_ok == 0)) {
2926 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2932 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2935 if (((non_asoc_addr_ok == 0) &&
2936 (sctp_is_addr_restricted(stcb, sifa))) ||
2937 (non_asoc_addr_ok &&
2938 (sctp_is_addr_restricted(stcb, sifa)) &&
2939 (!sctp_is_addr_pending(stcb, sifa)))) {
2941 * It is restricted for some reason..
2942 * probably not yet added.
2947 num_eligible_addr++;
2949 return (num_eligible_addr);
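/*
 * How the two helpers above cooperate in sctp_choose_boundall() (sketch):
 *
 *	num = sctp_count_num_preferred_boundall(ifn, inp, stcb,
 *	    non_asoc_addr_ok, dest_is_loop, dest_is_priv, fam);
 *	if (cur >= num)
 *		cur = 0;                 // wrap the per-net rotation index
 *	ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(ifn, inp, stcb,
 *	    non_asoc_addr_ok, dest_is_loop, dest_is_priv, cur, fam, ro);
 *
 * Counting first lets the rotation index wrap cleanly even when addresses
 * are added or deprecated between calls.
 */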
2952 static struct sctp_ifa *
2953 sctp_choose_boundall(struct sctp_inpcb *inp,
2954 struct sctp_tcb *stcb,
2955 struct sctp_nets *net,
2958 uint8_t dest_is_priv,
2959 uint8_t dest_is_loop,
2960 int non_asoc_addr_ok,
2963 int cur_addr_num = 0, num_preferred = 0;
2965 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2966 struct sctp_ifa *sctp_ifa, *sifa;
2968 struct sctp_vrf *vrf;
2976 * For boundall we can use any address in the association.
2977 * If non_asoc_addr_ok is set we can use any address (at least in
2978 * theory). So we look for preferred addresses first. If we find one,
2979 * we use it. Otherwise we next try to get an address on the
2980 * interface, which we should be able to do (unless non_asoc_addr_ok
2981 * is false and we are routed out that way). In these cases where we
2982 * can't use the address of the interface we go through all the
2983 * ifn's looking for an address we can use and fill that in. Punting
2984 * means we send back address 0, which will probably cause problems
2985 * actually since then IP will fill in the address of the route ifn,
2986 * which means we probably already rejected it.. i.e. here comes an
2989 vrf = sctp_find_vrf(vrf_id);
2993 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2994 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2995 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2996 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2997 if (sctp_ifn == NULL) {
2998 /* ?? We don't have this guy ?? */
2999 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
3000 goto bound_all_plan_b;
3002 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
3003 ifn_index, sctp_ifn->ifn_name);
3006 cur_addr_num = net->indx_of_eligible_next_to_use;
3008 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3013 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3014 num_preferred, sctp_ifn->ifn_name);
3015 if (num_preferred == 0) {
3017 * no eligible addresses, we must use some other interface
3018 * address if we can find one.
3020 goto bound_all_plan_b;
3023 * Ok we have num_eligible_addr set with how many we can use, this
3024 * may vary from call to call due to addresses being deprecated
3027 if (cur_addr_num >= num_preferred) {
3031 * select the nth address from the list (where cur_addr_num is the
3032 * nth) and 0 is the first one, 1 is the second one etc...
3034 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3036 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3037 dest_is_priv, cur_addr_num, fam, ro);
3039 /* if sctp_ifa is NULL something changed??, fall to plan b. */
3041 atomic_add_int(&sctp_ifa->refcount, 1);
3043 /* save off where the next one we will want */
3044 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3049 * plan_b: Look at all interfaces and find a preferred address. If
3050 * no preferred fall through to plan_c.
3053 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3054 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3055 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3056 sctp_ifn->ifn_name);
3057 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3058 /* wrong base scope */
3059 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3062 if ((sctp_ifn == looked_at) && looked_at) {
3063 /* already looked at this guy */
3064 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3067 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3068 dest_is_loop, dest_is_priv, fam);
3069 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3070 "Found ifn:%p %d preferred source addresses\n",
3071 ifn, num_preferred);
3072 if (num_preferred == 0) {
3073 /* None on this interface. */
3074 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefered -- skipping to next\n");
3077 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3078 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3079 num_preferred, (void *)sctp_ifn, cur_addr_num);
3082 * Ok we have num_eligible_addr set with how many we can
3083 * use, this may vary from call to call due to addresses
3084 * being deprecated etc..
3086 if (cur_addr_num >= num_preferred) {
3089 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3090 dest_is_priv, cur_addr_num, fam, ro);
3094 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3095 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3097 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3098 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3099 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3100 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3102 atomic_add_int(&sifa->refcount, 1);
3106 again_with_private_addresses_allowed:
3108 /* plan_c: do we have an acceptable address on the emit interface */
3110 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3111 if (emit_ifn == NULL) {
3112 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3115 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3116 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3118 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3119 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3120 &sctp_ifa->address.sin.sin_addr) != 0)) {
3121 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3126 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3127 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3128 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3129 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3133 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3134 (non_asoc_addr_ok == 0)) {
3135 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3138 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3141 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3145 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3146 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3150 if (((non_asoc_addr_ok == 0) &&
3151 (sctp_is_addr_restricted(stcb, sifa))) ||
3152 (non_asoc_addr_ok &&
3153 (sctp_is_addr_restricted(stcb, sifa)) &&
3154 (!sctp_is_addr_pending(stcb, sifa)))) {
3156 * It is restricted for some reason..
3157 * probably not yet added.
3159 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its resticted\n");
3164 atomic_add_int(&sifa->refcount, 1);
3169 * plan_d: We are in trouble. No preferred address on the emit
3170 * interface, and not even a preferred address on any interface. Go
3171 * out and see if we can find an acceptable address somewhere
3172 * amongst all interfaces.
3174 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3175 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3176 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3177 /* wrong base scope */
3180 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3182 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3183 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3184 &sctp_ifa->address.sin.sin_addr) != 0)) {
3189 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3190 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3191 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3195 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3196 (non_asoc_addr_ok == 0))
3198 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3204 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3208 if (((non_asoc_addr_ok == 0) &&
3209 (sctp_is_addr_restricted(stcb, sifa))) ||
3210 (non_asoc_addr_ok &&
3211 (sctp_is_addr_restricted(stcb, sifa)) &&
3212 (!sctp_is_addr_pending(stcb, sifa)))) {
3214 * It is restricted for some
3215 * reason.. probably not yet added.
3226 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3227 stcb->asoc.scope.ipv4_local_scope = 1;
3229 goto again_with_private_addresses_allowed;
3230 } else if (retried == 1) {
3231 stcb->asoc.scope.ipv4_local_scope = 0;
3239 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3240 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3241 /* wrong base scope */
3244 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3245 struct sctp_ifa *tmp_sifa;
3248 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3249 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3250 &sctp_ifa->address.sin.sin_addr) != 0)) {
3255 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3256 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3257 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3261 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3262 (non_asoc_addr_ok == 0))
3264 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3267 if (tmp_sifa == NULL) {
3270 if (tmp_sifa == sifa) {
3274 if (sctp_is_address_in_scope(tmp_sifa,
3275 &stcb->asoc.scope, 0) == 0) {
3278 if (((non_asoc_addr_ok == 0) &&
3279 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3280 (non_asoc_addr_ok &&
3281 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3282 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3292 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3293 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3294 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3299 atomic_add_int(&sifa->refcount, 1);
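/*
 * Summary of the fallback ladder implemented above for the bound-all case
 * (a sketch of the flow, not the exact control structure):
 *
 *	Plan A: a preferred address on the interface the route goes out.
 *	Plan B: a preferred address on any other interface in the VRF.
 *	Plan C: an acceptable address on the emit interface.
 *	Plan D: an acceptable address on any interface; if nothing is found
 *	        and IPv4 private addresses were excluded by scope, retry once
 *	        with ipv4_local_scope temporarily enabled.
 *
 * Each successful path bumps the refcount of the returned sctp_ifa.
 */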
3307 /* tcb may be NULL */
3309 sctp_source_address_selection(struct sctp_inpcb *inp,
3310 struct sctp_tcb *stcb,
3312 struct sctp_nets *net,
3313 int non_asoc_addr_ok, uint32_t vrf_id)
3315 struct sctp_ifa *answer;
3316 uint8_t dest_is_priv, dest_is_loop;
3320 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3324 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3330 * - Find the route if needed, cache if I can.
3331 * - Look at interface address in route, Is it in the bound list. If so we
3332 * have the best source.
3333 * - If not we must rotate amongst the addresses.
3337 * Do we need to pay attention to scope? We can have a private address
3338 * or a global address we are sourcing or sending to. So if we draw
3340 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3342 * ------------------------------------------
3343 * source * dest * result
3344 * -----------------------------------------
3345 * <a> Private * Global * NAT
3346 * -----------------------------------------
3347 * <b> Private * Private * No problem
3348 * -----------------------------------------
3349 * <c> Global * Private * Huh, How will this work?
3350 * -----------------------------------------
3351 * <d> Global * Global * No Problem
3352 *------------------------------------------
3353 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3355 *------------------------------------------
3356 * source * dest * result
3357 * -----------------------------------------
3358 * <a> Linklocal * Global *
3359 * -----------------------------------------
3360 * <b> Linklocal * Linklocal * No problem
3361 * -----------------------------------------
3362 * <c> Global * Linklocal * Huh, How will this work?
3363 * -----------------------------------------
3364 * <d> Global * Global * No Problem
3365 *------------------------------------------
3366 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3368 * And then we add to that what happens if there are multiple addresses
3369 * assigned to an interface. Remember the ifa on a ifn is a linked
3370 * list of addresses. So one interface can have more than one IP
3371 * address. What happens if we have both a private and a global
3372 * address? Do we then use the context of the destination to sort out
3373 * which one is best? And what about NATs: sending P->G may get you a
3374 * NAT translation, or should you select the G that's on the interface in
3379 * - count the number of addresses on the interface.
3380 * - if it is one, no problem except case <c>.
3381 * For <a> we will assume a NAT out there.
3382 * - if there are more than one, then we need to worry about scope P
3383 * or G. We should prefer G -> G and P -> P if possible.
3384 * Then as a secondary fall back to mixed types G->P being a last
3386 * - The above all works for bound all, but bound specific we need to
3387 * use the same concept but instead only consider the bound
3388 * addresses. If the bound set is NOT assigned to the interface then
3389 * we must use rotation amongst the bound addresses..
3391 if (ro->ro_rt == NULL) {
3393 * Need a route to cache.
3395 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3397 if (ro->ro_rt == NULL) {
3400 fam = ro->ro_dst.sa_family;
3401 dest_is_priv = dest_is_loop = 0;
3402 /* Setup our scopes for the destination */
3406 /* Scope based on outbound address */
3407 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3410 /* mark it as local */
3411 net->addr_is_local = 1;
3413 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3420 /* Scope based on outbound address */
3421 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3422 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3424 * If the address is a loopback address, which
3425 * consists of "::1" OR "fe80::1%lo0", we are
3426 * loopback scope. But we don't use dest_is_priv
3427 * (link local addresses).
3431 /* mark it as local */
3432 net->addr_is_local = 1;
3434 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3440 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3441 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3442 SCTP_IPI_ADDR_RLOCK();
3443 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3447 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3448 dest_is_priv, dest_is_loop,
3449 non_asoc_addr_ok, fam);
3450 SCTP_IPI_ADDR_RUNLOCK();
3457 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3458 vrf_id, dest_is_priv,
3460 non_asoc_addr_ok, fam);
3462 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3467 SCTP_IPI_ADDR_RUNLOCK();
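/*
 * Typical use (sketch): callers about to emit a packet ask for a source
 * address for the cached route of the destination net, e.g.
 *
 *	if (net->src_addr_selected == 0) {
 *		net->ro._s_addr = sctp_source_address_selection(inp, stcb,
 *		    (sctp_route_t *)&net->ro, net, 0, vrf_id);
 *		net->src_addr_selected = 1;
 *	}
 *
 * The returned sctp_ifa is referenced; it is released later with
 * sctp_free_ifa().
 */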
3472 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3475 int tlen, at, found;
3476 struct sctp_sndinfo sndinfo;
3477 struct sctp_prinfo prinfo;
3478 struct sctp_authinfo authinfo;
3480 tlen = SCTP_BUF_LEN(control);
3484 * Independent of how many mbufs, find the c_type inside the control
3485 * structure and copy out the data.
3488 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3489 /* There is not enough room for one more. */
3492 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3493 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3494 /* We don't have a complete CMSG header. */
3497 if (((int)cmh.cmsg_len + at) > tlen) {
3498 /* We don't have the complete CMSG. */
3501 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3502 ((c_type == cmh.cmsg_type) ||
3503 ((c_type == SCTP_SNDRCV) &&
3504 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3505 (cmh.cmsg_type == SCTP_PRINFO) ||
3506 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3507 if (c_type == cmh.cmsg_type) {
3508 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
3511 /* It is exactly what we want. Copy it out. */
3512 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), (int)cpsize, (caddr_t)data);
3515 struct sctp_sndrcvinfo *sndrcvinfo;
3517 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3519 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3522 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3524 switch (cmh.cmsg_type) {
3526 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
3529 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3530 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3531 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3532 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3533 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3534 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3537 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
3540 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3541 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3542 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3544 sndrcvinfo->sinfo_timetolive = 0;
3546 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3549 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
3552 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3553 sndrcvinfo->sinfo_keynumber_valid = 1;
3554 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3562 at += CMSG_ALIGN(cmh.cmsg_len);
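/*
 * The control mbuf parsed above holds an ordinary cmsg chain.  A userland
 * sketch of producing one SCTP_SNDINFO cmsg (illustrative only):
 *
 *	char buf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];
 *	struct msghdr msg = { .msg_control = buf, .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	struct sctp_sndinfo *info = (struct sctp_sndinfo *)CMSG_DATA(cmsg);
 *
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDINFO;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *	info->snd_sid = 3;       // picked up as sinfo_stream by the loop above
 *
 * The parser walks exactly such a chain, using CMSG_ALIGN(cmsg_len) to step
 * to the next header.
 */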
3568 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3572 struct sctp_initmsg initmsg;
3575 struct sockaddr_in sin;
3579 struct sockaddr_in6 sin6;
3583 tlen = SCTP_BUF_LEN(control);
3586 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3587 /* There is not enough room for one more. */
3591 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3592 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3593 /* We don't have a complete CMSG header. */
3597 if (((int)cmh.cmsg_len + at) > tlen) {
3598 /* We don't have the complete CMSG. */
3602 if (cmh.cmsg_level == IPPROTO_SCTP) {
3603 switch (cmh.cmsg_type) {
3605 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
3609 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3610 if (initmsg.sinit_max_attempts)
3611 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3612 if (initmsg.sinit_num_ostreams)
3613 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3614 if (initmsg.sinit_max_instreams)
3615 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3616 if (initmsg.sinit_max_init_timeo)
3617 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3618 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3619 struct sctp_stream_out *tmp_str;
3622 #if defined(SCTP_DETAILED_STR_STATS)
3627 /* Default is NOT correct */
3628 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3629 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3630 SCTP_TCB_UNLOCK(stcb);
3631 SCTP_MALLOC(tmp_str,
3632 struct sctp_stream_out *,
3633 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3635 SCTP_TCB_LOCK(stcb);
3636 if (tmp_str != NULL) {
3637 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3638 stcb->asoc.strmout = tmp_str;
3639 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3641 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3643 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3644 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3645 stcb->asoc.strmout[i].chunks_on_queues = 0;
3646 stcb->asoc.strmout[i].next_sequence_send = 0;
3647 #if defined(SCTP_DETAILED_STR_STATS)
3648 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3649 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3650 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3653 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3654 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3656 stcb->asoc.strmout[i].stream_no = i;
3657 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3658 stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3659 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
3664 case SCTP_DSTADDRV4:
3665 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3669 memset(&sin, 0, sizeof(struct sockaddr_in));
3670 sin.sin_family = AF_INET;
3671 sin.sin_len = sizeof(struct sockaddr_in);
3672 sin.sin_port = stcb->rport;
3673 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3674 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3675 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3676 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3680 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3681 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3688 case SCTP_DSTADDRV6:
3689 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3693 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3694 sin6.sin6_family = AF_INET6;
3695 sin6.sin6_len = sizeof(struct sockaddr_in6);
3696 sin6.sin6_port = stcb->rport;
3697 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3698 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3699 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3704 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3705 in6_sin6_2_sin(&sin, &sin6);
3706 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3707 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3708 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3712 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3713 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3719 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
3720 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3730 at += CMSG_ALIGN(cmh.cmsg_len);
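/*
 * The SCTP_DSTADDRV4/SCTP_DSTADDRV6 cmsgs handled above carry a bare
 * struct in_addr / struct in6_addr; the port is taken from stcb->rport.
 * A minimal userland sketch for one IPv4 destination (illustrative only,
 * assumes msg_control space has already been set up as in the earlier
 * SCTP_SNDINFO sketch):
 *
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	struct in_addr dst;
 *
 *	inet_pton(AF_INET, "192.0.2.10", &dst);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_DSTADDRV4;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
 *	memcpy(CMSG_DATA(cmsg), &dst, sizeof(dst));
 */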
3735 static struct sctp_tcb *
3736 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3738 struct mbuf *control,
3739 struct sctp_nets **net_p,
3744 struct sctp_tcb *stcb;
3745 struct sockaddr *addr;
3748 struct sockaddr_in sin;
3752 struct sockaddr_in6 sin6;
3756 tlen = SCTP_BUF_LEN(control);
3759 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3760 /* There is not enough room for one more. */
3764 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3765 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3766 /* We don't have a complete CMSG header. */
3770 if (((int)cmh.cmsg_len + at) > tlen) {
3771 /* We don't have the complete CMSG. */
3775 if (cmh.cmsg_level == IPPROTO_SCTP) {
3776 switch (cmh.cmsg_type) {
3778 case SCTP_DSTADDRV4:
3779 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3783 memset(&sin, 0, sizeof(struct sockaddr_in));
3784 sin.sin_family = AF_INET;
3785 sin.sin_len = sizeof(struct sockaddr_in);
3786 sin.sin_port = port;
3787 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3788 addr = (struct sockaddr *)&sin;
3792 case SCTP_DSTADDRV6:
3793 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3797 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3798 sin6.sin6_family = AF_INET6;
3799 sin6.sin6_len = sizeof(struct sockaddr_in6);
3800 sin6.sin6_port = port;
3801 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3803 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3804 in6_sin6_2_sin(&sin, &sin6);
3805 addr = (struct sockaddr *)&sin;
3808 addr = (struct sockaddr *)&sin6;
3816 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3822 at += CMSG_ALIGN(cmh.cmsg_len);
3827 static struct mbuf *
3828 sctp_add_cookie(struct mbuf *init, int init_offset,
3829 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
3831 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3832 struct sctp_state_cookie *stc;
3833 struct sctp_paramhdr *ph;
3838 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3839 sizeof(struct sctp_paramhdr)), 0,
3840 M_NOWAIT, 1, MT_DATA);
3844 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3845 if (copy_init == NULL) {
3849 #ifdef SCTP_MBUF_LOGGING
3850 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3851 sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3854 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3856 if (copy_initack == NULL) {
3858 sctp_m_freem(copy_init);
3861 #ifdef SCTP_MBUF_LOGGING
3862 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3863 sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3866 /* easy side we just drop it on the end */
3867 ph = mtod(mret, struct sctp_paramhdr *);
3868 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3869 sizeof(struct sctp_paramhdr);
3870 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3871 sizeof(struct sctp_paramhdr));
3872 ph->param_type = htons(SCTP_STATE_COOKIE);
3873 ph->param_length = 0; /* fill in at the end */
3874 /* Fill in the stc cookie data */
3875 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3877 /* tack the INIT and then the INIT-ACK onto the chain */
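/*
 * The resulting COOKIE parameter is, roughly:
 *   paramhdr | state cookie | copy of INIT | copy of INIT-ACK | signature
 * The signature bytes are only zeroed here; the caller is expected to
 * compute the HMAC over the cookie and fill it in via the returned
 * *signature pointer.
 */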
3879 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3880 cookie_sz += SCTP_BUF_LEN(m_at);
3881 if (SCTP_BUF_NEXT(m_at) == NULL) {
3882 SCTP_BUF_NEXT(m_at) = copy_init;
3886 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3887 cookie_sz += SCTP_BUF_LEN(m_at);
3888 if (SCTP_BUF_NEXT(m_at) == NULL) {
3889 SCTP_BUF_NEXT(m_at) = copy_initack;
3893 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3894 cookie_sz += SCTP_BUF_LEN(m_at);
3895 if (SCTP_BUF_NEXT(m_at) == NULL) {
3899 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
3901 /* no space, so free the entire chain */
3905 SCTP_BUF_LEN(sig) = 0;
3906 SCTP_BUF_NEXT(m_at) = sig;
3908 foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
3909 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3911 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3912 cookie_sz += SCTP_SIGNATURE_SIZE;
3913 ph->param_length = htons(cookie_sz);
3919 sctp_get_ect(struct sctp_tcb *stcb)
3921 if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
3922 return (SCTP_ECT0_BIT);
3928 #if defined(INET) || defined(INET6)
3930 sctp_handle_no_route(struct sctp_tcb *stcb,
3931 struct sctp_nets *net,
3934 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3937 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3938 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3939 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3940 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3941 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
3942 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3946 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3947 net->dest_state &= ~SCTP_ADDR_PF;
3951 if (net == stcb->asoc.primary_destination) {
3952 /* need a new primary */
3953 struct sctp_nets *alt;
3955 alt = sctp_find_alternate_net(stcb, net, 0);
3957 if (stcb->asoc.alternate) {
3958 sctp_free_remote_addr(stcb->asoc.alternate);
3960 stcb->asoc.alternate = alt;
3961 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3962 if (net->ro._s_addr) {
3963 sctp_free_ifa(net->ro._s_addr);
3964 net->ro._s_addr = NULL;
3966 net->src_addr_selected = 0;
3976 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3977 struct sctp_tcb *stcb, /* may be NULL */
3978 struct sctp_nets *net,
3979 struct sockaddr *to,
3981 uint32_t auth_offset,
3982 struct sctp_auth_chunk *auth,
3983 uint16_t auth_keyid,
3984 int nofragment_flag,
3991 union sctp_sockstore *over_addr,
3992 uint8_t mflowtype, uint32_t mflowid,
3993 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3994 int so_locked SCTP_UNUSED
3999 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4002 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4003 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4004 * - fill in the HMAC digest of any AUTH chunk in the packet.
4005 * - calculate and fill in the SCTP checksum.
4006 * - prepend an IP header.
4007 * - if boundall use INADDR_ANY.
4008 * - if boundspecific do source address selection.
4009 * - set fragmentation option for IPv4.
4010 * - On return from IP output, check/adjust mtu size of output
4011 * interface and smallest_mtu size as well.
4013 /* Will need ifdefs around this */
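/*
 * On the wire the result is either
 *   [IP hdr][SCTP common hdr][chunks ...]            (port == 0)
 * or, with UDP encapsulation,
 *   [IP hdr][UDP hdr][SCTP common hdr][chunks ...]   (port != 0)
 * and likewise for the IPv6 case below.
 */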
4015 struct sctphdr *sctphdr;
4019 #if defined(INET) || defined(INET6)
4023 #if defined(INET) || defined(INET6)
4025 sctp_route_t *ro = NULL;
4026 struct udphdr *udp = NULL;
4031 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4032 struct socket *so = NULL;
4036 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4037 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4041 #if defined(INET) || defined(INET6)
4043 vrf_id = stcb->asoc.vrf_id;
4045 vrf_id = inp->def_vrf_id;
4048 /* fill in the HMAC digest for any AUTH chunk in the packet */
4049 if ((auth != NULL) && (stcb != NULL)) {
4050 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4053 tos_value = net->dscp;
4055 tos_value = stcb->asoc.default_dscp;
4057 tos_value = inp->sctp_ep.default_dscp;
4060 switch (to->sa_family) {
4064 struct ip *ip = NULL;
4065 sctp_route_t iproute;
4068 len = SCTP_MIN_V4_OVERHEAD;
4070 len += sizeof(struct udphdr);
4072 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4075 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4078 SCTP_ALIGN_TO_END(newm, len);
4079 SCTP_BUF_LEN(newm) = len;
4080 SCTP_BUF_NEXT(newm) = m;
4083 m->m_pkthdr.flowid = net->flowid;
4084 M_HASHTYPE_SET(m, net->flowtype);
4086 m->m_pkthdr.flowid = mflowid;
4087 M_HASHTYPE_SET(m, mflowtype);
4089 packet_length = sctp_calculate_len(m);
4090 ip = mtod(m, struct ip *);
4091 ip->ip_v = IPVERSION;
4092 ip->ip_hl = (sizeof(struct ip) >> 2);
4093 if (tos_value == 0) {
4095 * In particular, this means it is not set
4096 * at the SCTP layer, so use the value from
4099 tos_value = inp->ip_inp.inp.inp_ip_tos;
4103 tos_value |= sctp_get_ect(stcb);
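/*
 * The two low-order bits of the TOS/DSCP byte carry the ECN codepoint;
 * ECT(0) is set when ECN was negotiated (see sctp_get_ect() above).
 */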
4105 if ((nofragment_flag) && (port == 0)) {
4106 ip->ip_off = htons(IP_DF);
4108 ip->ip_off = htons(0);
4110 /* FreeBSD has a function for ip_id's */
4113 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4114 ip->ip_len = htons(packet_length);
4115 ip->ip_tos = tos_value;
4117 ip->ip_p = IPPROTO_UDP;
4119 ip->ip_p = IPPROTO_SCTP;
4124 memset(&iproute, 0, sizeof(iproute));
4125 memcpy(&ro->ro_dst, to, to->sa_len);
4127 ro = (sctp_route_t *) & net->ro;
4129 /* Now the address selection part */
4130 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4132 /* call the routine to select the src address */
4133 if (net && out_of_asoc_ok == 0) {
4134 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4135 sctp_free_ifa(net->ro._s_addr);
4136 net->ro._s_addr = NULL;
4137 net->src_addr_selected = 0;
4143 if (net->src_addr_selected == 0) {
4144 /* Cache the source address */
4145 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4148 net->src_addr_selected = 1;
4150 if (net->ro._s_addr == NULL) {
4151 /* No route to host */
4152 net->src_addr_selected = 0;
4153 sctp_handle_no_route(stcb, net, so_locked);
4154 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4156 return (EHOSTUNREACH);
4158 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4160 if (over_addr == NULL) {
4161 struct sctp_ifa *_lsrc;
4163 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4167 if (_lsrc == NULL) {
4168 sctp_handle_no_route(stcb, net, so_locked);
4169 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4171 return (EHOSTUNREACH);
4173 ip->ip_src = _lsrc->address.sin.sin_addr;
4174 sctp_free_ifa(_lsrc);
4176 ip->ip_src = over_addr->sin.sin_addr;
4177 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4181 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4182 sctp_handle_no_route(stcb, net, so_locked);
4183 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4185 return (EHOSTUNREACH);
4187 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4188 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4189 udp->uh_dport = port;
4190 udp->uh_ulen = htons((uint16_t) (packet_length - sizeof(struct ip)));
4192 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
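/*
 * in_pseudo() only seeds uh_sum with the IPv4 pseudo-header sum
 * (addresses, protocol and UDP length); the checksum over the UDP
 * payload is finished later (see SCTP_ENABLE_UDP_CSUM() below).
 */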
4196 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4198 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4201 sctphdr->src_port = src_port;
4202 sctphdr->dest_port = dest_port;
4203 sctphdr->v_tag = v_tag;
4204 sctphdr->checksum = 0;
4207 * If source address selection fails and we find no
4208 * route then the ip_output should fail as well with
4209 * a NO_ROUTE_TO_HOST type error. We probably should
4210 * catch that somewhere and abort the association
4211 * right away (assuming this is an INIT being sent).
4213 if (ro->ro_rt == NULL) {
4215 * src addr selection failed to find a route
4216 * (or valid source addr), so we can't get
4217 * there from here (yet)!
4219 sctp_handle_no_route(stcb, net, so_locked);
4220 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4222 return (EHOSTUNREACH);
4224 if (ro != &iproute) {
4225 memcpy(&iproute, ro, sizeof(*ro));
4227 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4228 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4229 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4230 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
4231 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4234 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4235 /* failed to prepend data, give up */
4236 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4240 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4242 #if defined(SCTP_WITH_NO_CSUM)
4243 SCTP_STAT_INCR(sctps_sendnocrc);
4245 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4246 SCTP_STAT_INCR(sctps_sendswcrc);
4249 SCTP_ENABLE_UDP_CSUM(o_pak);
4252 #if defined(SCTP_WITH_NO_CSUM)
4253 SCTP_STAT_INCR(sctps_sendnocrc);
4255 m->m_pkthdr.csum_flags = CSUM_SCTP;
4256 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4257 SCTP_STAT_INCR(sctps_sendhwcrc);
4260 #ifdef SCTP_PACKET_LOGGING
4261 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4262 sctp_packet_log(o_pak);
4264 /* send it out. table id is taken from stcb */
4265 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4266 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4267 so = SCTP_INP_SO(inp);
4268 SCTP_SOCKET_UNLOCK(so, 0);
4271 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4272 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4273 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4274 atomic_add_int(&stcb->asoc.refcnt, 1);
4275 SCTP_TCB_UNLOCK(stcb);
4276 SCTP_SOCKET_LOCK(so, 0);
4277 SCTP_TCB_LOCK(stcb);
4278 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4281 SCTP_STAT_INCR(sctps_sendpackets);
4282 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4284 SCTP_STAT_INCR(sctps_senderrors);
4286 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4288 /* free temp routes */
4292 * PMTU check versus smallest asoc MTU goes
4295 if ((ro->ro_rt != NULL) &&
4296 (net->ro._s_addr)) {
4299 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4301 mtu -= sizeof(struct udphdr);
4303 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4304 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4307 } else if (ro->ro_rt == NULL) {
4308 /* route was freed */
4309 if (net->ro._s_addr &&
4310 net->src_addr_selected) {
4311 sctp_free_ifa(net->ro._s_addr);
4312 net->ro._s_addr = NULL;
4314 net->src_addr_selected = 0;
4323 uint32_t flowlabel, flowinfo;
4324 struct ip6_hdr *ip6h;
4325 struct route_in6 ip6route;
4327 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4329 struct sockaddr_in6 lsa6_storage;
4331 u_short prev_port = 0;
4335 flowlabel = net->flowlabel;
4337 flowlabel = stcb->asoc.default_flowlabel;
4339 flowlabel = inp->sctp_ep.default_flowlabel;
4341 if (flowlabel == 0) {
4343 * In particular, this means it is not set
4344 * at the SCTP layer, so use the value from
4347 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4349 flowlabel &= 0x000fffff;
4350 len = SCTP_MIN_OVERHEAD;
4352 len += sizeof(struct udphdr);
4354 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4357 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4360 SCTP_ALIGN_TO_END(newm, len);
4361 SCTP_BUF_LEN(newm) = len;
4362 SCTP_BUF_NEXT(newm) = m;
4365 m->m_pkthdr.flowid = net->flowid;
4366 M_HASHTYPE_SET(m, net->flowtype);
4368 m->m_pkthdr.flowid = mflowid;
4369 M_HASHTYPE_SET(m, mflowtype);
4371 packet_length = sctp_calculate_len(m);
4373 ip6h = mtod(m, struct ip6_hdr *);
4374 /* protect *sin6 from overwrite */
4375 sin6 = (struct sockaddr_in6 *)to;
4379 /* KAME hack: embed scopeid */
4380 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4381 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4385 memset(&ip6route, 0, sizeof(ip6route));
4386 ro = (sctp_route_t *) & ip6route;
4387 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4389 ro = (sctp_route_t *) & net->ro;
4392 * We assume here that inp_flow is in host byte
4393 * order within the TCB!
4395 if (tos_value == 0) {
4397 * In particular, this means it is not set
4398 * at the SCTP layer, so use the value from
4401 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4405 tos_value |= sctp_get_ect(stcb);
4409 flowinfo |= tos_value;
4411 flowinfo |= flowlabel;
4412 ip6h->ip6_flow = htonl(flowinfo);
4414 ip6h->ip6_nxt = IPPROTO_UDP;
4416 ip6h->ip6_nxt = IPPROTO_SCTP;
4418 ip6h->ip6_plen = (uint16_t) (packet_length - sizeof(struct ip6_hdr));
4419 ip6h->ip6_dst = sin6->sin6_addr;
4422 * Add SRC address selection here: we can only reuse
4423 * to a limited degree the kame src-addr-sel, since
4424 * we can try their selection but it may not be
4427 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4428 lsa6_tmp.sin6_family = AF_INET6;
4429 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4431 if (net && out_of_asoc_ok == 0) {
4432 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4433 sctp_free_ifa(net->ro._s_addr);
4434 net->ro._s_addr = NULL;
4435 net->src_addr_selected = 0;
4441 if (net->src_addr_selected == 0) {
4442 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4443 /* KAME hack: embed scopeid */
4444 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4445 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4448 /* Cache the source address */
4449 net->ro._s_addr = sctp_source_address_selection(inp,
4455 (void)sa6_recoverscope(sin6);
4456 net->src_addr_selected = 1;
4458 if (net->ro._s_addr == NULL) {
4459 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4460 net->src_addr_selected = 0;
4461 sctp_handle_no_route(stcb, net, so_locked);
4462 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4464 return (EHOSTUNREACH);
4466 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4468 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4469 /* KAME hack: embed scopeid */
4470 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4471 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4474 if (over_addr == NULL) {
4475 struct sctp_ifa *_lsrc;
4477 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4481 if (_lsrc == NULL) {
4482 sctp_handle_no_route(stcb, net, so_locked);
4483 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4485 return (EHOSTUNREACH);
4487 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4488 sctp_free_ifa(_lsrc);
4490 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4491 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4493 (void)sa6_recoverscope(sin6);
4495 lsa6->sin6_port = inp->sctp_lport;
4497 if (ro->ro_rt == NULL) {
4499 * src addr selection failed to find a route
4500 * (or valid source addr), so we can't get
4503 sctp_handle_no_route(stcb, net, so_locked);
4504 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4506 return (EHOSTUNREACH);
4509 * XXX: sa6 may not have a valid sin6_scope_id in
4510 * the non-SCOPEDROUTING case.
4512 bzero(&lsa6_storage, sizeof(lsa6_storage));
4513 lsa6_storage.sin6_family = AF_INET6;
4514 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4515 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4516 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4517 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4522 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4523 lsa6_storage.sin6_port = inp->sctp_lport;
4524 lsa6 = &lsa6_storage;
4525 ip6h->ip6_src = lsa6->sin6_addr;
4528 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4529 sctp_handle_no_route(stcb, net, so_locked);
4530 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4532 return (EHOSTUNREACH);
4534 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4535 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4536 udp->uh_dport = port;
4537 udp->uh_ulen = htons((uint16_t) (packet_length - sizeof(struct ip6_hdr)));
4539 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4541 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4544 sctphdr->src_port = src_port;
4545 sctphdr->dest_port = dest_port;
4546 sctphdr->v_tag = v_tag;
4547 sctphdr->checksum = 0;
4550 * We set the hop limit now since there is a good
4551 * chance that our ro pointer is now filled
4553 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4554 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4557 /* Copy to be sure something bad is not happening */
4558 sin6->sin6_addr = ip6h->ip6_dst;
4559 lsa6->sin6_addr = ip6h->ip6_src;
4562 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4563 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4564 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4565 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4566 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4568 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4570 * preserve the port and scope for link
4573 prev_scope = sin6->sin6_scope_id;
4574 prev_port = sin6->sin6_port;
4576 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4577 /* failed to prepend data, give up */
4579 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4582 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4584 #if defined(SCTP_WITH_NO_CSUM)
4585 SCTP_STAT_INCR(sctps_sendnocrc);
4587 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4588 SCTP_STAT_INCR(sctps_sendswcrc);
4590 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4591 udp->uh_sum = 0xffff;
4594 #if defined(SCTP_WITH_NO_CSUM)
4595 SCTP_STAT_INCR(sctps_sendnocrc);
4597 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4598 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4599 SCTP_STAT_INCR(sctps_sendhwcrc);
4602 /* send it out. table id is taken from stcb */
4603 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4604 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4605 so = SCTP_INP_SO(inp);
4606 SCTP_SOCKET_UNLOCK(so, 0);
4609 #ifdef SCTP_PACKET_LOGGING
4610 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4611 sctp_packet_log(o_pak);
4613 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4614 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4615 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4616 atomic_add_int(&stcb->asoc.refcnt, 1);
4617 SCTP_TCB_UNLOCK(stcb);
4618 SCTP_SOCKET_LOCK(so, 0);
4619 SCTP_TCB_LOCK(stcb);
4620 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4624 /* for link local this must be done */
4625 sin6->sin6_scope_id = prev_scope;
4626 sin6->sin6_port = prev_port;
4628 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4629 SCTP_STAT_INCR(sctps_sendpackets);
4630 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4632 SCTP_STAT_INCR(sctps_senderrors);
4635 /* Now if we had a temp route free it */
4639 * PMTU check versus smallest asoc MTU goes
4642 if (ro->ro_rt == NULL) {
4643 /* Route was freed */
4644 if (net->ro._s_addr &&
4645 net->src_addr_selected) {
4646 sctp_free_ifa(net->ro._s_addr);
4647 net->ro._s_addr = NULL;
4649 net->src_addr_selected = 0;
4651 if ((ro->ro_rt != NULL) &&
4652 (net->ro._s_addr)) {
4655 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4657 (stcb->asoc.smallest_mtu > mtu)) {
4658 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4661 net->mtu -= sizeof(struct udphdr);
4665 if (ND_IFINFO(ifp)->linkmtu &&
4666 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4667 sctp_mtu_size_reset(inp,
4669 ND_IFINFO(ifp)->linkmtu);
4677 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4678 ((struct sockaddr *)to)->sa_family);
4680 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4687 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4688 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4693 struct mbuf *m, *m_last;
4694 struct sctp_nets *net;
4695 struct sctp_init_chunk *init;
4696 struct sctp_supported_addr_param *sup_addr;
4697 struct sctp_adaptation_layer_indication *ali;
4698 struct sctp_supported_chunk_types_param *pr_supported;
4699 struct sctp_paramhdr *ph;
4700 int cnt_inits_to = 0;
4702 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4704 /* INITs always go to the primary (and usually the ONLY) address */
4705 net = stcb->asoc.primary_destination;
4707 net = TAILQ_FIRST(&stcb->asoc.nets);
4712 /* we confirm any address we send an INIT to */
4713 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4714 (void)sctp_set_primary_addr(stcb, NULL, net);
4716 /* we confirm any address we send an INIT to */
4717 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4719 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4721 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4723 * special hook: if we are sending to a link-local address it will
4724 * not show up in our private address count.
4726 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4730 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4731 /* This case should not happen */
4732 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4735 /* start the INIT timer */
4736 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4738 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
4740 /* No memory, INIT timer will re-attempt. */
4741 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4744 chunk_len = (uint16_t) sizeof(struct sctp_init_chunk);
4746 /* Now let's put the chunk header in place */
4747 init = mtod(m, struct sctp_init_chunk *);
4748 /* now the chunk header */
4749 init->ch.chunk_type = SCTP_INITIATION;
4750 init->ch.chunk_flags = 0;
4751 /* fill in later from mbuf we build */
4752 init->ch.chunk_length = 0;
4753 /* place in my tag */
4754 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4755 /* set up some of the credits. */
4756 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4757 SCTP_MINIMAL_RWND));
4758 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4759 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4760 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4762 /* Adaptation layer indication parameter */
4763 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4764 parameter_len = (uint16_t) sizeof(struct sctp_adaptation_layer_indication);
4765 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4766 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4767 ali->ph.param_length = htons(parameter_len);
4768 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
4769 chunk_len += parameter_len;
4772 if (stcb->asoc.ecn_supported == 1) {
4773 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4774 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4775 ph->param_type = htons(SCTP_ECN_CAPABLE);
4776 ph->param_length = htons(parameter_len);
4777 chunk_len += parameter_len;
4779 /* PR-SCTP supported parameter */
4780 if (stcb->asoc.prsctp_supported == 1) {
4781 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4782 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4783 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4784 ph->param_length = htons(parameter_len);
4785 chunk_len += parameter_len;
4787 /* Add NAT friendly parameter. */
4788 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4789 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4790 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4791 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4792 ph->param_length = htons(parameter_len);
4793 chunk_len += parameter_len;
4795 /* And now tell the peer which extensions we support */
4797 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4798 if (stcb->asoc.prsctp_supported == 1) {
4799 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4800 if (stcb->asoc.idata_supported) {
4801 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
4804 if (stcb->asoc.auth_supported == 1) {
4805 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4807 if (stcb->asoc.asconf_supported == 1) {
4808 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4809 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4811 if (stcb->asoc.reconfig_supported == 1) {
4812 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4814 if (stcb->asoc.idata_supported) {
4815 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
4817 if (stcb->asoc.nrsack_supported == 1) {
4818 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4820 if (stcb->asoc.pktdrop_supported == 1) {
4821 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4824 parameter_len = (uint16_t) sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4825 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4826 pr_supported->ph.param_length = htons(parameter_len);
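/*
 * Parameters are padded out to a 4-byte boundary before the next one
 * starts; param_length carries the unpadded size while SCTP_SIZE32()
 * rounds it up, so e.g. a 7-byte parameter is followed by one pad byte.
 */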
4827 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4828 chunk_len += parameter_len;
4830 /* add authentication parameters */
4831 if (stcb->asoc.auth_supported) {
4832 /* attach RANDOM parameter, if available */
4833 if (stcb->asoc.authinfo.random != NULL) {
4834 struct sctp_auth_random *randp;
4836 if (padding_len > 0) {
4837 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4838 chunk_len += padding_len;
4841 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4842 parameter_len = (uint16_t) sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4843 /* random key already contains the header */
4844 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4845 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4846 chunk_len += parameter_len;
4848 /* add HMAC_ALGO parameter */
4849 if (stcb->asoc.local_hmacs != NULL) {
4850 struct sctp_auth_hmac_algo *hmacs;
4852 if (padding_len > 0) {
4853 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4854 chunk_len += padding_len;
4857 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4858 parameter_len = (uint16_t) (sizeof(struct sctp_auth_hmac_algo) +
4859 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4860 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4861 hmacs->ph.param_length = htons(parameter_len);
4862 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *) hmacs->hmac_ids);
4863 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4864 chunk_len += parameter_len;
4866 /* add CHUNKS parameter */
4867 if (stcb->asoc.local_auth_chunks != NULL) {
4868 struct sctp_auth_chunk_list *chunks;
4870 if (padding_len > 0) {
4871 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4872 chunk_len += padding_len;
4875 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4876 parameter_len = (uint16_t) (sizeof(struct sctp_auth_chunk_list) +
4877 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
4878 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4879 chunks->ph.param_length = htons(parameter_len);
4880 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
4881 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4882 chunk_len += parameter_len;
4885 /* now any cookie time extensions */
4886 if (stcb->asoc.cookie_preserve_req) {
4887 struct sctp_cookie_perserve_param *cookie_preserve;
4889 if (padding_len > 0) {
4890 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4891 chunk_len += padding_len;
4894 parameter_len = (uint16_t) sizeof(struct sctp_cookie_perserve_param);
4895 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4896 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4897 cookie_preserve->ph.param_length = htons(parameter_len);
4898 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4899 stcb->asoc.cookie_preserve_req = 0;
4900 chunk_len += parameter_len;
4902 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4905 if (padding_len > 0) {
4906 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4907 chunk_len += padding_len;
4910 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4911 if (stcb->asoc.scope.ipv4_addr_legal) {
4912 parameter_len += (uint16_t) sizeof(uint16_t);
4914 if (stcb->asoc.scope.ipv6_addr_legal) {
4915 parameter_len += (uint16_t) sizeof(uint16_t);
4917 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4918 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4919 sup_addr->ph.param_length = htons(parameter_len);
4921 if (stcb->asoc.scope.ipv4_addr_legal) {
4922 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4924 if (stcb->asoc.scope.ipv6_addr_legal) {
4925 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4927 padding_len = 4 - 2 * i;
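/* The parameter is 4 + 2 * i bytes: one listed address family leaves 2 pad bytes, both families leave none. */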
4928 chunk_len += parameter_len;
4930 SCTP_BUF_LEN(m) = chunk_len;
4931 /* now the addresses */
4933 * To optimize this we could put the scoping stuff into a structure
4934 * and remove the individual uint8's from the assoc structure. Then
4935 * we could just sifa in the address within the stcb. But for now
4936 * this is a quick hack to get the address stuff teased apart.
4938 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
4940 &padding_len, &chunk_len);
4942 init->ch.chunk_length = htons(chunk_len);
4943 if (padding_len > 0) {
4944 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
4949 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4950 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4951 (struct sockaddr *)&net->ro._l_addr,
4952 m, 0, NULL, 0, 0, 0, 0,
4953 inp->sctp_lport, stcb->rport, htonl(0),
4957 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4958 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4959 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4963 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4964 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4967 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
4968 * pointing to the beginning of the parameters (i.e. iphlen +
4969 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4970 * end of the mbuf, verifying that all parameters are known.
4972 * For unknown parameters, build and return an mbuf with
4973 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4974 * processing this chunk, stop and set *abort_processing to 1.
4976 * By having param_offset be pre-set to where parameters begin it is
4977 * hoped that this routine may be reused in the future by new
4980 struct sctp_paramhdr *phdr, params;
4982 struct mbuf *mat, *op_err;
4983 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4984 int at, limit, pad_needed;
4985 uint16_t ptype, plen, padded_size;
4988 *abort_processing = 0;
4991 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
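/* limit is the number of parameter bytes following the fixed INIT/INIT-ACK chunk header. */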
4994 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4995 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4996 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4997 ptype = ntohs(phdr->param_type);
4998 plen = ntohs(phdr->param_length);
4999 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5000 /* whacked parameter */
5001 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5004 limit -= SCTP_SIZE32(plen);
5006 * All parameters for all chunks that we know/understand are
5007 * listed here. We process them in other places and take the
5008 * appropriate stop actions per the upper bits. However, this
5009 * is the generic routine that processors can call to get back
5010 * an operr to either incorporate (init-ack) or send.
5012 padded_size = SCTP_SIZE32(plen);
5014 /* Params with variable size */
5015 case SCTP_HEARTBEAT_INFO:
5016 case SCTP_STATE_COOKIE:
5017 case SCTP_UNRECOG_PARAM:
5018 case SCTP_ERROR_CAUSE_IND:
5022 /* Params with variable size within a range */
5023 case SCTP_CHUNK_LIST:
5024 case SCTP_SUPPORTED_CHUNK_EXT:
5025 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5026 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5031 case SCTP_SUPPORTED_ADDRTYPE:
5032 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5033 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5039 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5040 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5045 case SCTP_SET_PRIM_ADDR:
5046 case SCTP_DEL_IP_ADDRESS:
5047 case SCTP_ADD_IP_ADDRESS:
5048 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5049 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5050 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5055 /* Params with a fixed size */
5056 case SCTP_IPV4_ADDRESS:
5057 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5058 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5063 case SCTP_IPV6_ADDRESS:
5064 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5065 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5070 case SCTP_COOKIE_PRESERVE:
5071 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5072 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5077 case SCTP_HAS_NAT_SUPPORT:
5080 case SCTP_PRSCTP_SUPPORTED:
5081 if (padded_size != sizeof(struct sctp_paramhdr)) {
5082 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5087 case SCTP_ECN_CAPABLE:
5088 if (padded_size != sizeof(struct sctp_paramhdr)) {
5089 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5094 case SCTP_ULP_ADAPTATION:
5095 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5096 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5101 case SCTP_SUCCESS_REPORT:
5102 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5103 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5108 case SCTP_HOSTNAME_ADDRESS:
5110 /* We can NOT handle HOST NAME addresses!! */
5113 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5114 *abort_processing = 1;
5115 if (op_err == NULL) {
5116 /* Ok need to try to get an mbuf */
5118 l_len = SCTP_MIN_OVERHEAD;
5120 l_len = SCTP_MIN_V4_OVERHEAD;
5122 l_len += sizeof(struct sctp_chunkhdr);
5124 l_len += sizeof(struct sctp_paramhdr);
5125 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5127 SCTP_BUF_LEN(op_err) = 0;
5129 * pre-reserve space for ip
5130 * and sctp header and
5134 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5136 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5138 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5139 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5143 /* If we have space */
5144 struct sctp_paramhdr s;
5147 uint32_t cpthis = 0;
5149 pad_needed = 4 - (err_at % 4);
5150 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5151 err_at += pad_needed;
5153 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5154 s.param_length = htons(sizeof(s) + plen);
5155 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5156 err_at += sizeof(s);
5157 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5159 sctp_m_freem(op_err);
5161 * we are out of memory but
5162 * we still need to have a
5163 * look at what to do (the
5164 * system is in trouble
5169 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5176 * we do not recognize the parameter; figure out what
5179 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
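/*
 * The two high-order bits of an unknown parameter type encode the
 * required action (RFC 4960, Section 3.2.1): if 0x8000 is clear we
 * stop processing further parameters, if set we skip and continue;
 * 0x4000 asks us to report it in an Unrecognized Parameter cause.
 */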
5180 if ((ptype & 0x4000) == 0x4000) {
5181 /* Report bit is set?? */
5182 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5183 if (op_err == NULL) {
5186 /* Ok need to try to get an mbuf */
5188 l_len = SCTP_MIN_OVERHEAD;
5190 l_len = SCTP_MIN_V4_OVERHEAD;
5192 l_len += sizeof(struct sctp_chunkhdr);
5194 l_len += sizeof(struct sctp_paramhdr);
5195 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5197 SCTP_BUF_LEN(op_err) = 0;
5199 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5201 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5203 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5204 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5208 /* If we have space */
5209 struct sctp_paramhdr s;
5212 uint32_t cpthis = 0;
5214 pad_needed = 4 - (err_at % 4);
5215 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5216 err_at += pad_needed;
5218 s.param_type = htons(SCTP_UNRECOG_PARAM);
5219 s.param_length = htons(sizeof(s) + plen);
5220 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5221 err_at += sizeof(s);
5222 if (plen > sizeof(tempbuf)) {
5223 plen = sizeof(tempbuf);
5225 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5227 sctp_m_freem(op_err);
5229 * we are out of memory but
5230 * we still need to have a
5231 * look at what to do (the
5232 * system is in trouble
5236 goto more_processing;
5238 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5243 if ((ptype & 0x8000) == 0x0000) {
5244 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5248 /* skip this parameter and continue processing */
5248 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5249 at += SCTP_SIZE32(plen);
5254 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5258 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5259 *abort_processing = 1;
5260 if ((op_err == NULL) && phdr) {
5264 l_len = SCTP_MIN_OVERHEAD;
5266 l_len = SCTP_MIN_V4_OVERHEAD;
5268 l_len += sizeof(struct sctp_chunkhdr);
5269 l_len += (2 * sizeof(struct sctp_paramhdr));
5270 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5272 SCTP_BUF_LEN(op_err) = 0;
5274 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5276 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5278 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5279 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5282 if ((op_err) && phdr) {
5283 struct sctp_paramhdr s;
5286 uint32_t cpthis = 0;
5288 pad_needed = 4 - (err_at % 4);
5289 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5290 err_at += pad_needed;
5292 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5293 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5294 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5295 err_at += sizeof(s);
5296 /* Only copy back the p-hdr that caused the issue */
5297 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5303 sctp_are_there_new_addresses(struct sctp_association *asoc,
5304 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5307 * Given an INIT packet, look through the packet to verify that there
5308 * are NO new addresses. As we go through the parameters, add reports
5309 * of any un-understood parameters that require an error. Also, we
5310 * must return (1) to drop the packet if we see an un-understood
5311 * parameter that tells us to drop the chunk.
5313 struct sockaddr *sa_touse;
5314 struct sockaddr *sa;
5315 struct sctp_paramhdr *phdr, params;
5316 uint16_t ptype, plen;
5318 struct sctp_nets *net;
5322 struct sockaddr_in sin4, *sa4;
5326 struct sockaddr_in6 sin6, *sa6;
5331 memset(&sin4, 0, sizeof(sin4));
5332 sin4.sin_family = AF_INET;
5333 sin4.sin_len = sizeof(sin4);
5336 memset(&sin6, 0, sizeof(sin6));
5337 sin6.sin6_family = AF_INET6;
5338 sin6.sin6_len = sizeof(sin6);
5340 /* First, what about the src address of the pkt? */
5342 switch (src->sa_family) {
5345 if (asoc->scope.ipv4_addr_legal) {
5352 if (asoc->scope.ipv6_addr_legal) {
5363 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5364 sa = (struct sockaddr *)&net->ro._l_addr;
5365 if (sa->sa_family == src->sa_family) {
5367 if (sa->sa_family == AF_INET) {
5368 struct sockaddr_in *src4;
5370 sa4 = (struct sockaddr_in *)sa;
5371 src4 = (struct sockaddr_in *)src;
5372 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5379 if (sa->sa_family == AF_INET6) {
5380 struct sockaddr_in6 *src6;
5382 sa6 = (struct sockaddr_in6 *)sa;
5383 src6 = (struct sockaddr_in6 *)src;
5384 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5393 /* New address added! No need to look further. */
5397 /* OK so far, let's munge through the rest of the packet */
5398 offset += sizeof(struct sctp_init_chunk);
5399 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5402 ptype = ntohs(phdr->param_type);
5403 plen = ntohs(phdr->param_length);
5406 case SCTP_IPV4_ADDRESS:
5408 struct sctp_ipv4addr_param *p4, p4_buf;
5410 phdr = sctp_get_next_param(in_initpkt, offset,
5411 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5412 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5416 if (asoc->scope.ipv4_addr_legal) {
5417 p4 = (struct sctp_ipv4addr_param *)phdr;
5418 sin4.sin_addr.s_addr = p4->addr;
5419 sa_touse = (struct sockaddr *)&sin4;
5425 case SCTP_IPV6_ADDRESS:
5427 struct sctp_ipv6addr_param *p6, p6_buf;
5429 phdr = sctp_get_next_param(in_initpkt, offset,
5430 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5431 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5435 if (asoc->scope.ipv6_addr_legal) {
5436 p6 = (struct sctp_ipv6addr_param *)phdr;
5437 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5439 sa_touse = (struct sockaddr *)&sin6;
5449 /* ok, sa_touse points to one to check */
5451 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5452 sa = (struct sockaddr *)&net->ro._l_addr;
5453 if (sa->sa_family != sa_touse->sa_family) {
5457 if (sa->sa_family == AF_INET) {
5458 sa4 = (struct sockaddr_in *)sa;
5459 if (sa4->sin_addr.s_addr ==
5460 sin4.sin_addr.s_addr) {
5467 if (sa->sa_family == AF_INET6) {
5468 sa6 = (struct sockaddr_in6 *)sa;
5469 if (SCTP6_ARE_ADDR_EQUAL(
5478 /* New addr added! no need to look further */
5482 offset += SCTP_SIZE32(plen);
5483 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5489 * Given an MBUF chain that was sent into us containing an INIT, build an
5490 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has done
5491 * a pullup to include the IPv6/4 header, SCTP header and initial part of the INIT
5492 * message (i.e. the struct sctp_init_msg).
5495 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5496 struct sctp_nets *src_net, struct mbuf *init_pkt,
5497 int iphlen, int offset,
5498 struct sockaddr *src, struct sockaddr *dst,
5499 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5500 uint8_t mflowtype, uint32_t mflowid,
5501 uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5503 struct sctp_association *asoc;
5504 struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5505 struct sctp_init_ack_chunk *initack;
5506 struct sctp_adaptation_layer_indication *ali;
5507 struct sctp_supported_chunk_types_param *pr_supported;
5508 struct sctp_paramhdr *ph;
5509 union sctp_sockstore *over_addr;
5510 struct sctp_scoping scp;
5513 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5514 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5515 struct sockaddr_in *sin;
5519 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5520 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5521 struct sockaddr_in6 *sin6;
5524 struct sockaddr *to;
5525 struct sctp_state_cookie stc;
5526 struct sctp_nets *net = NULL;
5527 uint8_t *signature = NULL;
5528 int cnt_inits_to = 0;
5529 uint16_t his_limit, i_want;
5531 int nat_friendly = 0;
5533 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5540 if ((asoc != NULL) &&
5541 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT)) {
5542 if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
5544 * new addresses, out of here in non-cookie-wait
5547 * Send an ABORT, without the new address error cause.
5548 * This looks no different than if no listener was
5551 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5553 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5554 mflowtype, mflowid, inp->fibnum,
5558 if (src_net != NULL && (src_net->port != port)) {
5560 * change of remote encapsulation port, out of here
5561 * in non-cookie-wait states
5563 * Send an ABORT, without a specific error cause. This
5564 * looks no different than if no listener was
5567 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5568 "Remote encapsulation port changed");
5569 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5570 mflowtype, mflowid, inp->fibnum,
5576 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5577 (offset + sizeof(struct sctp_init_chunk)),
5578 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5581 if (op_err == NULL) {
5582 char msg[SCTP_DIAG_INFO_LEN];
5584 snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
5585 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5588 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5589 init_chk->init.initiate_tag, op_err,
5590 mflowtype, mflowid, inp->fibnum,
5594 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5596 /* No memory, INIT timer will re-attempt. */
5598 sctp_m_freem(op_err);
5601 chunk_len = (uint16_t) sizeof(struct sctp_init_ack_chunk);
5605 * We might not overwrite the identification[] completely and on
5606 * some platforms time_entered will contain some padding. Therefore
5607 * zero out the cookie to avoid putting uninitialized memory on the
5610 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5612 /* the time I built cookie */
5613 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5615 /* populate any tie tags */
5617 /* unlock before tag selections */
5618 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5619 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5620 stc.cookie_life = asoc->cookie_life;
5621 net = asoc->primary_destination;
5623 stc.tie_tag_my_vtag = 0;
5624 stc.tie_tag_peer_vtag = 0;
5625 /* life I will award this cookie */
5626 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5629 /* copy in the ports for later check */
5630 stc.myport = sh->dest_port;
5631 stc.peerport = sh->src_port;
5634 * If we wanted to honor cookie life extensions, we would add to
5635 * stc.cookie_life. For now we should NOT honor any extension
5637 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5638 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5639 stc.ipv6_addr_legal = 1;
5640 if (SCTP_IPV6_V6ONLY(inp)) {
5641 stc.ipv4_addr_legal = 0;
5643 stc.ipv4_addr_legal = 1;
5646 stc.ipv6_addr_legal = 0;
5647 stc.ipv4_addr_legal = 1;
5652 switch (dst->sa_family) {
5656 /* lookup address */
5657 stc.address[0] = src4->sin_addr.s_addr;
5661 stc.addr_type = SCTP_IPV4_ADDRESS;
5662 /* local from address */
5663 stc.laddress[0] = dst4->sin_addr.s_addr;
5664 stc.laddress[1] = 0;
5665 stc.laddress[2] = 0;
5666 stc.laddress[3] = 0;
5667 stc.laddr_type = SCTP_IPV4_ADDRESS;
5668 /* scope_id is only for v6 */
5670 if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
5671 (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))) {
5674 /* Must use the address in this case */
5675 if (sctp_is_address_on_local_host(src, vrf_id)) {
5676 stc.loopback_scope = 1;
5679 stc.local_scope = 0;
5687 stc.addr_type = SCTP_IPV6_ADDRESS;
5688 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5689 stc.scope_id = in6_getscope(&src6->sin6_addr);
5690 if (sctp_is_address_on_local_host(src, vrf_id)) {
5691 stc.loopback_scope = 1;
5692 stc.local_scope = 0;
5695 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
5696 IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
5698 * If the new destination or source
5699 * is a LINK_LOCAL we must have
5700 * both site and local scope in common.
5701 * Don't set local scope though
5702 * since we must depend on the
5703 * source to be added implicitly. We
5704 * cannot assure just because we
5705 * share one link that all links are
5708 stc.local_scope = 0;
5712 * we start counting for the private
5713 * address stuff at 1, since the
5714 * link-local address we source from won't
5715 * show up in our scoped count.
5719 * pull out the scope_id from
5722 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
5723 IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
5725 * If the new destination or source
5726 * is SITE_LOCAL then we must have
5727 * site scope in common.
5731 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5732 stc.laddr_type = SCTP_IPV6_ADDRESS;
5742 /* set the scope per the existing tcb */
5745 struct sctp_nets *lnet;
5749 stc.loopback_scope = asoc->scope.loopback_scope;
5750 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5751 stc.site_scope = asoc->scope.site_scope;
5752 stc.local_scope = asoc->scope.local_scope;
5754 /* Why do we not consider IPv4 LL addresses? */
5755 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5756 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5757 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5759 * if we have a LL address, start
5767 /* use the net pointer */
5768 to = (struct sockaddr *)&net->ro._l_addr;
5769 switch (to->sa_family) {
5772 sin = (struct sockaddr_in *)to;
5773 stc.address[0] = sin->sin_addr.s_addr;
5777 stc.addr_type = SCTP_IPV4_ADDRESS;
5778 if (net->src_addr_selected == 0) {
5780 * strange case here, the INIT should have
5781 * done the selection.
5783 net->ro._s_addr = sctp_source_address_selection(inp,
5784 stcb, (sctp_route_t *) & net->ro,
5786 if (net->ro._s_addr == NULL)
5789 net->src_addr_selected = 1;
5792 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5793 stc.laddress[1] = 0;
5794 stc.laddress[2] = 0;
5795 stc.laddress[3] = 0;
5796 stc.laddr_type = SCTP_IPV4_ADDRESS;
5797 /* scope_id is only for v6 */
5803 sin6 = (struct sockaddr_in6 *)to;
5804 memcpy(&stc.address, &sin6->sin6_addr,
5805 sizeof(struct in6_addr));
5806 stc.addr_type = SCTP_IPV6_ADDRESS;
5807 stc.scope_id = sin6->sin6_scope_id;
5808 if (net->src_addr_selected == 0) {
5810 * strange case here, the INIT should have
5811 * done the selection.
5813 net->ro._s_addr = sctp_source_address_selection(inp,
5814 stcb, (sctp_route_t *) & net->ro,
5816 if (net->ro._s_addr == NULL)
5819 net->src_addr_selected = 1;
5821 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5822 sizeof(struct in6_addr));
5823 stc.laddr_type = SCTP_IPV6_ADDRESS;
5828 /* Now let's put the SCTP header in place */
5829 initack = mtod(m, struct sctp_init_ack_chunk *);
5830 /* Save it off for quick ref */
5831 stc.peers_vtag = init_chk->init.initiate_tag;
5833 memcpy(stc.identification, SCTP_VERSION_STRING,
5834 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5835 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5836 /* now the chunk header */
5837 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5838 initack->ch.chunk_flags = 0;
5839 /* fill in later from mbuf we build */
5840 initack->ch.chunk_length = 0;
5841 /* place in my tag */
5842 if ((asoc != NULL) &&
5843 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5844 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5845 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5846 /* re-use the v-tags and init-seq here */
5847 initack->init.initiate_tag = htonl(asoc->my_vtag);
5848 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5850 uint32_t vtag, itsn;
5852 if (hold_inp_lock) {
5853 SCTP_INP_INCR_REF(inp);
5854 SCTP_INP_RUNLOCK(inp);
5857 atomic_add_int(&asoc->refcnt, 1);
5858 SCTP_TCB_UNLOCK(stcb);
5860 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5861 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5863 * Got a duplicate vtag on some guy behind a
5864 * NAT; make sure we don't use it.
5868 initack->init.initiate_tag = htonl(vtag);
5869 /* get a TSN to use too */
5870 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5871 initack->init.initial_tsn = htonl(itsn);
5872 SCTP_TCB_LOCK(stcb);
5873 atomic_add_int(&asoc->refcnt, -1);
5875 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5876 initack->init.initiate_tag = htonl(vtag);
5877 /* get a TSN to use too */
5878 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5880 if (hold_inp_lock) {
5881 SCTP_INP_RLOCK(inp);
5882 SCTP_INP_DECR_REF(inp);
5885 /* save away my tag to the cookie */
5886 stc.my_vtag = initack->init.initiate_tag;
5888 /* set up some of the credits. */
5889 so = inp->sctp_socket;
5891 /* memory problem */
5895 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
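/*
 * Stream negotiation: we can only open as many outbound streams as the
 * peer will accept inbound, so advertise min(his_limit, i_want) outbound
 * streams along with our own inbound limit.
 */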
5897 /* set what I want */
5898 his_limit = ntohs(init_chk->init.num_inbound_streams);
5899 /* choose what I want */
5901 if (asoc->streamoutcnt > asoc->pre_open_streams) {
5902 i_want = asoc->streamoutcnt;
5904 i_want = asoc->pre_open_streams;
5907 i_want = inp->sctp_ep.pre_open_stream_count;
5909 if (his_limit < i_want) {
5910 /* I Want more :< */
5911 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5913 /* I can have what I want :> */
5914 initack->init.num_outbound_streams = htons(i_want);
5916 /* tell him his limit. */
5917 initack->init.num_inbound_streams =
5918 htons(inp->sctp_ep.max_open_streams_intome);
5920 /* adaptation layer indication parameter */
5921 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5922 parameter_len = (uint16_t) sizeof(struct sctp_adaptation_layer_indication);
5923 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
5924 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5925 ali->ph.param_length = htons(parameter_len);
5926 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5927 chunk_len += parameter_len;
5930 if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
5931 ((asoc == NULL) && (inp->ecn_supported == 1))) {
5932 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
5933 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5934 ph->param_type = htons(SCTP_ECN_CAPABLE);
5935 ph->param_length = htons(parameter_len);
5936 chunk_len += parameter_len;
5938 /* PR-SCTP supported parameter */
5939 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5940 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5941 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
5942 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5943 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5944 ph->param_length = htons(parameter_len);
5945 chunk_len += parameter_len;
5947 /* Add NAT friendly parameter */
5949 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
5950 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5951 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5952 ph->param_length = htons(parameter_len);
5953 chunk_len += parameter_len;
5955 /* And now tell the peer which extensions we support */
5957 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
5958 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5959 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5960 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5961 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5962 ((asoc == NULL) && (inp->idata_supported == 1))) {
5963 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5966 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5967 ((asoc == NULL) && (inp->auth_supported == 1))) {
5968 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5970 if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
5971 ((asoc == NULL) && (inp->asconf_supported == 1))) {
5972 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5973 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5975 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
5976 ((asoc == NULL) && (inp->reconfig_supported == 1))) {
5977 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5979 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5980 ((asoc == NULL) && (inp->idata_supported == 1))) {
5981 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5983 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
5984 ((asoc == NULL) && (inp->nrsack_supported == 1))) {
5985 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5987 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
5988 ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
5989 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5992 parameter_len = (uint16_t) sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5993 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5994 pr_supported->ph.param_length = htons(parameter_len);
5995 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5996 chunk_len += parameter_len;
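/*
 * Padding is deferred: SCTP_SIZE32() rounds up to the next 4-byte
 * multiple, so e.g. a parameter_len of 6 leaves padding_len = 2. The
 * pad bytes are only zeroed out immediately before the next parameter
 * is appended, or at the end of the chunk.
 */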
5998 /* add authentication parameters */
5999 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6000 ((asoc == NULL) && (inp->auth_supported == 1))) {
6001 struct sctp_auth_random *randp;
6002 struct sctp_auth_hmac_algo *hmacs;
6003 struct sctp_auth_chunk_list *chunks;
6005 if (padding_len > 0) {
6006 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6007 chunk_len += padding_len;
6010 /* generate and add RANDOM parameter */
6011 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
6012 parameter_len = (uint16_t) sizeof(struct sctp_auth_random) +
6013 SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6014 randp->ph.param_type = htons(SCTP_RANDOM);
6015 randp->ph.param_length = htons(parameter_len);
6016 SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6017 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6018 chunk_len += parameter_len;
6020 if (padding_len > 0) {
6021 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6022 chunk_len += padding_len;
6025 /* add HMAC_ALGO parameter */
6026 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
6027 parameter_len = (uint16_t) sizeof(struct sctp_auth_hmac_algo) +
6028 sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6029 (uint8_t *) hmacs->hmac_ids);
6030 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6031 hmacs->ph.param_length = htons(parameter_len);
6032 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6033 chunk_len += parameter_len;
6035 if (padding_len > 0) {
6036 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6037 chunk_len += padding_len;
6040 /* add CHUNKS parameter */
6041 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
6042 parameter_len = (uint16_t) sizeof(struct sctp_auth_chunk_list) +
6043 sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6044 chunks->chunk_types);
6045 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6046 chunks->ph.param_length = htons(parameter_len);
6047 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6048 chunk_len += parameter_len;
6050 SCTP_BUF_LEN(m) = chunk_len;
6052 /* now the addresses */
6054 * To optimize this we could put the scoping stuff into a structure
6055 * and remove the individual uint8's from the stc structure. Then we
6056 * could just stuff in the address within the stc.. but for now this
6057 * is a quick hack to get the address stuff teased apart.
6059 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6060 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6061 scp.loopback_scope = stc.loopback_scope;
6062 scp.ipv4_local_scope = stc.ipv4_scope;
6063 scp.local_scope = stc.local_scope;
6064 scp.site_scope = stc.site_scope;
6065 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6067 &padding_len, &chunk_len);
6068 /* padding_len can only be positive, if no addresses have been added */
6069 if (padding_len > 0) {
6070 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6071 chunk_len += padding_len;
6072 SCTP_BUF_LEN(m) += padding_len;
6075 /* tack on the operational error if present */
6078 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6079 parameter_len += SCTP_BUF_LEN(m_tmp);
6081 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6082 SCTP_BUF_NEXT(m_last) = op_err;
6083 while (SCTP_BUF_NEXT(m_last) != NULL) {
6084 m_last = SCTP_BUF_NEXT(m_last);
6086 chunk_len += parameter_len;
6088 if (padding_len > 0) {
6089 m_last = sctp_add_pad_tombuf(m_last, padding_len);
6090 if (m_last == NULL) {
6091 /* Houston we have a problem, no space */
6095 chunk_len += padding_len;
6098 /* Now we must build a cookie */
6099 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6100 if (m_cookie == NULL) {
6101 /* memory problem */
6105 /* Now append the cookie to the end and update the space/size */
6106 SCTP_BUF_NEXT(m_last) = m_cookie;
6108 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6109 parameter_len += SCTP_BUF_LEN(m_tmp);
6110 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6114 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6115 chunk_len += parameter_len;
6118 * Place in the size, but we don't include the last pad (if any) in the chunk length.
6121 initack->ch.chunk_length = htons(chunk_len);
6124 * Time to sign the cookie, we don't sign over the cookie signature
6125 * though thus we set trailer.
6127 (void)sctp_hmac_m(SCTP_HMAC,
6128 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6129 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6130 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
6132 * We pass 0 here to NOT set IP_DF if it's IPv4, we ignore the return
6133 * here since the timer will drive a retransmission.
6135 if (padding_len > 0) {
6136 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6141 if (stc.loopback_scope) {
6142 over_addr = (union sctp_sockstore *)dst;
6147 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6149 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6152 SCTP_SO_NOT_LOCKED);
6153 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
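/*
 * At this point the INIT-ACK is complete: chunk header with the
 * negotiated stream counts, optional parameters (adaptation layer
 * indication, ECN, PR-SCTP, NAT support, supported extensions, AUTH
 * material), our addresses, any operational error causes, and the
 * signed state cookie, all handed to sctp_lowlevel_chunk_output().
 */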
6158 sctp_prune_prsctp(struct sctp_tcb *stcb,
6159 struct sctp_association *asoc,
6160 struct sctp_sndrcvinfo *srcv,
6164 struct sctp_tmit_chunk *chk, *nchk;
6166 SCTP_TCB_LOCK_ASSERT(stcb);
6167 if ((asoc->prsctp_supported) &&
6168 (asoc->sent_queue_cnt_removeable > 0)) {
6169 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6171 * Look for chunks marked with the PR_SCTP flag AND
6172 * the buffer space flag. If the one being sent is
6173 * equal or greater priority then purge the old one
6174 * and free some space.
6176 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6178 * This one is PR-SCTP AND buffer space
6181 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6183 * Lower numbers equate to higher
6184 * priority so if the one we are
6185 * looking at has a larger or equal
6186 * priority we want to drop the data
6187 * and NOT retransmit it.
6191 * We release the book_size
6192 * if the mbuf is here
6197 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6201 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6204 freed_spc += ret_spc;
6205 if (freed_spc >= dataout) {
6208 } /* if chunk was present */
6209 } /* if of sufficient priority */
6210 } /* if chunk has enabled */
6211 } /* tailqforeach */
6213 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6214 /* Here we must move to the sent queue and mark */
6215 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6216 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6219 * We release the book_size
6220 * if the mbuf is here
6224 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6227 freed_spc += ret_spc;
6228 if (freed_spc >= dataout) {
6231 } /* end if chk->data */
6232 } /* end if right class */
6233 } /* end if chk pr-sctp */
6234 } /* tailqforeachsafe (chk) */
6235 } /* if enabled in asoc */
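/*
 * sctp_prune_prsctp: before queueing a new send of 'dataout' bytes,
 * walk the sent queue and then the send queue, releasing PR-SCTP
 * buffer-space chunks whose priority (timetodrop.tv_sec) is greater
 * than or equal to the new message's sinfo_timetolive, until enough
 * space has been freed.
 */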
6239 sctp_get_frag_point(struct sctp_tcb *stcb,
6240 struct sctp_association *asoc)
6245 * For endpoints that have both v6 and v4 addresses we must reserve
6246 * room for the ipv6 header, for those that are only dealing with V4
6247 * we use a larger frag point.
6249 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6250 ovh = SCTP_MIN_OVERHEAD;
6252 ovh = SCTP_MIN_V4_OVERHEAD;
6254 if (stcb->asoc.idata_supported) {
6255 ovh += sizeof(struct sctp_idata_chunk);
6257 ovh += sizeof(struct sctp_data_chunk);
6259 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6260 siz = asoc->smallest_mtu - ovh;
6262 siz = (stcb->asoc.sctp_frag_point - ovh);
6264 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6266 /* A data chunk MUST fit in a cluster */
6267 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6270 /* adjust for an AUTH chunk if DATA requires auth */
6271 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6272 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
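/*
 * Net result: the fragment point is the smaller of the configured
 * sctp_frag_point and the association's smallest path MTU, minus the
 * IP/SCTP and DATA (or I-DATA) overhead and any AUTH chunk required
 * for DATA, rounded down below to an even word boundary.
 */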
6275 /* make it an even word boundary please */
6282 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6285 * We assume that the user wants PR_SCTP_TTL if the user provides a
6286 * positive lifetime but does not specify any PR_SCTP policy.
6288 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6289 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6290 } else if (sp->timetolive > 0) {
6291 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6292 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6296 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6297 case CHUNK_FLAGS_PR_SCTP_BUF:
6299 * Time to live is a priority stored in tv_sec when doing
6300 * the buffer drop thing.
6302 sp->ts.tv_sec = sp->timetolive;
6305 case CHUNK_FLAGS_PR_SCTP_TTL:
6309 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6310 tv.tv_sec = sp->timetolive / 1000;
6311 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6313 * TODO sctp_constants.h needs alternative time
6314 * macros when _KERNEL is undefined.
6316 timevaladd(&sp->ts, &tv);
6319 case CHUNK_FLAGS_PR_SCTP_RTX:
6321 * Time to live is the number of retransmissions stored in tv_sec.
6324 sp->ts.tv_sec = sp->timetolive;
6328 SCTPDBG(SCTP_DEBUG_USRREQ1,
6329 "Unknown PR_SCTP policy %u.\n",
6330 PR_SCTP_POLICY(sp->sinfo_flags));
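/*
 * Summary of the policies handled above: TTL converts the lifetime
 * (in ms) into an absolute expiry time in sp->ts, BUF stores a drop
 * priority in sp->ts.tv_sec, and RTX stores the allowed number of
 * retransmissions in sp->ts.tv_sec.
 */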
6336 sctp_msg_append(struct sctp_tcb *stcb,
6337 struct sctp_nets *net,
6339 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6343 struct sctp_stream_queue_pending *sp = NULL;
6344 struct sctp_stream_out *strm;
6347 * Given an mbuf chain, put it into the association send queue and
6348 * place it on the wheel
6350 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6351 /* Invalid stream number */
6352 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6356 if ((stcb->asoc.stream_locked) &&
6357 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6358 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6362 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6363 /* Now can we send this? */
6364 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6365 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6366 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6367 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6368 /* got data while shutting down */
6369 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6373 sctp_alloc_a_strmoq(stcb, sp);
6375 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6379 sp->sinfo_flags = srcv->sinfo_flags;
6380 sp->timetolive = srcv->sinfo_timetolive;
6381 sp->ppid = srcv->sinfo_ppid;
6382 sp->context = srcv->sinfo_context;
6384 sp->msg_id = atomic_fetchadd_int(&stcb->asoc.assoc_msg_id, 1);
6385 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6387 atomic_add_int(&sp->net->ref_count, 1);
6391 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6392 sp->stream = srcv->sinfo_stream;
6393 sp->msg_is_complete = 1;
6394 sp->sender_all_done = 1;
6397 sp->tail_mbuf = NULL;
6398 sctp_set_prsctp_policy(sp);
6400 * We could in theory (for sendall) pass the length in, but we would
6401 * still have to hunt through the chain since we need to setup the tail_mbuf.
6405 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6406 if (SCTP_BUF_NEXT(at) == NULL)
6408 sp->length += SCTP_BUF_LEN(at);
6410 if (srcv->sinfo_keynumber_valid) {
6411 sp->auth_keyid = srcv->sinfo_keynumber;
6413 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6415 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6416 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6417 sp->holds_key_ref = 1;
6419 if (hold_stcb_lock == 0) {
6420 SCTP_TCB_SEND_LOCK(stcb);
6422 sctp_snd_sb_alloc(stcb, sp->length);
6423 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6424 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6425 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6427 if (hold_stcb_lock == 0) {
6428 SCTP_TCB_SEND_UNLOCK(stcb);
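/*
 * sctp_msg_append flow: validate the stream, allocate a
 * stream-queue-pending entry, copy the send info, walk the mbuf chain
 * to compute the length, take an AUTH key reference when DATA must be
 * authenticated, then queue the entry on the stream's outqueue under
 * the send lock and notify the stream scheduler.
 */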
6438 static struct mbuf *
6439 sctp_copy_mbufchain(struct mbuf *clonechain,
6440 struct mbuf *outchain,
6441 struct mbuf **endofchain,
6444 uint8_t copy_by_ref)
6447 struct mbuf *appendchain;
6451 if (endofchain == NULL) {
6455 sctp_m_freem(outchain);
6458 if (can_take_mbuf) {
6459 appendchain = clonechain;
6462 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6464 /* It's not in a cluster */
6465 if (*endofchain == NULL) {
6466 /* lets get a mbuf cluster */
6467 if (outchain == NULL) {
6468 /* This is the general case */
6470 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6471 if (outchain == NULL) {
6474 SCTP_BUF_LEN(outchain) = 0;
6475 *endofchain = outchain;
6476 /* get the prepend space */
6477 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6480 * We really should not get a NULL in endofchain.
6486 if (SCTP_BUF_NEXT(m) == NULL) {
6490 m = SCTP_BUF_NEXT(m);
6493 if (*endofchain == NULL) {
6495 * huh, TSNH XXX maybe we should panic
6498 sctp_m_freem(outchain);
6502 /* get the new end of length */
6503 len = (int)M_TRAILINGSPACE(*endofchain);
6505 /* how much is left at the end? */
6506 len = (int)M_TRAILINGSPACE(*endofchain);
6508 /* Find the end of the data, for appending */
6509 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6511 /* Now lets copy it out */
6512 if (len >= sizeofcpy) {
6513 /* It all fits, copy it in */
6514 m_copydata(clonechain, 0, sizeofcpy, cp);
6515 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6517 /* fill up the end of the chain */
6519 m_copydata(clonechain, 0, len, cp);
6520 SCTP_BUF_LEN((*endofchain)) += len;
6521 /* now we need another one */
6524 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6529 SCTP_BUF_NEXT((*endofchain)) = m;
6531 cp = mtod((*endofchain), caddr_t);
6532 m_copydata(clonechain, len, sizeofcpy, cp);
6533 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6537 /* copy the old fashion way */
6538 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6539 #ifdef SCTP_MBUF_LOGGING
6540 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6541 sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
6546 if (appendchain == NULL) {
6549 sctp_m_freem(outchain);
6553 /* tack on to the end */
6554 if (*endofchain != NULL) {
6555 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6559 if (SCTP_BUF_NEXT(m) == NULL) {
6560 SCTP_BUF_NEXT(m) = appendchain;
6563 m = SCTP_BUF_NEXT(m);
6567 * save off the end and update the end-chain position
6571 if (SCTP_BUF_NEXT(m) == NULL) {
6575 m = SCTP_BUF_NEXT(m);
6579 /* save off the end and update the end-chain position */
6582 if (SCTP_BUF_NEXT(m) == NULL) {
6586 m = SCTP_BUF_NEXT(m);
6588 return (appendchain);
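/*
 * sctp_copy_mbufchain uses two strategies: chunks below the
 * sctp_mbuf_threshold_count limit are flat-copied with m_copydata()
 * into a shared cluster tracked by *endofchain, while other chunks are
 * either taken over directly (can_take_mbuf) or duplicated with
 * SCTP_M_COPYM() and linked onto the end of the chain, updating
 * *endofchain as we go.
 */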
6593 sctp_med_chunk_output(struct sctp_inpcb *inp,
6594 struct sctp_tcb *stcb,
6595 struct sctp_association *asoc,
6598 int control_only, int from_where,
6599 struct timeval *now, int *now_filled, int frag_point, int so_locked
6600 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6606 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6607 uint32_t val SCTP_UNUSED)
6609 struct sctp_copy_all *ca;
6612 int added_control = 0;
6613 int un_sent, do_chunk_output = 1;
6614 struct sctp_association *asoc;
6615 struct sctp_nets *net;
6617 ca = (struct sctp_copy_all *)ptr;
6618 if (ca->m == NULL) {
6621 if (ca->inp != inp) {
6625 if (ca->sndlen > 0) {
6626 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6628 /* can't copy so we are done */
6632 #ifdef SCTP_MBUF_LOGGING
6633 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6634 sctp_log_mbc(m, SCTP_MBUF_ICOPY);
6640 SCTP_TCB_LOCK_ASSERT(stcb);
6641 if (stcb->asoc.alternate) {
6642 net = stcb->asoc.alternate;
6644 net = stcb->asoc.primary_destination;
6646 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6647 /* Abort this assoc with m as the user defined reason */
6649 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6651 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6652 0, M_NOWAIT, 1, MT_DATA);
6653 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6656 struct sctp_paramhdr *ph;
6658 ph = mtod(m, struct sctp_paramhdr *);
6659 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6660 ph->param_length = htons((uint16_t) (sizeof(struct sctp_paramhdr) + ca->sndlen));
6663 * We add one here to keep the assoc from disappearing on us.
6666 atomic_add_int(&stcb->asoc.refcnt, 1);
6667 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6669 * sctp_abort_an_association calls sctp_free_asoc() free
6670 * association will NOT free it since we incremented the
6671 * refcnt .. we do this to prevent it being freed and things
6672 * getting tricky since we could end up (from free_asoc)
6673 * calling inpcb_free which would get a recursive lock call
6674 * to the iterator lock.. But as a consequence of that the
6675 * stcb will return to us un-locked.. since free_asoc
6676 * returns with either no TCB or the TCB unlocked, we must
6677 * relock.. to unlock in the iterator timer :-0
6679 SCTP_TCB_LOCK(stcb);
6680 atomic_add_int(&stcb->asoc.refcnt, -1);
6681 goto no_chunk_output;
6684 ret = sctp_msg_append(stcb, net, m,
6688 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6689 /* shutdown this assoc */
6692 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
6694 if (TAILQ_EMPTY(&asoc->send_queue) &&
6695 TAILQ_EMPTY(&asoc->sent_queue) &&
6697 if (asoc->locked_on_sending) {
6701 * there is nothing queued to send, so I'm done.
6704 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6705 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6706 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6708 * only send SHUTDOWN the first time through
6711 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6712 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6714 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6715 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6716 sctp_stop_timers_for_shutdown(stcb);
6717 sctp_send_shutdown(stcb, net);
6718 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6720 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6721 asoc->primary_destination);
6723 do_chunk_output = 0;
6727 * we still got (or just got) data to send,
6728 * so set SHUTDOWN_PENDING
6731 * XXX sockets draft says that SCTP_EOF
6732 * should be sent with no data. currently,
6733 * we will allow user data to be sent first
6734 * and move to SHUTDOWN-PENDING
6736 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6737 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6738 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6739 if (asoc->locked_on_sending) {
6741 * Locked to send out the data.
6744 struct sctp_stream_queue_pending *sp;
6746 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6748 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6749 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6752 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6753 if (TAILQ_EMPTY(&asoc->send_queue) &&
6754 TAILQ_EMPTY(&asoc->sent_queue) &&
6755 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6756 struct mbuf *op_err;
6757 char msg[SCTP_DIAG_INFO_LEN];
6760 snprintf(msg, sizeof(msg),
6761 "%s:%d at %s", __FILE__, __LINE__, __func__);
6762 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6764 atomic_add_int(&stcb->asoc.refcnt, 1);
6765 sctp_abort_an_association(stcb->sctp_ep, stcb,
6766 op_err, SCTP_SO_NOT_LOCKED);
6767 atomic_add_int(&stcb->asoc.refcnt, -1);
6768 goto no_chunk_output;
6770 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6771 asoc->primary_destination);
6777 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6778 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6780 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6781 (stcb->asoc.total_flight > 0) &&
6782 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6783 do_chunk_output = 0;
6785 if (do_chunk_output)
6786 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6787 else if (added_control) {
6788 int num_out, reason, now_filled = 0;
6792 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6793 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6794 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6805 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6807 struct sctp_copy_all *ca;
6809 ca = (struct sctp_copy_all *)ptr;
6811 * Do a notify here? Kacheong suggests that the notify be done at
6812 * the send time.. so you would push up a notification if any send
6813 * failed. Don't know if this is feasible since the only failures we
6814 * have are "memory" related and if you cannot get an mbuf to send
6815 * the data you surely can't get an mbuf to send up to notify the
6816 * user you can't send the data :->
6819 /* now free everything */
6820 sctp_m_freem(ca->m);
6821 SCTP_FREE(ca, SCTP_M_COPYAL);
6824 static struct mbuf *
6825 sctp_copy_out_all(struct uio *uio, int len)
6827 struct mbuf *ret, *at;
6828 int left, willcpy, cancpy, error;
6830 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
6836 SCTP_BUF_LEN(ret) = 0;
6837 /* save space for the data chunk header */
6838 cancpy = (int)M_TRAILINGSPACE(ret);
6839 willcpy = min(cancpy, left);
6842 /* Align data to the end */
6843 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6849 SCTP_BUF_LEN(at) = willcpy;
6850 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6853 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
6854 if (SCTP_BUF_NEXT(at) == NULL) {
6857 at = SCTP_BUF_NEXT(at);
6858 SCTP_BUF_LEN(at) = 0;
6859 cancpy = (int)M_TRAILINGSPACE(at);
6860 willcpy = min(cancpy, left);
6867 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6868 struct sctp_sndrcvinfo *srcv)
6871 struct sctp_copy_all *ca;
6873 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6877 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6880 memset(ca, 0, sizeof(struct sctp_copy_all));
6884 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6887 * take off the sendall flag, it would be bad if we failed to do this.
6890 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6891 /* get length and mbuf chain */
6893 ca->sndlen = (int)uio->uio_resid;
6894 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6895 if (ca->m == NULL) {
6896 SCTP_FREE(ca, SCTP_M_COPYAL);
6897 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6901 /* Gather the length of the send */
6905 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6906 ca->sndlen += SCTP_BUF_LEN(mat);
6909 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6910 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6911 SCTP_ASOC_ANY_STATE,
6913 sctp_sendall_completes, inp, 1);
6915 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6916 SCTP_FREE(ca, SCTP_M_COPYAL);
6917 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6925 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6927 struct sctp_tmit_chunk *chk, *nchk;
6929 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6930 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6931 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6933 sctp_m_freem(chk->data);
6936 asoc->ctrl_queue_cnt--;
6937 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6943 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6945 struct sctp_association *asoc;
6946 struct sctp_tmit_chunk *chk, *nchk;
6947 struct sctp_asconf_chunk *acp;
6950 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6951 /* find SCTP_ASCONF chunk in queue */
6952 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6954 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6955 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6960 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6962 sctp_m_freem(chk->data);
6965 asoc->ctrl_queue_cnt--;
6966 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6973 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6974 struct sctp_association *asoc,
6975 struct sctp_tmit_chunk **data_list,
6977 struct sctp_nets *net)
6980 struct sctp_tmit_chunk *tp1;
6982 for (i = 0; i < bundle_at; i++) {
6983 /* off of the send queue */
6984 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6985 asoc->send_queue_cnt--;
6988 * Any chunk NOT 0 you zap the time chunk 0 gets
6989 * zapped or set based on if an RTO measurement is needed.
6992 data_list[i]->do_rtt = 0;
6995 data_list[i]->sent_rcv_time = net->last_sent_time;
6996 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6997 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6998 if (data_list[i]->whoTo == NULL) {
6999 data_list[i]->whoTo = net;
7000 atomic_add_int(&net->ref_count, 1);
7002 /* on to the sent queue */
7003 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
7004 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
7005 struct sctp_tmit_chunk *tpp;
7007 /* need to move back */
7009 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7011 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7015 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
7018 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7020 TAILQ_INSERT_TAIL(&asoc->sent_queue,
7025 /* This does not lower until the cum-ack passes it */
7026 asoc->sent_queue_cnt++;
7027 if ((asoc->peers_rwnd <= 0) &&
7028 (asoc->total_flight == 0) &&
7030 /* Mark the chunk as being a window probe */
7031 SCTP_STAT_INCR(sctps_windowprobed);
7033 #ifdef SCTP_AUDITING_ENABLED
7034 sctp_audit_log(0xC2, 3);
7036 data_list[i]->sent = SCTP_DATAGRAM_SENT;
7037 data_list[i]->snd_count = 1;
7038 data_list[i]->rec.data.chunk_was_revoked = 0;
7039 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7040 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7041 data_list[i]->whoTo->flight_size,
7042 data_list[i]->book_size,
7043 (uint32_t) (uintptr_t) data_list[i]->whoTo,
7044 data_list[i]->rec.data.TSN_seq);
7046 sctp_flight_size_increase(data_list[i]);
7047 sctp_total_flight_increase(stcb, data_list[i]);
7048 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7049 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7050 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7052 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7053 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7054 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7055 /* SWS sender side engages */
7056 asoc->peers_rwnd = 0;
7059 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7060 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
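/*
 * sctp_clean_up_datalist moves the just-bundled chunks from the send
 * queue onto the sent queue in TSN order, marks them
 * SCTP_DATAGRAM_SENT, updates flight size and the peer rwnd
 * accounting, and flags a window probe when the peer's rwnd is closed
 * with nothing in flight.
 */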
7065 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7066 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7071 struct sctp_tmit_chunk *chk, *nchk;
7073 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7074 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7075 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7076 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7077 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7078 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7079 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7080 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7081 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7082 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7083 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7084 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7085 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7086 /* Stray chunks must be cleaned up */
7088 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7090 sctp_m_freem(chk->data);
7093 asoc->ctrl_queue_cnt--;
7094 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
7095 asoc->fwd_tsn_cnt--;
7096 sctp_free_a_chunk(stcb, chk, so_locked);
7097 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7098 /* special handling, we must look into the param */
7099 if (chk != asoc->str_reset) {
7100 goto clean_up_anyway;
7108 sctp_can_we_split_this(struct sctp_tcb *stcb,
7110 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7113 * Make a decision on if I should split a msg into multiple parts.
7114 * This is only asked of incomplete messages.
7118 * If we are doing EEOR we need to always send it if it's the
7119 * entire thing, since it might be all the guy is putting in the hopper.
7122 if (goal_mtu >= length) {
7124 * If we have data outstanding,
7125 * we get another chance when the sack
7126 * arrives to transmit - wait for more data
7128 if (stcb->asoc.total_flight == 0) {
7130 * If nothing is in flight, we zero the packet counter.
7138 /* You can fill the rest */
7143 * For those strange folk that make the send buffer
7144 * smaller than our fragmentation point, we can't
7145 * get a full msg in so we have to allow splitting.
7147 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7150 if ((length <= goal_mtu) ||
7151 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7152 /* Sub-optimal residual, don't split in non-eeor mode. */
7156 * If we reach here length is larger than the goal_mtu. Do we wish
7157 * to split it for the sake of packet putting together?
7159 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7160 /* Its ok to split it */
7161 return (min(goal_mtu, frag_point));
7163 /* Nope, can't split */
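/*
 * sctp_can_we_split_this, in short: with EEOR on, a message that fits
 * entirely in goal_mtu is only taken when nothing is in flight,
 * otherwise only goal_mtu worth is taken; without EEOR, an undersized
 * send buffer (smaller than the fragmentation point) allows splitting,
 * a residual smaller than sctp_min_residual is not split, and
 * otherwise the split size is min(goal_mtu, frag_point).
 */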
7169 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7170 struct sctp_stream_out *strq,
7172 uint32_t frag_point,
7178 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7183 /* Move from the stream to the send_queue keeping track of the total */
7184 struct sctp_association *asoc;
7185 struct sctp_stream_queue_pending *sp;
7186 struct sctp_tmit_chunk *chk;
7187 struct sctp_data_chunk *dchkh = NULL;
7188 struct sctp_idata_chunk *ndchkh = NULL;
7189 uint32_t to_move, length, leading;
7190 uint8_t rcv_flags = 0;
7192 uint8_t send_lock_up = 0;
7194 SCTP_TCB_LOCK_ASSERT(stcb);
7197 /* sa_ignore FREED_MEMORY */
7199 sp = TAILQ_FIRST(&strq->outqueue);
7202 if (send_lock_up == 0) {
7203 SCTP_TCB_SEND_LOCK(stcb);
7206 sp = TAILQ_FIRST(&strq->outqueue);
7210 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7211 (stcb->asoc.idata_supported == 0) &&
7212 (strq->last_msg_incomplete)) {
7213 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7215 strq->last_msg_incomplete);
7216 strq->last_msg_incomplete = 0;
7220 SCTP_TCB_SEND_UNLOCK(stcb);
7225 if ((sp->msg_is_complete) && (sp->length == 0)) {
7226 if (sp->sender_all_done) {
7228 * We are doing deferred cleanup. Last time through
7229 * when we took all the data the sender_all_done was not set.
7232 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7233 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7234 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7235 sp->sender_all_done,
7237 sp->msg_is_complete,
7241 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7242 SCTP_TCB_SEND_LOCK(stcb);
7245 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7246 TAILQ_REMOVE(&strq->outqueue, sp, next);
7247 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7248 (strq->chunks_on_queues == 0) &&
7249 TAILQ_EMPTY(&strq->outqueue)) {
7250 stcb->asoc.trigger_reset = 1;
7252 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7254 sctp_free_remote_addr(sp->net);
7258 sctp_m_freem(sp->data);
7261 sctp_free_a_strmoq(stcb, sp, so_locked);
7262 /* we can't be locked to it */
7264 stcb->asoc.locked_on_sending = NULL;
7266 SCTP_TCB_SEND_UNLOCK(stcb);
7269 /* back to get the next msg */
7273 * sender just finished this but still holds a reference.
7276 if (stcb->asoc.idata_supported == 0)
7283 /* is there some to get */
7284 if (sp->length == 0) {
7286 if (stcb->asoc.idata_supported == 0)
7291 } else if (sp->discard_rest) {
7292 if (send_lock_up == 0) {
7293 SCTP_TCB_SEND_LOCK(stcb);
7296 /* Whack down the size */
7297 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7298 if ((stcb->sctp_socket != NULL) &&
7299 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7300 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7301 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7304 sctp_m_freem(sp->data);
7306 sp->tail_mbuf = NULL;
7310 if (stcb->asoc.idata_supported == 0)
7317 some_taken = sp->some_taken;
7319 length = sp->length;
7320 if (sp->msg_is_complete) {
7321 /* The message is complete */
7322 to_move = min(length, frag_point);
7323 if (to_move == length) {
7324 /* All of it fits in the MTU */
7325 if (sp->some_taken) {
7326 rcv_flags |= SCTP_DATA_LAST_FRAG;
7327 sp->put_last_out = 1;
7329 rcv_flags |= SCTP_DATA_NOT_FRAG;
7330 sp->put_last_out = 1;
7333 /* Not all of it fits, we fragment */
7334 if (sp->some_taken == 0) {
7335 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7340 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7343 * We use a snapshot of length in case it
7344 * is expanding during the compare.
7349 if (to_move >= llen) {
7351 if (send_lock_up == 0) {
7353 * We are taking all of an incomplete msg
7354 * thus we need a send lock.
7356 SCTP_TCB_SEND_LOCK(stcb);
7358 if (sp->msg_is_complete) {
7360 * the sender finished the msg.
7367 if (sp->some_taken == 0) {
7368 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7372 /* Nothing to take. */
7373 if ((sp->some_taken) &&
7374 (stcb->asoc.idata_supported == 0)) {
7383 /* If we reach here, we can copy out a chunk */
7384 sctp_alloc_a_chunk(stcb, chk);
7386 /* No chunk memory */
7392 * Setup for unordered if needed by looking at the user sent info flags.
7395 if (sp->sinfo_flags & SCTP_UNORDERED) {
7396 rcv_flags |= SCTP_DATA_UNORDERED;
7398 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7399 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7400 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7402 /* clear out the chunk before setting up */
7403 memset(chk, 0, sizeof(*chk));
7404 chk->rec.data.rcv_flags = rcv_flags;
7406 if (to_move >= length) {
7407 /* we think we can steal the whole thing */
7408 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7409 SCTP_TCB_SEND_LOCK(stcb);
7412 if (to_move < sp->length) {
7413 /* bail, it changed */
7416 chk->data = sp->data;
7417 chk->last_mbuf = sp->tail_mbuf;
7418 /* register the stealing */
7419 sp->data = sp->tail_mbuf = NULL;
7424 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7425 chk->last_mbuf = NULL;
7426 if (chk->data == NULL) {
7427 sp->some_taken = some_taken;
7428 sctp_free_a_chunk(stcb, chk, so_locked);
7433 #ifdef SCTP_MBUF_LOGGING
7434 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7435 sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7438 /* Pull off the data */
7439 m_adj(sp->data, to_move);
7440 /* Now lets work our way down and compact it */
7442 while (m && (SCTP_BUF_LEN(m) == 0)) {
7443 sp->data = SCTP_BUF_NEXT(m);
7444 SCTP_BUF_NEXT(m) = NULL;
7445 if (sp->tail_mbuf == m) {
7447 * Freeing tail? TSNH since
7448 * we supposedly were taking less
7449 * than the sp->length.
7452 panic("Huh, freeing tail? - TSNH");
7454 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7455 sp->tail_mbuf = sp->data = NULL;
7464 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7465 chk->copy_by_ref = 1;
7467 chk->copy_by_ref = 0;
7470 * get last_mbuf and counts of mb usage. This is ugly but hopefully
7471 * it's only one mbuf.
7473 if (chk->last_mbuf == NULL) {
7474 chk->last_mbuf = chk->data;
7475 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7476 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7479 if (to_move > length) {
7480 /*- This should not happen either
7481 * since we always lower to_move to the size
7482 * of sp->length if it's larger.
7485 panic("Huh, how can to_move be larger?");
7487 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7491 atomic_subtract_int(&sp->length, to_move);
7493 if (stcb->asoc.idata_supported == 0) {
7494 leading = (int)sizeof(struct sctp_data_chunk);
7496 leading = (int)sizeof(struct sctp_idata_chunk);
7498 if (M_LEADINGSPACE(chk->data) < leading) {
7499 /* Not enough room for a chunk header, get some */
7502 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
7505 * we're in trouble here. _PREPEND below will free
7506 * all the data if there is no leading space, so we
7507 * must put the data back and restore.
7509 if (send_lock_up == 0) {
7510 SCTP_TCB_SEND_LOCK(stcb);
7513 if (sp->data == NULL) {
7514 /* unsteal the data */
7515 sp->data = chk->data;
7516 sp->tail_mbuf = chk->last_mbuf;
7520 /* reassemble the data */
7522 sp->data = chk->data;
7523 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7525 sp->some_taken = some_taken;
7526 atomic_add_int(&sp->length, to_move);
7529 sctp_free_a_chunk(stcb, chk, so_locked);
7533 SCTP_BUF_LEN(m) = 0;
7534 SCTP_BUF_NEXT(m) = chk->data;
7536 M_ALIGN(chk->data, 4);
7539 if (stcb->asoc.idata_supported == 0) {
7540 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
7542 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_idata_chunk), M_NOWAIT);
7544 if (chk->data == NULL) {
7545 /* HELP, TSNH since we assured it would not above? */
7547 panic("prepend fails HELP?");
7549 SCTP_PRINTF("prepend fails HELP?\n");
7550 sctp_free_a_chunk(stcb, chk, so_locked);
7556 if (stcb->asoc.idata_supported == 0) {
7557 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7558 chk->book_size = chk->send_size = (uint16_t) (to_move + sizeof(struct sctp_data_chunk));
7560 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_idata_chunk));
7561 chk->book_size = chk->send_size = (uint16_t) (to_move + sizeof(struct sctp_idata_chunk));
7563 chk->book_size_scale = 0;
7564 chk->sent = SCTP_DATAGRAM_UNSENT;
7567 chk->asoc = &stcb->asoc;
7568 chk->pad_inplace = 0;
7569 chk->no_fr_allowed = 0;
7570 chk->rec.data.stream_seq = strq->next_sequence_send;
7571 if ((rcv_flags & SCTP_DATA_LAST_FRAG) &&
7572 !(rcv_flags & SCTP_DATA_UNORDERED)) {
7573 strq->next_sequence_send++;
7575 chk->rec.data.stream_number = sp->stream;
7576 chk->rec.data.payloadtype = sp->ppid;
7577 chk->rec.data.context = sp->context;
7578 chk->rec.data.doing_fast_retransmit = 0;
7580 chk->rec.data.timetodrop = sp->ts;
7581 chk->flags = sp->act_flags;
7584 chk->whoTo = sp->net;
7585 atomic_add_int(&chk->whoTo->ref_count, 1);
7589 if (sp->holds_key_ref) {
7590 chk->auth_keyid = sp->auth_keyid;
7591 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7592 chk->holds_key_ref = 1;
7594 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7595 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7596 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7597 (uint32_t) (uintptr_t) stcb, sp->length,
7598 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7599 chk->rec.data.TSN_seq);
7601 if (stcb->asoc.idata_supported == 0) {
7602 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7604 ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
7607 * Put the rest of the things in place now. Size was done earlier in
7608 * previous loop prior to padding.
7611 #ifdef SCTP_ASOCLOG_OF_TSNS
7612 SCTP_TCB_LOCK_ASSERT(stcb);
7613 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7614 asoc->tsn_out_at = 0;
7615 asoc->tsn_out_wrapped = 1;
7617 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7618 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7619 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7620 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7621 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7622 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7623 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7624 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7627 if (stcb->asoc.idata_supported == 0) {
7628 dchkh->ch.chunk_type = SCTP_DATA;
7629 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7630 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7631 dchkh->dp.stream_id = htons((strq->stream_no & 0x0000ffff));
7632 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7633 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7634 dchkh->ch.chunk_length = htons(chk->send_size);
7636 ndchkh->ch.chunk_type = SCTP_IDATA;
7637 ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7638 ndchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7639 ndchkh->dp.stream_id = htons(strq->stream_no);
7640 /* WHAT DO WE DO HERE??? */
7641 ndchkh->dp.reserved = htons(0);
7642 ndchkh->dp.msg_id = htonl(sp->msg_id);
7644 ndchkh->dp.protocol_id = chk->rec.data.payloadtype;
7646 ndchkh->dp.fsn = htonl(sp->fsn);
7648 ndchkh->ch.chunk_length = htons(chk->send_size);
7650 /* Now advance the chk->send_size by the actual pad needed. */
7651 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7656 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7657 lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
7659 chk->last_mbuf = lm;
7660 chk->pad_inplace = 1;
7662 chk->send_size += pads;
7664 if (PR_SCTP_ENABLED(chk->flags)) {
7665 asoc->pr_sctp_cnt++;
7667 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7668 /* All done pull and kill the message */
7669 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7670 if (sp->put_last_out == 0) {
7671 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7672 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7673 sp->sender_all_done,
7675 sp->msg_is_complete,
7679 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7680 SCTP_TCB_SEND_LOCK(stcb);
7683 TAILQ_REMOVE(&strq->outqueue, sp, next);
7684 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7685 (strq->chunks_on_queues == 0) &&
7686 TAILQ_EMPTY(&strq->outqueue)) {
7687 stcb->asoc.trigger_reset = 1;
7689 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7691 sctp_free_remote_addr(sp->net);
7695 sctp_m_freem(sp->data);
7698 sctp_free_a_strmoq(stcb, sp, so_locked);
7700 /* we can't be locked to it */
7702 stcb->asoc.locked_on_sending = NULL;
7704 /* more to go, we are locked */
7705 if (stcb->asoc.idata_supported == 0)
7708 asoc->chunks_on_out_queue++;
7709 strq->chunks_on_queues++;
7710 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7711 asoc->send_queue_cnt++;
7714 SCTP_TCB_SEND_UNLOCK(stcb);
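/*
 * sctp_move_to_outqueue: take up to frag_point/goal_mtu bytes from the
 * head of the stream's outqueue, either stealing the whole mbuf chain
 * or copying part of it, prepend a DATA (or I-DATA) chunk header,
 * assign the TSN and stream sequence number, and append the resulting
 * chunk to the association's send_queue.
 */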
7721 sctp_fill_outqueue(struct sctp_tcb *stcb,
7722 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7723 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7728 struct sctp_association *asoc;
7729 struct sctp_stream_out *strq;
7730 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7733 SCTP_TCB_LOCK_ASSERT(stcb);
7735 switch (net->ro._l_addr.sa.sa_family) {
7738 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7743 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7748 goal_mtu = net->mtu;
7751 /* Need an allowance for the data chunk header too */
7752 if (stcb->asoc.idata_supported == 0) {
7753 goal_mtu -= sizeof(struct sctp_data_chunk);
7755 goal_mtu -= sizeof(struct sctp_idata_chunk);
7758 /* must make even word boundary */
7759 goal_mtu &= 0xfffffffc;
7760 if (asoc->locked_on_sending) {
7761 /* We are stuck on one stream until the message completes. */
7762 strq = asoc->locked_on_sending;
7765 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7768 while ((goal_mtu > 0) && strq) {
7771 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7772 &giveup, eeor_mode, &bail, so_locked);
7774 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7777 asoc->locked_on_sending = strq;
7778 if ((moved_how_much == 0) || (giveup) || bail)
7779 /* no more to move for now */
7782 asoc->locked_on_sending = NULL;
7783 if ((giveup) || bail) {
7786 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7791 total_moved += moved_how_much;
7792 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7793 goal_mtu &= 0xfffffffc;
7798 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7800 if (total_moved == 0) {
7801 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7802 (net == stcb->asoc.primary_destination)) {
7803 /* ran dry for primary network net */
7804 SCTP_STAT_INCR(sctps_primary_randry);
7805 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7806 /* ran dry with CMT on */
7807 SCTP_STAT_INCR(sctps_cmt_randry);
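/*
 * sctp_fill_outqueue: compute goal_mtu from the path MTU minus IP and
 * DATA/I-DATA overhead (rounded to a word boundary), then keep asking
 * the stream scheduler for a stream and moving data to the send queue
 * until goal_mtu is consumed, the scheduler has nothing more, or a
 * move bails out.
 */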
7813 sctp_fix_ecn_echo(struct sctp_association *asoc)
7815 struct sctp_tmit_chunk *chk;
7817 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7818 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7819 chk->sent = SCTP_DATAGRAM_UNSENT;
7825 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7827 struct sctp_association *asoc;
7828 struct sctp_tmit_chunk *chk;
7829 struct sctp_stream_queue_pending *sp;
7836 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7837 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7838 if (sp->net == net) {
7839 sctp_free_remote_addr(sp->net);
7844 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7845 if (chk->whoTo == net) {
7846 sctp_free_remote_addr(chk->whoTo);
7853 sctp_med_chunk_output(struct sctp_inpcb *inp,
7854 struct sctp_tcb *stcb,
7855 struct sctp_association *asoc,
7858 int control_only, int from_where,
7859 struct timeval *now, int *now_filled, int frag_point, int so_locked
7860 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7866 * Ok this is the generic chunk service queue. we must do the following:
7868 * - Service the stream queue that is next, moving any
7869 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7870 * LAST to the out queue in one pass) and assigning TSN's. This
7871 * only applies though if the peer does not support NDATA. For NDATA
7872 * chunks it's ok to not send the entire message ;-)
7873 * - Check to see if the cwnd/rwnd allows any output, if so we go ahead and
7874 * formulate and send the low level chunks. Making sure to combine
7875 * any control in the control chunk queue also.
7877 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7878 struct mbuf *outchain, *endoutchain;
7879 struct sctp_tmit_chunk *chk, *nchk;
7881 /* temp arrays for unlinking */
7882 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7883 int no_fragmentflg, error;
7884 unsigned int max_rwnd_per_dest, max_send_per_dest;
7885 int one_chunk, hbflag, skip_data_for_this_net;
7886 int asconf, cookie, no_out_cnt;
7887 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7888 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7890 uint32_t auth_offset = 0;
7891 struct sctp_auth_chunk *auth = NULL;
7892 uint16_t auth_keyid;
7893 int override_ok = 1;
7894 int skip_fill_up = 0;
7895 int data_auth_reqd = 0;
7898 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the destination or not.
7905 auth_keyid = stcb->asoc.authinfo.active_keyid;
7906 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7907 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7908 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7913 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7915 * First lets prime the pump. For each destination, if there is room
7916 * in the flight size, attempt to pull an MTU's worth out of the
7917 * stream queues into the general send_queue
7919 #ifdef SCTP_AUDITING_ENABLED
7920 sctp_audit_log(0xC2, 2);
7922 SCTP_TCB_LOCK_ASSERT(stcb);
7929 /* Nothing possible to send? */
7930 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7931 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7932 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7933 TAILQ_EMPTY(&asoc->send_queue) &&
7934 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
7939 if (asoc->peers_rwnd == 0) {
7940 /* No room in peers rwnd */
7942 if (asoc->total_flight > 0) {
7943 /* we are allowed one chunk in flight */
7947 if (stcb->asoc.ecn_echo_cnt_onq) {
7948 /* Record where a sack goes, if any */
7949 if (no_data_chunks &&
7950 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7951 /* Nothing but ECNe to send - we don't do that */
7952 goto nothing_to_send;
7954 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7955 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7956 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7957 sack_goes_to = chk->whoTo;
7962 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7963 if (stcb->sctp_socket)
7964 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7966 max_send_per_dest = 0;
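/*
 * Spread the available window across destinations: the peer's rwnd
 * plus the data already in flight is divided evenly per net, as is the
 * socket send buffer limit, giving per-destination bounds for the data
 * filling and bundling that follows.
 */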
7967 if (no_data_chunks == 0) {
7968 /* How many non-directed chunks are there? */
7969 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7970 if (chk->whoTo == NULL) {
7972 * We already have non-directed chunks on
7973 * the queue, no need to do a fill-up.
7981 if ((no_data_chunks == 0) &&
7982 (skip_fill_up == 0) &&
7983 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7984 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7986 * This for loop we are in takes in each net, if
7987 * it's got space in cwnd and has data sent to it
7988 * (when CMT is off) then it calls
7989 * sctp_fill_outqueue for the net. This gets data on
7990 * the send queue for that network.
7992 * In sctp_fill_outqueue TSN's are assigned and data is
7993 * copied out of the stream buffers. Note mostly
7994 * copy by reference (we hope).
7996 net->window_probe = 0;
7997 if ((net != stcb->asoc.alternate) &&
7998 ((net->dest_state & SCTP_ADDR_PF) ||
7999 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
8000 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
8001 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8002 sctp_log_cwnd(stcb, net, 1,
8003 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8007 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
8008 (net->flight_size == 0)) {
8009 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
8011 if (net->flight_size >= net->cwnd) {
8012 /* skip this network, no room - can't fill */
8013 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8014 sctp_log_cwnd(stcb, net, 3,
8015 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8019 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8020 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8022 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8024 /* memory alloc failure */
8030 /* now service each destination and send out what we can for it */
8031 /* Nothing to send? */
8032 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8033 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8034 TAILQ_EMPTY(&asoc->send_queue)) {
8038 if (asoc->sctp_cmt_on_off > 0) {
8039 /* get the last start point */
8040 start_at = asoc->last_net_cmt_send_started;
8041 if (start_at == NULL) {
8042 /* null so to beginning */
8043 start_at = TAILQ_FIRST(&asoc->nets);
8045 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8046 if (start_at == NULL) {
8047 start_at = TAILQ_FIRST(&asoc->nets);
8050 asoc->last_net_cmt_send_started = start_at;
8052 start_at = TAILQ_FIRST(&asoc->nets);
8054 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8055 if (chk->whoTo == NULL) {
8056 if (asoc->alternate) {
8057 chk->whoTo = asoc->alternate;
8059 chk->whoTo = asoc->primary_destination;
8061 atomic_add_int(&chk->whoTo->ref_count, 1);
8064 old_start_at = NULL;
8065 again_one_more_time:
8066 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8067 /* how much can we send? */
8068 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8069 if (old_start_at && (old_start_at == net)) {
8070 /* through list completely. */
8074 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8075 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8076 (net->flight_size >= net->cwnd)) {
8078 * Nothing on control or asconf and flight is full,
8079 * we can skip even in the CMT case.
8084 endoutchain = outchain = NULL;
8087 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8088 skip_data_for_this_net = 1;
8090 skip_data_for_this_net = 0;
8092 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8095 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8100 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8110 if (mtu > asoc->peers_rwnd) {
8111 if (asoc->total_flight > 0) {
8112 /* We have a packet in flight somewhere */
8113 r_mtu = asoc->peers_rwnd;
8115 /* We are always allowed to send one MTU out */
8123 /************************/
8124 /* ASCONF transmission */
8125 /************************/
8126 /* Now first lets go through the asconf queue */
8127 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8128 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8131 if (chk->whoTo == NULL) {
8132 if (asoc->alternate == NULL) {
8133 if (asoc->primary_destination != net) {
8137 if (asoc->alternate != net) {
8142 if (chk->whoTo != net) {
8146 if (chk->data == NULL) {
8149 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8150 chk->sent != SCTP_DATAGRAM_RESEND) {
8154 * if no AUTH is yet included and this chunk
8155 * requires it, make sure to account for it. We
8156 * don't apply the size until the AUTH chunk is
8157 * actually added below in case there is no room for
8158 * this chunk. NOTE: we overload the use of "omtu"
8161 if ((auth == NULL) &&
8162 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8163 stcb->asoc.peer_auth_chunks)) {
8164 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8167 /* Here we do NOT factor the r_mtu */
8168 if ((chk->send_size < (int)(mtu - omtu)) ||
8169 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8171 * We probably should glom the mbuf chain
8172 * from the chk->data for control but the
8173 * problem is it becomes yet one more level
8174 * of tracking to do if for some reason
8175 * output fails. Then I have got to
8176 * reconstruct the merged control chain.. el
8177 * yucko.. for now we take the easy way and do the copy.
8181 * Add an AUTH chunk, if chunk requires it
8182 * save the offset into the chain for AUTH
8184 if ((auth == NULL) &&
8185 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8186 stcb->asoc.peer_auth_chunks))) {
8187 outchain = sctp_add_auth_chunk(outchain,
8192 chk->rec.chunk_id.id);
8193 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8195 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8196 (int)chk->rec.chunk_id.can_take_data,
8197 chk->send_size, chk->copy_by_ref);
8198 if (outchain == NULL) {
8200 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8203 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8204 /* update our MTU size */
8205 if (mtu > (chk->send_size + omtu))
8206 mtu -= (chk->send_size + omtu);
8209 to_out += (chk->send_size + omtu);
8210 /* Do clear IP_DF ? */
8211 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8214 if (chk->rec.chunk_id.can_take_data)
8217 * set hb flag since we can use these for RTO.
8223 * should sysctl this: don't bundle data
8224 * with ASCONF since it requires AUTH
8227 chk->sent = SCTP_DATAGRAM_SENT;
8228 if (chk->whoTo == NULL) {
8230 atomic_add_int(&net->ref_count, 1);
8235 * Ok we are out of room but we can
8236 * output without affecting the
8237 * flight size since this little guy
8238 * is a control only packet.
8240 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8242 * do NOT clear the asconf flag as
8243 * it is used to do appropriate
8244 * source address selection.
8246 if (*now_filled == 0) {
8247 (void)SCTP_GETTIME_TIMEVAL(now);
8250 net->last_sent_time = *now;
8252 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8253 (struct sockaddr *)&net->ro._l_addr,
8254 outchain, auth_offset, auth,
8255 stcb->asoc.authinfo.active_keyid,
8256 no_fragmentflg, 0, asconf,
8257 inp->sctp_lport, stcb->rport,
8258 htonl(stcb->asoc.peer_vtag),
8263 * error, we could not output.
8266 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8267 if (from_where == 0) {
8268 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8270 if (error == ENOBUFS) {
8271 asoc->ifp_had_enobuf = 1;
8272 SCTP_STAT_INCR(sctps_lowlevelerr);
8274 /* error, could not output */
8275 if (error == EHOSTUNREACH) {
8281 sctp_move_chunks_from_net(stcb, net);
8286 asoc->ifp_had_enobuf = 0;
8289 * increase the number we sent, if a
8290 * cookie is sent we don't tell them any was skipped.
8293 outchain = endoutchain = NULL;
8297 *num_out += ctl_cnt;
8298 /* recalc a clean slate and setup */
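/*
 * SCTP_MIN_V4_OVERHEAD and SCTP_MIN_OVERHEAD are the IPv4 and IPv6
 * header sizes plus the common SCTP header; subtracting them from the
 * path MTU leaves the space available for chunks in the next packet.
 */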
8299 switch (net->ro._l_addr.sa.sa_family) {
8302 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8307 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8324 /************************/
8325 /* Control transmission */
8326 /************************/
8327 /* Now first lets go through the control queue */
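/*
 * For each queued control chunk we check that it is routed to this
 * destination (or can default to it), is still unsent, and fits in
 * the remaining MTU (including any AUTH chunk it would force us to
 * add) before it is copied onto the outgoing mbuf chain.
 */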
8328 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8329 if ((sack_goes_to) &&
8330 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8331 (chk->whoTo != sack_goes_to)) {
8333 * if we have a sack in queue, and we are
8334 * looking at an ecn echo that is NOT queued
8335 * to where the sack is going..
8337 if (chk->whoTo == net) {
8339 * Don't transmit it to where it's
8340 * going (current net)
8343 } else if (sack_goes_to == net) {
8345 * But do transmit it to this
8348 goto skip_net_check;
8351 if (chk->whoTo == NULL) {
8352 if (asoc->alternate == NULL) {
8353 if (asoc->primary_destination != net) {
8357 if (asoc->alternate != net) {
8362 if (chk->whoTo != net) {
8367 if (chk->data == NULL) {
8370 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8372 * It must be unsent. Cookies and ASCONF's
8373 * hang around but their timers will force
8374 * when marked for resend.
8379 * if no AUTH is yet included and this chunk
8380 * requires it, make sure to account for it. We
8381 * don't apply the size until the AUTH chunk is
8382 * actually added below in case there is no room for
8383 * this chunk. NOTE: we overload the use of "omtu"
8386 if ((auth == NULL) &&
8387 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8388 stcb->asoc.peer_auth_chunks)) {
8389 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8392 /* Here we do NOT factor the r_mtu */
8393 if ((chk->send_size <= (int)(mtu - omtu)) ||
8394 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8396 * We probably should glom the mbuf chain
8397 * from the chk->data for control but the
8398 * problem is it becomes yet one more level
8399 * of tracking to do if for some reason
8400 * output fails. Then I have got to
8401 * reconstruct the merged control chain.. el
8402 * yucko.. for now we take the easy way and
8406 * Add an AUTH chunk, if chunk requires it
8407 * save the offset into the chain for AUTH
8409 if ((auth == NULL) &&
8410 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8411 stcb->asoc.peer_auth_chunks))) {
8412 outchain = sctp_add_auth_chunk(outchain,
8417 chk->rec.chunk_id.id);
8418 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8420 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8421 (int)chk->rec.chunk_id.can_take_data,
8422 chk->send_size, chk->copy_by_ref);
8423 if (outchain == NULL) {
8425 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8428 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8429 /* update our MTU size */
8430 if (mtu > (chk->send_size + omtu))
8431 mtu -= (chk->send_size + omtu);
8434 to_out += (chk->send_size + omtu);
8435 /* Do clear IP_DF ? */
8436 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8439 if (chk->rec.chunk_id.can_take_data)
8441 /* Mark things to be removed, if needed */
8442 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8443 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8444 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8445 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8446 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8447 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8448 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8449 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8450 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8451 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8452 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8453 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8456 /* remove these chunks at the end */
8457 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8458 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8459 /* turn off the timer */
8460 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8461 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8463 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8469 * Other chunks, since they have
8470 * timers running (i.e. COOKIE) we
8471 * just "trust" that it gets sent or
8475 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8478 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8480 * Increment ecne send count
8481 * here this means we may be
8482 * over-zealous in our
8483 * counting if the send
8484 * fails, but it's the best
8485 * place to do it (we used
8486 * to do it in the queue of
8487 * the chunk, but that did
8488 * not tell how many times
8491 SCTP_STAT_INCR(sctps_sendecne);
8493 chk->sent = SCTP_DATAGRAM_SENT;
8494 if (chk->whoTo == NULL) {
8496 atomic_add_int(&net->ref_count, 1);
8502 * Ok we are out of room but we can
8503 * output without affecting the
8504 * flight size since this little guy
8505 * is a control only packet.
8508 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8510 * do NOT clear the asconf
8511 * flag as it is used to do
8512 * appropriate source
8513 * address selection.
8517 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8520 /* Only HB or ASCONF advances time */
8522 if (*now_filled == 0) {
8523 (void)SCTP_GETTIME_TIMEVAL(now);
8526 net->last_sent_time = *now;
8529 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8530 (struct sockaddr *)&net->ro._l_addr,
8533 stcb->asoc.authinfo.active_keyid,
8534 no_fragmentflg, 0, asconf,
8535 inp->sctp_lport, stcb->rport,
8536 htonl(stcb->asoc.peer_vtag),
8541 * error, we could not
8544 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8545 if (from_where == 0) {
8546 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8548 if (error == ENOBUFS) {
8549 asoc->ifp_had_enobuf = 1;
8550 SCTP_STAT_INCR(sctps_lowlevelerr);
8552 if (error == EHOSTUNREACH) {
8558 sctp_move_chunks_from_net(stcb, net);
8563 asoc->ifp_had_enobuf = 0;
8566 * increase the number we sent, if a
8567 * cookie is sent we don't tell them
8570 outchain = endoutchain = NULL;
8574 *num_out += ctl_cnt;
8575 /* recalc a clean slate and setup */
8576 switch (net->ro._l_addr.sa.sa_family) {
8579 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8584 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8601 /* JRI: if dest is in PF state, do not send data to it */
8602 if ((asoc->sctp_cmt_on_off > 0) &&
8603 (net != stcb->asoc.alternate) &&
8604 (net->dest_state & SCTP_ADDR_PF)) {
8607 if (net->flight_size >= net->cwnd) {
8610 if ((asoc->sctp_cmt_on_off > 0) &&
8611 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8612 (net->flight_size > max_rwnd_per_dest)) {
8616 * We need a specific accounting for the usage of the send
8617 * buffer. We also need to check the number of messages per
8618 * net. For now, this is better than nothing and it is disabled
8621 if ((asoc->sctp_cmt_on_off > 0) &&
8622 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8623 (max_send_per_dest > 0) &&
8624 (net->flight_size > max_send_per_dest)) {
8627 /*********************/
8628 /* Data transmission */
8629 /*********************/
8631 * if AUTH for DATA is required and no AUTH has been added
8632 * yet, account for this in the mtu now... if no data can be
8633 * bundled, this adjustment won't matter anyways since the
8634 * packet will be going out...
8636 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8637 stcb->asoc.peer_auth_chunks);
8638 if (data_auth_reqd && (auth == NULL)) {
8639 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8641 /* now lets add any data within the MTU constraints */
8642 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8645 if (net->mtu > SCTP_MIN_V4_OVERHEAD)
8646 omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8653 if (net->mtu > SCTP_MIN_OVERHEAD)
8654 omtu = net->mtu - SCTP_MIN_OVERHEAD;
8664 if ((((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
8665 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
8666 (skip_data_for_this_net == 0)) ||
8668 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8669 if (no_data_chunks) {
8670 /* let only control go out */
8674 if (net->flight_size >= net->cwnd) {
8675 /* skip this net, no room for data */
8679 if ((chk->whoTo != NULL) &&
8680 (chk->whoTo != net)) {
8681 /* Don't send the chunk on this net */
8684 if (asoc->sctp_cmt_on_off == 0) {
8685 if ((asoc->alternate) &&
8686 (asoc->alternate != net) &&
8687 (chk->whoTo == NULL)) {
8689 } else if ((net != asoc->primary_destination) &&
8690 (asoc->alternate == NULL) &&
8691 (chk->whoTo == NULL)) {
8695 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8697 * strange, we have a chunk that is
8698 * too big for its destination and
8699 * yet no fragment ok flag.
8700 * Something went wrong when the
8701 * PMTU changed...we did not mark
8702 * this chunk for some reason?? I
8703 * will fix it here by letting IP
8704 * fragment it for now and printing
8705 * a warning. This really should not
8708 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8709 chk->send_size, mtu);
8710 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8712 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8713 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
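/*
 * Shutdown is pending and the sysctl allows it: set the I-bit so
 * the peer SACKs this DATA chunk immediately instead of delaying.
 */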
8714 struct sctp_data_chunk *dchkh;
8716 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8717 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8719 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8720 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
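/*
 * The chunk fits within both the remaining path-MTU budget (mtu)
 * and the remaining receiver-window budget (r_mtu), or it may be
 * fragmented and fits the peer's rwnd, so bundle it.
 */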
8721 /* ok we will add this one */
8724 * Add an AUTH chunk, if chunk
8725 * requires it, save the offset into
8726 * the chain for AUTH
8728 if (data_auth_reqd) {
8730 outchain = sctp_add_auth_chunk(outchain,
8736 auth_keyid = chk->auth_keyid;
8738 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8739 } else if (override_ok) {
8744 auth_keyid = chk->auth_keyid;
8746 } else if (auth_keyid != chk->auth_keyid) {
8754 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8755 chk->send_size, chk->copy_by_ref);
8756 if (outchain == NULL) {
8757 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8758 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8759 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8762 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8765 /* update our MTU size */
8766 /* Do clear IP_DF ? */
8767 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8770 /* unsigned subtraction of mtu */
8771 if (mtu > chk->send_size)
8772 mtu -= chk->send_size;
8775 /* unsigned subtraction of r_mtu */
8776 if (r_mtu > chk->send_size)
8777 r_mtu -= chk->send_size;
8781 to_out += chk->send_size;
8782 if ((to_out > mx_mtu) && no_fragmentflg) {
8784 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8786 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8790 chk->window_probe = 0;
8791 data_list[bundle_at++] = chk;
8792 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8795 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8796 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8797 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8799 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8801 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8802 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8812 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8814 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8815 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8816 data_list[0]->window_probe = 1;
8817 net->window_probe = 1;
8823 * Must be sent in order of the
8824 * TSN's (on a network)
8828 } /* for (chunk gather loop for this net) */
8829 } /* if asoc.state OPEN */
8831 /* Is there something to send for this destination? */
8833 /* We may need to start a control timer or two */
8835 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8838 * do NOT clear the asconf flag as it is
8839 * used to do appropriate source address
8844 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8847 /* must start a send timer if data is being sent */
8848 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8850 * no timer running on this destination
8853 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8855 if (bundle_at || hbflag) {
8856 /* For data/asconf and hb set time */
8857 if (*now_filled == 0) {
8858 (void)SCTP_GETTIME_TIMEVAL(now);
8861 net->last_sent_time = *now;
8863 /* Now send it, if there is anything to send :> */
8864 if ((error = sctp_lowlevel_chunk_output(inp,
8867 (struct sockaddr *)&net->ro._l_addr,
8875 inp->sctp_lport, stcb->rport,
8876 htonl(stcb->asoc.peer_vtag),
8880 /* error, we could not output */
8881 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8882 if (from_where == 0) {
8883 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8885 if (error == ENOBUFS) {
8886 SCTP_STAT_INCR(sctps_lowlevelerr);
8887 asoc->ifp_had_enobuf = 1;
8889 if (error == EHOSTUNREACH) {
8891 * Destination went unreachable
8894 sctp_move_chunks_from_net(stcb, net);
8898 * I add this line to be paranoid. As far as
8899 * I can tell the continue takes us back to
8900 * the top of the for, but just to make sure
8901 * I will reset these again here.
8903 ctl_cnt = bundle_at = 0;
8904 continue; /* This takes us back to the
8905 * for() for the nets. */
8907 asoc->ifp_had_enobuf = 0;
8913 *num_out += (ctl_cnt + bundle_at);
8916 /* setup for a RTO measurement */
8917 tsns_sent = data_list[0]->rec.data.TSN_seq;
8918 /* fill time if not already filled */
8919 if (*now_filled == 0) {
8920 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8922 *now = asoc->time_last_sent;
8924 asoc->time_last_sent = *now;
8926 if (net->rto_needed) {
8927 data_list[0]->do_rtt = 1;
8928 net->rto_needed = 0;
8930 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8931 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8937 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8938 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8941 if (old_start_at == NULL) {
8942 old_start_at = start_at;
8943 start_at = TAILQ_FIRST(&asoc->nets);
8945 goto again_one_more_time;
8948 * At the end there should be no NON timed chunks hanging on this
8951 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8952 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8954 if ((*num_out == 0) && (*reason_code == 0)) {
8959 sctp_clean_up_ctl(stcb, asoc, so_locked);
8964 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8967 * Prepend an OPERATIONAL_ERROR chunk header and put it on the end of
8968 * the control chunk queue.
8970 struct sctp_chunkhdr *hdr;
8971 struct sctp_tmit_chunk *chk;
8972 struct mbuf *mat, *last_mbuf;
8973 uint32_t chunk_length;
8974 uint16_t padding_length;
8976 SCTP_TCB_LOCK_ASSERT(stcb);
8977 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
8978 if (op_err == NULL) {
8983 for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
8984 chunk_length += SCTP_BUF_LEN(mat);
8985 if (SCTP_BUF_NEXT(mat) == NULL) {
8989 if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
8990 sctp_m_freem(op_err);
8993 padding_length = chunk_length % 4;
8994 if (padding_length != 0) {
8995 padding_length = 4 - padding_length;
8997 if (padding_length != 0) {
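/*
 * Chunks go on the wire padded to a 4-byte boundary; the pad bytes
 * are appended to the last mbuf but are not counted in the chunk
 * length.
 */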
8998 if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
8999 sctp_m_freem(op_err);
9003 sctp_alloc_a_chunk(stcb, chk);
9006 sctp_m_freem(op_err);
9009 chk->copy_by_ref = 0;
9010 chk->send_size = (uint16_t) chunk_length;
9011 chk->sent = SCTP_DATAGRAM_UNSENT;
9013 chk->asoc = &stcb->asoc;
9016 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
9017 chk->rec.chunk_id.can_take_data = 0;
9018 hdr = mtod(op_err, struct sctp_chunkhdr *);
9019 hdr->chunk_type = SCTP_OPERATION_ERROR;
9020 hdr->chunk_flags = 0;
9021 hdr->chunk_length = htons(chk->send_size);
9022 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9023 chk->asoc->ctrl_queue_cnt++;
9027 sctp_send_cookie_echo(struct mbuf *m,
9029 struct sctp_tcb *stcb,
9030 struct sctp_nets *net)
9033 * pull out the cookie and put it at the front of the control chunk
9037 struct mbuf *cookie;
9038 struct sctp_paramhdr parm, *phdr;
9039 struct sctp_chunkhdr *hdr;
9040 struct sctp_tmit_chunk *chk;
9041 uint16_t ptype, plen;
9043 SCTP_TCB_LOCK_ASSERT(stcb);
9044 /* First find the cookie in the param area */
9046 at = offset + sizeof(struct sctp_init_chunk);
9048 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
9052 ptype = ntohs(phdr->param_type);
9053 plen = ntohs(phdr->param_length);
9054 if (ptype == SCTP_STATE_COOKIE) {
9057 /* found the cookie */
9058 if ((pad = (plen % 4))) {
9061 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9062 if (cookie == NULL) {
9066 #ifdef SCTP_MBUF_LOGGING
9067 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9068 sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9073 at += SCTP_SIZE32(plen);
9075 /* ok, we got the cookie lets change it into a cookie echo chunk */
9076 /* first the change from param to cookie */
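/*
 * The parameter header and the chunk header are both 4 bytes with
 * the length field in the same place, so the copied STATE-COOKIE
 * parameter becomes a COOKIE-ECHO chunk simply by rewriting the
 * first two bytes (type and flags).
 */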
9077 hdr = mtod(cookie, struct sctp_chunkhdr *);
9078 hdr->chunk_type = SCTP_COOKIE_ECHO;
9079 hdr->chunk_flags = 0;
9080 /* get the chunk stuff now and place it in the FRONT of the queue */
9081 sctp_alloc_a_chunk(stcb, chk);
9084 sctp_m_freem(cookie);
9087 chk->copy_by_ref = 0;
9088 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9089 chk->rec.chunk_id.can_take_data = 0;
9090 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9091 chk->send_size = plen;
9092 chk->sent = SCTP_DATAGRAM_UNSENT;
9094 chk->asoc = &stcb->asoc;
9097 atomic_add_int(&chk->whoTo->ref_count, 1);
9098 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9099 chk->asoc->ctrl_queue_cnt++;
9104 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9108 struct sctp_nets *net)
9111 * take a HB request and make it into a HB ack and send it.
9113 struct mbuf *outchain;
9114 struct sctp_chunkhdr *chdr;
9115 struct sctp_tmit_chunk *chk;
9119 /* must have a net pointer */
9122 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9123 if (outchain == NULL) {
9124 /* gak out of memory */
9127 #ifdef SCTP_MBUF_LOGGING
9128 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9129 sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9132 chdr = mtod(outchain, struct sctp_chunkhdr *);
9133 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9134 chdr->chunk_flags = 0;
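/*
 * Zero-pad the copied HEARTBEAT chunk out to a 4-byte boundary; the
 * pad bytes are not counted in the chunk's send_size.
 */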
9135 if (chk_length % 4) {
9137 uint32_t cpthis = 0;
9140 padlen = 4 - (chk_length % 4);
9141 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
9143 sctp_alloc_a_chunk(stcb, chk);
9146 sctp_m_freem(outchain);
9149 chk->copy_by_ref = 0;
9150 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9151 chk->rec.chunk_id.can_take_data = 1;
9153 chk->send_size = chk_length;
9154 chk->sent = SCTP_DATAGRAM_UNSENT;
9156 chk->asoc = &stcb->asoc;
9157 chk->data = outchain;
9159 atomic_add_int(&chk->whoTo->ref_count, 1);
9160 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9161 chk->asoc->ctrl_queue_cnt++;
9165 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9167 /* formulate and queue a cookie-ack back to sender */
9168 struct mbuf *cookie_ack;
9169 struct sctp_chunkhdr *hdr;
9170 struct sctp_tmit_chunk *chk;
9172 SCTP_TCB_LOCK_ASSERT(stcb);
9174 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9175 if (cookie_ack == NULL) {
9179 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9180 sctp_alloc_a_chunk(stcb, chk);
9183 sctp_m_freem(cookie_ack);
9186 chk->copy_by_ref = 0;
9187 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9188 chk->rec.chunk_id.can_take_data = 1;
9190 chk->send_size = sizeof(struct sctp_chunkhdr);
9191 chk->sent = SCTP_DATAGRAM_UNSENT;
9193 chk->asoc = &stcb->asoc;
9194 chk->data = cookie_ack;
9195 if (chk->asoc->last_control_chunk_from != NULL) {
9196 chk->whoTo = chk->asoc->last_control_chunk_from;
9197 atomic_add_int(&chk->whoTo->ref_count, 1);
9201 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9202 hdr->chunk_type = SCTP_COOKIE_ACK;
9203 hdr->chunk_flags = 0;
9204 hdr->chunk_length = htons(chk->send_size);
9205 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9206 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9207 chk->asoc->ctrl_queue_cnt++;
9213 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9215 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9216 struct mbuf *m_shutdown_ack;
9217 struct sctp_shutdown_ack_chunk *ack_cp;
9218 struct sctp_tmit_chunk *chk;
9220 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9221 if (m_shutdown_ack == NULL) {
9225 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9226 sctp_alloc_a_chunk(stcb, chk);
9229 sctp_m_freem(m_shutdown_ack);
9232 chk->copy_by_ref = 0;
9233 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9234 chk->rec.chunk_id.can_take_data = 1;
9236 chk->send_size = sizeof(struct sctp_chunkhdr);
9237 chk->sent = SCTP_DATAGRAM_UNSENT;
9240 chk->asoc = &stcb->asoc;
9241 chk->data = m_shutdown_ack;
9244 atomic_add_int(&chk->whoTo->ref_count, 1);
9246 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9247 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9248 ack_cp->ch.chunk_flags = 0;
9249 ack_cp->ch.chunk_length = htons(chk->send_size);
9250 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9251 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9252 chk->asoc->ctrl_queue_cnt++;
9257 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9259 /* formulate and queue a SHUTDOWN to the sender */
9260 struct mbuf *m_shutdown;
9261 struct sctp_shutdown_chunk *shutdown_cp;
9262 struct sctp_tmit_chunk *chk;
9264 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9265 if (m_shutdown == NULL) {
9269 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9270 sctp_alloc_a_chunk(stcb, chk);
9273 sctp_m_freem(m_shutdown);
9276 chk->copy_by_ref = 0;
9277 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9278 chk->rec.chunk_id.can_take_data = 1;
9280 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9281 chk->sent = SCTP_DATAGRAM_UNSENT;
9284 chk->asoc = &stcb->asoc;
9285 chk->data = m_shutdown;
9288 atomic_add_int(&chk->whoTo->ref_count, 1);
9290 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9291 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9292 shutdown_cp->ch.chunk_flags = 0;
9293 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9294 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9295 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9296 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9297 chk->asoc->ctrl_queue_cnt++;
9302 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9305 * formulate and queue an ASCONF to the peer. ASCONF parameters
9306 * should be queued on the assoc queue.
9308 struct sctp_tmit_chunk *chk;
9309 struct mbuf *m_asconf;
9312 SCTP_TCB_LOCK_ASSERT(stcb);
9314 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9315 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9316 /* can't send a new one if there is one in flight already */
9319 /* compose an ASCONF chunk, maximum length is PMTU */
9320 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9321 if (m_asconf == NULL) {
9324 sctp_alloc_a_chunk(stcb, chk);
9327 sctp_m_freem(m_asconf);
9330 chk->copy_by_ref = 0;
9331 chk->rec.chunk_id.id = SCTP_ASCONF;
9332 chk->rec.chunk_id.can_take_data = 0;
9333 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9334 chk->data = m_asconf;
9335 chk->send_size = len;
9336 chk->sent = SCTP_DATAGRAM_UNSENT;
9338 chk->asoc = &stcb->asoc;
9341 atomic_add_int(&chk->whoTo->ref_count, 1);
9343 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9344 chk->asoc->ctrl_queue_cnt++;
9349 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9352 * formulate and queue an asconf-ack back to the sender. The asconf-ack
9353 * must be stored in the tcb.
9355 struct sctp_tmit_chunk *chk;
9356 struct sctp_asconf_ack *ack, *latest_ack;
9358 struct sctp_nets *net = NULL;
9360 SCTP_TCB_LOCK_ASSERT(stcb);
9361 /* Get the latest ASCONF-ACK */
9362 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
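/*
 * If the previous ASCONF-ACK already went to the address this ASCONF
 * came from, the peer is retransmitting, so pick an alternate
 * destination for the ack this time.
 */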
9363 if (latest_ack == NULL) {
9366 if (latest_ack->last_sent_to != NULL &&
9367 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9368 /* we're doing a retransmission */
9369 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9372 if (stcb->asoc.last_control_chunk_from == NULL) {
9373 if (stcb->asoc.alternate) {
9374 net = stcb->asoc.alternate;
9376 net = stcb->asoc.primary_destination;
9379 net = stcb->asoc.last_control_chunk_from;
9384 if (stcb->asoc.last_control_chunk_from == NULL) {
9385 if (stcb->asoc.alternate) {
9386 net = stcb->asoc.alternate;
9388 net = stcb->asoc.primary_destination;
9391 net = stcb->asoc.last_control_chunk_from;
9394 latest_ack->last_sent_to = net;
9396 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9397 if (ack->data == NULL) {
9400 /* copy the asconf_ack */
9401 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9402 if (m_ack == NULL) {
9403 /* couldn't copy it */
9406 #ifdef SCTP_MBUF_LOGGING
9407 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9408 sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9412 sctp_alloc_a_chunk(stcb, chk);
9416 sctp_m_freem(m_ack);
9419 chk->copy_by_ref = 0;
9420 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9421 chk->rec.chunk_id.can_take_data = 1;
9422 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9425 atomic_add_int(&chk->whoTo->ref_count, 1);
9428 chk->send_size = ack->len;
9429 chk->sent = SCTP_DATAGRAM_UNSENT;
9431 chk->asoc = &stcb->asoc;
9433 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9434 chk->asoc->ctrl_queue_cnt++;
9441 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9442 struct sctp_tcb *stcb,
9443 struct sctp_association *asoc,
9444 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9445 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9451 * send out one MTU of retransmission. If fast_retransmit is
9452 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9453 * rwnd. For a Cookie or Asconf in the control chunk queue we
9454 * retransmit them by themselves.
9456 * For data chunks we will pick out the lowest TSN's in the sent_queue
9457 * marked for resend and bundle them all together (up to a MTU of
9458 * destination). The address to send to should have been
9459 * selected/changed where the retransmission was marked (i.e. in FR
9460 * or t3-timeout routines).
9462 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9463 struct sctp_tmit_chunk *chk, *fwd;
9464 struct mbuf *m, *endofchain;
9465 struct sctp_nets *net = NULL;
9466 uint32_t tsns_sent = 0;
9467 int no_fragmentflg, bundle_at, cnt_thru;
9469 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9470 struct sctp_auth_chunk *auth = NULL;
9471 uint32_t auth_offset = 0;
9472 uint16_t auth_keyid;
9473 int override_ok = 1;
9474 int data_auth_reqd = 0;
9477 SCTP_TCB_LOCK_ASSERT(stcb);
9478 tmr_started = ctl_cnt = bundle_at = error = 0;
9483 endofchain = m = NULL;
9484 auth_keyid = stcb->asoc.authinfo.active_keyid;
9485 #ifdef SCTP_AUDITING_ENABLED
9486 sctp_audit_log(0xC3, 1);
9488 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9489 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9490 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9491 asoc->sent_queue_retran_cnt);
9492 asoc->sent_queue_cnt = 0;
9493 asoc->sent_queue_cnt_removeable = 0;
9494 /* send back 0/0 so we enter normal transmission */
9498 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9499 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9500 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9501 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9502 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9505 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9506 if (chk != asoc->str_reset) {
9508 * not eligible for retran if its
9515 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9519 * Add an AUTH chunk, if chunk requires it save the
9520 * offset into the chain for AUTH
9522 if ((auth == NULL) &&
9523 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9524 stcb->asoc.peer_auth_chunks))) {
9525 m = sctp_add_auth_chunk(m, &endofchain,
9526 &auth, &auth_offset,
9528 chk->rec.chunk_id.id);
9529 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9531 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9537 /* do we have control chunks to retransmit? */
9539 /* Start a timer no matter if we succeed or fail */
9540 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9541 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9542 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9543 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9544 chk->snd_count++; /* update our count */
9545 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9546 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9547 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9548 no_fragmentflg, 0, 0,
9549 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9550 chk->whoTo->port, NULL,
9553 SCTP_STAT_INCR(sctps_lowlevelerr);
9560 * We don't want to mark the net->sent time here since
9561 * we use this for HB and retransmissions cannot measure RTT
9563 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9565 chk->sent = SCTP_DATAGRAM_SENT;
9566 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9570 /* Clean up the fwd-tsn list */
9571 sctp_clean_up_ctl(stcb, asoc, so_locked);
9576 * Ok, it is just data retransmission we need to do or that and a
9577 * fwd-tsn with it all.
9579 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9580 return (SCTP_RETRAN_DONE);
9582 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9583 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9584 /* not yet open, resend the cookie and that is it */
9587 #ifdef SCTP_AUDITING_ENABLED
9588 sctp_auditing(20, inp, stcb, NULL);
9590 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9591 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9592 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9593 /* No, not sent to this net or not ready for rtx */
9596 if (chk->data == NULL) {
9597 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9598 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9601 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9602 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9603 struct mbuf *op_err;
9604 char msg[SCTP_DIAG_INFO_LEN];
9606 snprintf(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
9607 chk->rec.data.TSN_seq, chk->snd_count);
9608 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
9610 atomic_add_int(&stcb->asoc.refcnt, 1);
9611 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
9613 SCTP_TCB_LOCK(stcb);
9614 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9615 return (SCTP_RETRAN_EXIT);
9617 /* pick up the net */
9619 switch (net->ro._l_addr.sa.sa_family) {
9622 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9627 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9636 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9637 /* No room in peers rwnd */
9640 tsn = asoc->last_acked_seq + 1;
9641 if (tsn == chk->rec.data.TSN_seq) {
9643 * we make a special exception for this
9644 * case. The peer has no rwnd but is missing
9645 * the lowest chunk.. which is probably what
9646 * is holding up the rwnd.
9648 goto one_chunk_around;
9653 if (asoc->peers_rwnd < mtu) {
9655 if ((asoc->peers_rwnd == 0) &&
9656 (asoc->total_flight == 0)) {
9657 chk->window_probe = 1;
9658 chk->whoTo->window_probe = 1;
9661 #ifdef SCTP_AUDITING_ENABLED
9662 sctp_audit_log(0xC3, 2);
9666 net->fast_retran_ip = 0;
9667 if (chk->rec.data.doing_fast_retransmit == 0) {
9669 * if no FR in progress skip destinations that have
9670 * flight_size > cwnd.
9672 if (net->flight_size >= net->cwnd) {
9677 * Mark the destination net to have FR recovery
9681 net->fast_retran_ip = 1;
9685 * if no AUTH is yet included and this chunk requires it,
9686 * make sure to account for it. We don't apply the size
9687 * until the AUTH chunk is actually added below in case
9688 * there is no room for this chunk.
9690 if (data_auth_reqd && (auth == NULL)) {
9691 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9695 if ((chk->send_size <= (mtu - dmtu)) ||
9696 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9697 /* ok we will add this one */
9698 if (data_auth_reqd) {
9700 m = sctp_add_auth_chunk(m,
9706 auth_keyid = chk->auth_keyid;
9708 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9709 } else if (override_ok) {
9710 auth_keyid = chk->auth_keyid;
9712 } else if (chk->auth_keyid != auth_keyid) {
9713 /* different keyid, so done bundling */
9717 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9719 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9722 /* Do clear IP_DF ? */
9723 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9726 /* update our MTU size */
9727 if (mtu > (chk->send_size + dmtu))
9728 mtu -= (chk->send_size + dmtu);
9731 data_list[bundle_at++] = chk;
9732 if (one_chunk && (asoc->total_flight <= 0)) {
9733 SCTP_STAT_INCR(sctps_windowprobed);
9736 if (one_chunk == 0) {
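/*
 * Not a window probe: try to bundle additional chunks that are also
 * marked for resend, go to the same net, and still fit in what is
 * left of the MTU.
 */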
9738 * now are there any more forward from chk to pick
9741 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9742 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9743 /* Nope, not for retran */
9746 if (fwd->whoTo != net) {
9747 /* Nope, not the net in question */
9750 if (data_auth_reqd && (auth == NULL)) {
9751 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9754 if (fwd->send_size <= (mtu - dmtu)) {
9755 if (data_auth_reqd) {
9757 m = sctp_add_auth_chunk(m,
9763 auth_keyid = fwd->auth_keyid;
9765 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9766 } else if (override_ok) {
9767 auth_keyid = fwd->auth_keyid;
9769 } else if (fwd->auth_keyid != auth_keyid) {
9777 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9779 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9782 /* Do clear IP_DF ? */
9783 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9786 /* update our MTU size */
9787 if (mtu > (fwd->send_size + dmtu))
9788 mtu -= (fwd->send_size + dmtu);
9791 data_list[bundle_at++] = fwd;
9792 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9796 /* can't fit so we are done */
9801 /* Is there something to send for this destination? */
9804 * No matter if we fail or succeed we should start a
9805 * timer. A failure is like a lost IP packet :-)
9807 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9809 * no timer running on this destination
9812 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9815 /* Now lets send it, if there is anything to send :> */
9816 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9817 (struct sockaddr *)&net->ro._l_addr, m,
9818 auth_offset, auth, auth_keyid,
9819 no_fragmentflg, 0, 0,
9820 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9824 /* error, we could not output */
9825 SCTP_STAT_INCR(sctps_lowlevelerr);
9833 * We don't want to mark the net->sent time here
9834 * since we use this for HB and retransmissions cannot
9837 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9839 /* For auto-close */
9841 if (*now_filled == 0) {
9842 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9843 *now = asoc->time_last_sent;
9846 asoc->time_last_sent = *now;
9848 *cnt_out += bundle_at;
9849 #ifdef SCTP_AUDITING_ENABLED
9850 sctp_audit_log(0xC4, bundle_at);
9853 tsns_sent = data_list[0]->rec.data.TSN_seq;
9855 for (i = 0; i < bundle_at; i++) {
9856 SCTP_STAT_INCR(sctps_sendretransdata);
9857 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9859 * When we have revoked data, and we
9860 * retransmit it, then we clear the revoked
9861 * flag since this flag dictates if we
9862 * subtracted from the fs
9864 if (data_list[i]->rec.data.chunk_was_revoked) {
9865 /* Deflate the cwnd */
9866 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9867 data_list[i]->rec.data.chunk_was_revoked = 0;
9869 data_list[i]->snd_count++;
9870 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9871 /* record the time */
9872 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9873 if (data_list[i]->book_size_scale) {
9875 * need to double the book size on
9878 data_list[i]->book_size_scale = 0;
9880 * Since we double the booksize, we
9881 * must also double the output queue
9882 * size, since it gets shrunk when
9883 * we free by this amount.
9885 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9886 data_list[i]->book_size *= 2;
9890 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9891 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9892 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9894 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9895 (uint32_t) (data_list[i]->send_size +
9896 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9898 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9899 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9900 data_list[i]->whoTo->flight_size,
9901 data_list[i]->book_size,
9902 (uint32_t) (uintptr_t) data_list[i]->whoTo,
9903 data_list[i]->rec.data.TSN_seq);
9905 sctp_flight_size_increase(data_list[i]);
9906 sctp_total_flight_increase(stcb, data_list[i]);
9907 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9908 /* SWS sender side engages */
9909 asoc->peers_rwnd = 0;
9912 (data_list[i]->rec.data.doing_fast_retransmit)) {
9913 SCTP_STAT_INCR(sctps_sendfastretrans);
9914 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9915 (tmr_started == 0)) {
9917 * ok we just fast-retrans'd
9918 * the lowest TSN, i.e the
9919 * first on the list. In
9920 * this case we want to give
9921 * some more time to get a
9922 * SACK back without a
9925 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9926 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
9927 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9931 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9932 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9934 #ifdef SCTP_AUDITING_ENABLED
9935 sctp_auditing(21, inp, stcb, NULL);
9941 if (asoc->sent_queue_retran_cnt <= 0) {
9942 /* all done we have no more to retran */
9943 asoc->sent_queue_retran_cnt = 0;
9947 /* No more room in rwnd */
9950 /* stop the for loop here. we sent out a packet */
9957 sctp_timer_validation(struct sctp_inpcb *inp,
9958 struct sctp_tcb *stcb,
9959 struct sctp_association *asoc)
9961 struct sctp_nets *net;
9963 /* Validate that a timer is running somewhere */
9964 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9965 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9966 /* Here is a timer */
9970 SCTP_TCB_LOCK_ASSERT(stcb);
9971 /* Gak, we did not have a timer somewhere */
9972 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9973 if (asoc->alternate) {
9974 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9976 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9982 sctp_chunk_output(struct sctp_inpcb *inp,
9983 struct sctp_tcb *stcb,
9986 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9992 * Ok this is the generic chunk service queue. we must do the
9994 * - See if there are retransmits pending, if so we must
9996 * - Service the stream queue that is next, moving any
9997 * message (note I must get a complete message i.e.
9998 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
10000 * - Check to see if the cwnd/rwnd allows any output, if so we
10001 * go ahead and formulate and send the low level chunks. Making sure
10002 * to combine any control in the control chunk queue also.
10004 struct sctp_association *asoc;
10005 struct sctp_nets *net;
10006 int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
10007 unsigned int burst_cnt = 0;
10008 struct timeval now;
10009 int now_filled = 0;
10011 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10014 unsigned int tot_frs = 0;
10016 asoc = &stcb->asoc;
10018 /* The Nagle algorithm is only applied when handling a send call. */
10019 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10020 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10028 SCTP_TCB_LOCK_ASSERT(stcb);
10030 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10032 if ((un_sent <= 0) &&
10033 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10034 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10035 (asoc->sent_queue_retran_cnt == 0) &&
10036 (asoc->trigger_reset == 0)) {
10037 /* Nothing to do unless there is something to be sent left */
10041 * Do we have something to send, data or control AND a sack timer
10042 * running, if so piggy-back the sack.
10044 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10045 sctp_send_sack(stcb, so_locked);
10046 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
10048 while (asoc->sent_queue_retran_cnt) {
10050 * Ok, it is retransmission time only, we send out only ONE
10051 * packet with a single call off to the retran code.
10053 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10055 * Special hook for handling cookies discarded
10056 * by peer that carried data. Send cookie-ack only
10057 * and then the next call will get the retrans.
10059 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10061 &now, &now_filled, frag_point, so_locked);
10063 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10064 /* if it's not from a HB then do it */
10066 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10072 * it's from any other place, we don't allow retran
10073 * output (only control)
10078 /* Can't send anymore */
10080 * now lets push out control by calling med-level
10081 * output once. this assures that we WILL send HB's
10084 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10086 &now, &now_filled, frag_point, so_locked);
10087 #ifdef SCTP_AUDITING_ENABLED
10088 sctp_auditing(8, inp, stcb, NULL);
10090 sctp_timer_validation(inp, stcb, asoc);
10095 * The count was off.. retran is not happening so do
10096 * the normal retransmission.
10098 #ifdef SCTP_AUDITING_ENABLED
10099 sctp_auditing(9, inp, stcb, NULL);
10101 if (ret == SCTP_RETRAN_EXIT) {
10106 if (from_where == SCTP_OUTPUT_FROM_T3) {
10107 /* Only one transmission allowed out of a timeout */
10108 #ifdef SCTP_AUDITING_ENABLED
10109 sctp_auditing(10, inp, stcb, NULL);
10111 /* Push out any control */
10112 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10113 &now, &now_filled, frag_point, so_locked);
10116 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10117 /* Hit FR burst limit */
10120 if ((num_out == 0) && (ret == 0)) {
10121 /* No more retrans to send */
10125 #ifdef SCTP_AUDITING_ENABLED
10126 sctp_auditing(12, inp, stcb, NULL);
10128 /* Check for bad destinations, if they exist move chunks around. */
10129 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10130 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10132 * if possible move things off of this address. We
10133 * still may send below due to the dormant state but
10134 * we try to find an alternate address to send to
10135 * and if we have one we move all queued data on the
10136 * out wheel to this alternate address.
10138 if (net->ref_count > 1)
10139 sctp_move_chunks_from_net(stcb, net);
10142 * if ((asoc->sat_network) || (net->addr_is_local))
10143 * { burst_limit = asoc->max_burst *
10144 * SCTP_SAT_NETWORK_BURST_INCR; }
10146 if (asoc->max_burst > 0) {
10147 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10148 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10150 * JRS - Use the congestion
10151 * control given in the
10152 * congestion control module
10154 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10155 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10156 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10158 SCTP_STAT_INCR(sctps_maxburstqueued);
10160 net->fast_retran_ip = 0;
10162 if (net->flight_size == 0) {
10164 * Should be decaying the
10176 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10177 &reason_code, 0, from_where,
10178 &now, &now_filled, frag_point, so_locked);
10180 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10181 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10182 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10185 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10186 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10190 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10192 tot_out += num_out;
10194 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10195 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10196 if (num_out == 0) {
10197 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10202 * When the Nagle algorithm is used, look at how
10203 * much is unsent, then if it's smaller than an MTU
10204 * and we have data in flight we stop, except if we
10205 * are handling a fragmented user message.
10207 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10208 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
10209 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10210 (stcb->asoc.total_flight > 0) &&
10211 ((stcb->asoc.locked_on_sending == NULL) ||
10212 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
10216 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10217 TAILQ_EMPTY(&asoc->send_queue) &&
10218 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
10219 /* Nothing left to send */
10222 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10223 /* Nothing left to send */
10226 } while (num_out &&
10227 ((asoc->max_burst == 0) ||
10228 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10229 (burst_cnt < asoc->max_burst)));
10231 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10232 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10233 SCTP_STAT_INCR(sctps_maxburstqueued);
10234 asoc->burst_limit_applied = 1;
10235 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10236 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10239 asoc->burst_limit_applied = 0;
10242 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10243 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10245 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10249 * Now we need to clean up the control chunk chain if an ECNE is on
10250 * it. It must be marked as UNSENT again so next call will continue
10251 * to send it until such time that we get a CWR, to remove it.
10253 if (stcb->asoc.ecn_echo_cnt_onq)
10254 sctp_fix_ecn_echo(asoc);
10256 if (stcb->asoc.trigger_reset) {
10257 if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) {
10267 struct sctp_inpcb *inp,
10269 struct sockaddr *addr,
10270 struct mbuf *control,
10275 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10278 if (inp->sctp_socket == NULL) {
10279 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10282 return (sctp_sosend(inp->sctp_socket,
10284 (struct uio *)NULL,
10292 send_forward_tsn(struct sctp_tcb *stcb,
10293 struct sctp_association *asoc)
10295 struct sctp_tmit_chunk *chk;
10296 struct sctp_forward_tsn_chunk *fwdtsn;
10297 uint32_t advance_peer_ack_point;
10300 if (asoc->idata_supported) {
10305 SCTP_TCB_LOCK_ASSERT(stcb);
10306 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10307 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10308 /* mark it to unsent */
10309 chk->sent = SCTP_DATAGRAM_UNSENT;
10310 chk->snd_count = 0;
10311 /* Do we correct its output location? */
10313 sctp_free_remote_addr(chk->whoTo);
10316 goto sctp_fill_in_rest;
10319 /* Ok if we reach here we must build one */
10320 sctp_alloc_a_chunk(stcb, chk);
10324 asoc->fwd_tsn_cnt++;
10325 chk->copy_by_ref = 0;
10327 * We don't do the old thing here since this is used not for on-wire
10328 * but to tell if we are sending a fwd-tsn by the stack during
10329 * output. And if it's an IFORWARD or a FORWARD it is a fwd-tsn.
10331 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10332 chk->rec.chunk_id.can_take_data = 0;
10336 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10337 if (chk->data == NULL) {
10338 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10341 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10342 chk->sent = SCTP_DATAGRAM_UNSENT;
10343 chk->snd_count = 0;
10344 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10345 asoc->ctrl_queue_cnt++;
10348 * Here we go through and fill out the part that deals with
10349 * stream/seq of the ones we skip.
10351 SCTP_BUF_LEN(chk->data) = 0;
10353 struct sctp_tmit_chunk *at, *tp1, *last;
10354 struct sctp_strseq *strseq;
10355 struct sctp_strseq_mid *strseq_m;
10356 unsigned int cnt_of_space, i, ovh;
10357 unsigned int space_needed;
10358 unsigned int cnt_of_skipped = 0;
10360 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10361 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10362 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10363 /* no more to look at */
10366 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10367 /* We don't report these */
10373 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10374 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10376 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10377 (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10379 cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
10381 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10382 ovh = SCTP_MIN_OVERHEAD;
10384 ovh = SCTP_MIN_V4_OVERHEAD;
10386 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10387 /* trim to a mtu size */
10388 cnt_of_space = asoc->smallest_mtu - ovh;
10390 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10391 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10392 0xff, 0, cnt_of_skipped,
10393 asoc->advanced_peer_ack_point);
10396 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10397 if (cnt_of_space < space_needed) {
10399 * ok we must trim down the chunk by lowering the
10400 * advance peer ack point.
10402 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10403 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10404 0xff, 0xff, cnt_of_space,
10408 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10409 cnt_of_skipped /= sizeof(struct sctp_strseq);
10411 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10412 cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10415 * Go through and find the TSN that will be the one
10418 at = TAILQ_FIRST(&asoc->sent_queue);
10420 for (i = 0; i < cnt_of_skipped; i++) {
10421 tp1 = TAILQ_NEXT(at, sctp_next);
10428 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10429 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10430 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10431 asoc->advanced_peer_ack_point);
10435 * last now points to last one I can report, update
10439 advance_peer_ack_point = last->rec.data.TSN_seq;
10441 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10442 cnt_of_skipped * sizeof(struct sctp_strseq);
10444 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10445 cnt_of_skipped * sizeof(struct sctp_strseq_mid);
10448 chk->send_size = space_needed;
10449 /* Setup the chunk */
10450 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10451 fwdtsn->ch.chunk_length = htons(chk->send_size);
10452 fwdtsn->ch.chunk_flags = 0;
10454 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10456 fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
10458 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10459 SCTP_BUF_LEN(chk->data) = chk->send_size;
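/*
 * What follows the new cumulative TSN is one entry per skipped
 * message: stream number and stream sequence, or stream number,
 * reserved word and message id when the peer uses I-DATA.
 */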
10462 * Move pointer to after the fwdtsn and transfer to the
10466 strseq = (struct sctp_strseq *)fwdtsn;
10468 strseq_m = (struct sctp_strseq_mid *)fwdtsn;
10471 * Now populate the strseq list. This is done blindly
10472 * without pulling out duplicate stream info. This is
10473 * inefficient but won't harm the process since the peer will
10474 * look at these in sequence and will thus release anything.
10475 * It could mean we exceed the PMTU and chop off some that
10476 * we could have included.. but this is unlikely (aka 1432/4
10477 * would mean 300+ stream seq's would have to be reported in
10478 * one FWD-TSN. With a bit of work we can later FIX this to
10479 * optimize and pull out duplicates.. but it does add more
10480 * overhead. So for now... not!
10482 at = TAILQ_FIRST(&asoc->sent_queue);
10483 for (i = 0; i < cnt_of_skipped; i++) {
10484 tp1 = TAILQ_NEXT(at, sctp_next);
10487 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10488 /* We don't report these */
10493 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10494 at->rec.data.fwd_tsn_cnt = 0;
10497 strseq->stream = ntohs(at->rec.data.stream_number);
10498 strseq->sequence = ntohs(at->rec.data.stream_seq);
10501 strseq_m->stream = ntohs(at->rec.data.stream_number);
10502 strseq_m->reserved = ntohs(0);
10503 strseq_m->msg_id = ntohl(at->rec.data.stream_seq);
10513 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10514 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10520 * Queue up a SACK or NR-SACK in the control queue.
10521 * We must first check to see if a SACK or NR-SACK is
10522 * somehow on the control queue.
10523 * If so, we will take and remove the old one.
10525 struct sctp_association *asoc;
10526 struct sctp_tmit_chunk *chk, *a_chk;
10527 struct sctp_sack_chunk *sack;
10528 struct sctp_nr_sack_chunk *nr_sack;
10529 struct sctp_gap_ack_block *gap_descriptor;
10530 const struct sack_track *selector;
10535 int limit_reached = 0;
10536 unsigned int i, siz, j;
10537 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10540 uint32_t highest_tsn;
10545 if (stcb->asoc.nrsack_supported == 1) {
10546 type = SCTP_NR_SELECTIVE_ACK;
10548 type = SCTP_SELECTIVE_ACK;
10551 asoc = &stcb->asoc;
10552 SCTP_TCB_LOCK_ASSERT(stcb);
10553 if (asoc->last_data_chunk_from == NULL) {
10554 /* Hmm we never received anything */
10557 sctp_slide_mapping_arrays(stcb);
10558 sctp_set_rwnd(stcb, asoc);
10559 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10560 if (chk->rec.chunk_id.id == type) {
10561 /* Hmm, found a sack already on queue, remove it */
10562 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10563 asoc->ctrl_queue_cnt--;
10566 sctp_m_freem(a_chk->data);
10567 a_chk->data = NULL;
10569 if (a_chk->whoTo) {
10570 sctp_free_remote_addr(a_chk->whoTo);
10571 a_chk->whoTo = NULL;
10576 if (a_chk == NULL) {
10577 sctp_alloc_a_chunk(stcb, a_chk);
10578 if (a_chk == NULL) {
10579 /* No memory so we drop the idea, and set a timer */
10580 if (stcb->asoc.delayed_ack) {
10581 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10582 stcb->sctp_ep, stcb, NULL,
10583 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
10584 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10585 stcb->sctp_ep, stcb, NULL);
10587 stcb->asoc.send_sack = 1;
10591 a_chk->copy_by_ref = 0;
10592 a_chk->rec.chunk_id.id = type;
10593 a_chk->rec.chunk_id.can_take_data = 1;
10595 /* Clear our pkt counts */
10596 asoc->data_pkts_seen = 0;
10599 a_chk->asoc = asoc;
10600 a_chk->snd_count = 0;
10601 a_chk->send_size = 0; /* fill in later */
10602 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10603 a_chk->whoTo = NULL;
10605 if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
10607 * Ok, the destination for the SACK is unreachable, lets see if
10608 * we can select an alternate to asoc->last_data_chunk_from
10610 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10611 if (a_chk->whoTo == NULL) {
10612 /* Nope, no alternate */
10613 a_chk->whoTo = asoc->last_data_chunk_from;
10616 a_chk->whoTo = asoc->last_data_chunk_from;
10618 if (a_chk->whoTo) {
10619 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10621 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10622 highest_tsn = asoc->highest_tsn_inside_map;
10624 highest_tsn = asoc->highest_tsn_inside_nr_map;
10626 if (highest_tsn == asoc->cumulative_tsn) {
10628 if (type == SCTP_SELECTIVE_ACK) {
10629 space_req = sizeof(struct sctp_sack_chunk);
10631 space_req = sizeof(struct sctp_nr_sack_chunk);
10634 /* gaps get a cluster */
10635 space_req = MCLBYTES;
10637 /* Ok, now let's formulate an mbuf with our sack */
10638 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
10639 if ((a_chk->data == NULL) ||
10640 (a_chk->whoTo == NULL)) {
10641 /* rats, no mbuf memory */
10643 /* was a problem with the destination */
10644 sctp_m_freem(a_chk->data);
10645 a_chk->data = NULL;
10647 sctp_free_a_chunk(stcb, a_chk, so_locked);
10648 /* sa_ignore NO_NULL_CHK */
10649 if (stcb->asoc.delayed_ack) {
10650 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10651 stcb->sctp_ep, stcb, NULL,
10652 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
10653 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10654 stcb->sctp_ep, stcb, NULL);
10656 stcb->asoc.send_sack = 1;
10660 /* ok, let's go through and fill it in */
10661 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10662 space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
10663 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10664 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10666 limit = mtod(a_chk->data, caddr_t);
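/*
 * 'limit' is based at the start of the chunk data and, together with
 * 'space' (at most one MTU minus overhead), bounds how far gap
 * descriptors and duplicate TSN reports may be appended; every append
 * below is checked against it so the SACK never exceeds a single packet.
 */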
10671 if ((asoc->sctp_cmt_on_off > 0) &&
10672 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10674 * CMT DAC algorithm: If 2 packets (i.e., binary 10) have been
10675 * received, then set the high bit to 1, else 0. Reset
10678 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10679 asoc->cmt_dac_pkts_rcvd = 0;
10681 #ifdef SCTP_ASOCLOG_OF_TSNS
10682 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10683 stcb->asoc.cumack_log_atsnt++;
10684 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10685 stcb->asoc.cumack_log_atsnt = 0;
10688 /* reset the reader's interpretation */
10689 stcb->freed_by_sorcv_sincelast = 0;
10691 if (type == SCTP_SELECTIVE_ACK) {
10692 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10694 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10695 if (highest_tsn > asoc->mapping_array_base_tsn) {
10696 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10698 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
10702 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10703 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10704 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10705 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10707 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10711 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10714 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10716 if (((type == SCTP_SELECTIVE_ACK) &&
10717 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10718 ((type == SCTP_NR_SELECTIVE_ACK) &&
10719 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10720 /* we have a gap .. maybe */
10721 for (i = 0; i < siz; i++) {
10722 tsn_map = asoc->mapping_array[i];
10723 if (type == SCTP_SELECTIVE_ACK) {
10724 tsn_map |= asoc->nr_mapping_array[i];
10728 * Clear all bits corresponding to TSNs
10729 * smaller than or equal to the cumulative TSN.
10731 tsn_map &= (~0U << (1 - offset));
10733 selector = &sack_array[tsn_map];
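/*
 * Each byte of the (combined) mapping array is used as an index into the
 * precomputed sack_array table.  The selected entry lists the gap-ack
 * sub-blocks contained in that byte plus left/right edge flags that tell
 * us whether a run touches the byte boundary and can therefore be merged
 * with the run from the neighbouring byte instead of starting a new gap
 * descriptor.
 */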
10734 if (mergeable && selector->right_edge) {
10736 * Backup, left and right edges were ok to
10742 if (selector->num_entries == 0)
10745 for (j = 0; j < selector->num_entries; j++) {
10746 if (mergeable && selector->right_edge) {
10748 * do a merge by NOT setting
10754 * no merge, set the left
10758 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10760 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10763 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10769 if (selector->left_edge) {
10773 if (limit_reached) {
10774 /* Reached the limit, stop */
10780 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10781 (limit_reached == 0)) {
10785 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10786 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10788 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10791 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10794 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10796 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10797 /* we have a gap .. maybe */
10798 for (i = 0; i < siz; i++) {
10799 tsn_map = asoc->nr_mapping_array[i];
10802 * Clear all bits corresponding to
10803 * TSNs smaller than or equal to the
10806 tsn_map &= (~0U << (1 - offset));
10808 selector = &sack_array[tsn_map];
10809 if (mergeable && selector->right_edge) {
10811 * Backup, left and right edges were
10814 num_nr_gap_blocks--;
10817 if (selector->num_entries == 0)
10820 for (j = 0; j < selector->num_entries; j++) {
10821 if (mergeable && selector->right_edge) {
10823 * do a merge by NOT
10830 * no merge, set the
10834 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10836 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10837 num_nr_gap_blocks++;
10839 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10845 if (selector->left_edge) {
10849 if (limit_reached) {
10850 /* Reached the limit, stop */
10857 /* now we must add any dups we are going to report. */
10858 if ((limit_reached == 0) && (asoc->numduptsns)) {
10859 dup = (uint32_t *) gap_descriptor;
10860 for (i = 0; i < asoc->numduptsns; i++) {
10861 *dup = htonl(asoc->dup_tsns[i]);
10864 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10869 asoc->numduptsns = 0;
10872 * now that the chunk is prepared, queue it to the control chunk
10875 if (type == SCTP_SELECTIVE_ACK) {
10876 a_chk->send_size = (uint16_t) (sizeof(struct sctp_sack_chunk) +
10877 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10878 num_dups * sizeof(int32_t));
10879 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10880 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10881 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10882 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10883 sack->sack.num_dup_tsns = htons(num_dups);
10884 sack->ch.chunk_type = type;
10885 sack->ch.chunk_flags = flags;
10886 sack->ch.chunk_length = htons(a_chk->send_size);
10888 a_chk->send_size = (uint16_t) (sizeof(struct sctp_nr_sack_chunk) +
10889 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10890 num_dups * sizeof(int32_t));
10891 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10892 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10893 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10894 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10895 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10896 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10897 nr_sack->nr_sack.reserved = 0;
10898 nr_sack->ch.chunk_type = type;
10899 nr_sack->ch.chunk_flags = flags;
10900 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10902 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10903 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10904 asoc->ctrl_queue_cnt++;
10905 asoc->send_sack = 0;
10906 SCTP_STAT_INCR(sctps_sendsacks);
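/*
 * sctp_send_abort_tcb() builds an ABORT chunk for an existing association:
 * an AUTH chunk is prepended when the peer requires ABORTs to be
 * authenticated, any supplied error causes are linked in behind the chunk
 * header, and the result is transmitted immediately via
 * sctp_lowlevel_chunk_output() rather than being queued.
 */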
10911 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10912 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10917 struct mbuf *m_abort, *m, *m_last;
10918 struct mbuf *m_out, *m_end = NULL;
10919 struct sctp_abort_chunk *abort;
10920 struct sctp_auth_chunk *auth = NULL;
10921 struct sctp_nets *net;
10923 uint32_t auth_offset = 0;
10924 uint16_t cause_len, chunk_len, padding_len;
10926 SCTP_TCB_LOCK_ASSERT(stcb);
10928 * Add an AUTH chunk, if this chunk type requires it, and save the offset into
10929 * the chain for AUTH
10931 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10932 stcb->asoc.peer_auth_chunks)) {
10933 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10934 stcb, SCTP_ABORT_ASSOCIATION);
10935 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10939 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
10940 if (m_abort == NULL) {
10942 sctp_m_freem(m_out);
10945 sctp_m_freem(operr);
10949 /* link in any error */
10950 SCTP_BUF_NEXT(m_abort) = operr;
10953 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10954 cause_len += (uint16_t) SCTP_BUF_LEN(m);
10955 if (SCTP_BUF_NEXT(m) == NULL) {
10959 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10960 chunk_len = (uint16_t) sizeof(struct sctp_abort_chunk) + cause_len;
10961 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
10962 if (m_out == NULL) {
10963 /* NO Auth chunk prepended, so reserve space in front */
10964 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10967 /* Put AUTH chunk at the front of the chain */
10968 SCTP_BUF_NEXT(m_end) = m_abort;
10970 if (stcb->asoc.alternate) {
10971 net = stcb->asoc.alternate;
10973 net = stcb->asoc.primary_destination;
10975 /* Fill in the ABORT chunk header. */
10976 abort = mtod(m_abort, struct sctp_abort_chunk *);
10977 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10978 if (stcb->asoc.peer_vtag == 0) {
10979 /* This happens iff the assoc is in COOKIE-WAIT state. */
10980 vtag = stcb->asoc.my_vtag;
10981 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
10983 vtag = stcb->asoc.peer_vtag;
10984 abort->ch.chunk_flags = 0;
10986 abort->ch.chunk_length = htons(chunk_len);
10987 /* Add padding, if necessary. */
10988 if (padding_len > 0) {
10989 if ((m_last == NULL) ||
10990 (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
10991 sctp_m_freem(m_out);
10995 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10996 (struct sockaddr *)&net->ro._l_addr,
10997 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10998 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
10999 stcb->asoc.primary_destination->port, NULL,
11002 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11006 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11007 struct sctp_nets *net,
11010 /* formulate and SEND a SHUTDOWN-COMPLETE */
11011 struct mbuf *m_shutdown_comp;
11012 struct sctp_shutdown_complete_chunk *shutdown_complete;
11016 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11017 if (m_shutdown_comp == NULL) {
11021 if (reflect_vtag) {
11022 flags = SCTP_HAD_NO_TCB;
11023 vtag = stcb->asoc.my_vtag;
11026 vtag = stcb->asoc.peer_vtag;
11028 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11029 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11030 shutdown_complete->ch.chunk_flags = flags;
11031 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11032 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11033 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11034 (struct sockaddr *)&net->ro._l_addr,
11035 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11036 stcb->sctp_ep->sctp_lport, stcb->rport,
11040 SCTP_SO_NOT_LOCKED);
11041 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
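/*
 * sctp_send_resp_msg() builds a self-contained, single-chunk reply packet
 * (ABORT, OPERATION-ERROR or SHUTDOWN-COMPLETE) for traffic that has no
 * usable TCB: it constructs the IPv4/IPv6 header itself with source and
 * destination swapped relative to the incoming packet, optionally adds a
 * UDP encapsulation header, fills in the common SCTP header and chunk
 * header, and hands the packet straight to IP output.
 */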
11046 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11047 struct sctphdr *sh, uint32_t vtag,
11048 uint8_t type, struct mbuf *cause,
11049 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11050 uint32_t vrf_id, uint16_t port)
11052 struct mbuf *o_pak;
11054 struct sctphdr *shout;
11055 struct sctp_chunkhdr *ch;
11057 #if defined(INET) || defined(INET6)
11058 struct udphdr *udp;
11062 int len, cause_len, padding_len;
11065 struct sockaddr_in *src_sin, *dst_sin;
11070 struct sockaddr_in6 *src_sin6, *dst_sin6;
11071 struct ip6_hdr *ip6;
11075 /* Compute the length of the cause and add final padding. */
11077 if (cause != NULL) {
11078 struct mbuf *m_at, *m_last = NULL;
11080 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11081 if (SCTP_BUF_NEXT(m_at) == NULL)
11083 cause_len += SCTP_BUF_LEN(m_at);
11085 padding_len = cause_len % 4;
11086 if (padding_len != 0) {
11087 padding_len = 4 - padding_len;
11089 if (padding_len != 0) {
11090 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11091 sctp_m_freem(cause);
11098 /* Get an mbuf for the header. */
11099 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11100 switch (dst->sa_family) {
11103 len += sizeof(struct ip);
11108 len += sizeof(struct ip6_hdr);
11114 #if defined(INET) || defined(INET6)
11116 len += sizeof(struct udphdr);
11119 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11120 if (mout == NULL) {
11122 sctp_m_freem(cause);
11126 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11127 SCTP_BUF_LEN(mout) = len;
11128 SCTP_BUF_NEXT(mout) = cause;
11129 M_SETFIB(mout, fibnum);
11130 mout->m_pkthdr.flowid = mflowid;
11131 M_HASHTYPE_SET(mout, mflowtype);
11138 switch (dst->sa_family) {
11141 src_sin = (struct sockaddr_in *)src;
11142 dst_sin = (struct sockaddr_in *)dst;
11143 ip = mtod(mout, struct ip *);
11144 ip->ip_v = IPVERSION;
11145 ip->ip_hl = (sizeof(struct ip) >> 2);
11149 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11151 ip->ip_p = IPPROTO_UDP;
11153 ip->ip_p = IPPROTO_SCTP;
11155 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11156 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11158 len = sizeof(struct ip);
11159 shout = (struct sctphdr *)((caddr_t)ip + len);
11164 src_sin6 = (struct sockaddr_in6 *)src;
11165 dst_sin6 = (struct sockaddr_in6 *)dst;
11166 ip6 = mtod(mout, struct ip6_hdr *);
11167 ip6->ip6_flow = htonl(0x60000000);
11168 if (V_ip6_auto_flowlabel) {
11169 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11171 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11173 ip6->ip6_nxt = IPPROTO_UDP;
11175 ip6->ip6_nxt = IPPROTO_SCTP;
11177 ip6->ip6_src = dst_sin6->sin6_addr;
11178 ip6->ip6_dst = src_sin6->sin6_addr;
11179 len = sizeof(struct ip6_hdr);
11180 shout = (struct sctphdr *)((caddr_t)ip6 + len);
11185 shout = mtod(mout, struct sctphdr *);
11188 #if defined(INET) || defined(INET6)
11190 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11191 sctp_m_freem(mout);
11194 udp = (struct udphdr *)shout;
11195 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11196 udp->uh_dport = port;
11198 udp->uh_ulen = htons((uint16_t) (sizeof(struct udphdr) +
11199 sizeof(struct sctphdr) +
11200 sizeof(struct sctp_chunkhdr) +
11201 cause_len + padding_len));
11202 len += sizeof(struct udphdr);
11203 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11208 shout->src_port = sh->dest_port;
11209 shout->dest_port = sh->src_port;
11210 shout->checksum = 0;
11212 shout->v_tag = htonl(vtag);
11214 shout->v_tag = sh->v_tag;
11216 len += sizeof(struct sctphdr);
11217 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11218 ch->chunk_type = type;
11220 ch->chunk_flags = 0;
11222 ch->chunk_flags = SCTP_HAD_NO_TCB;
11224 ch->chunk_length = htons((uint16_t) (sizeof(struct sctp_chunkhdr) + cause_len));
11225 len += sizeof(struct sctp_chunkhdr);
11226 len += cause_len + padding_len;
11228 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11229 sctp_m_freem(mout);
11232 SCTP_ATTACH_CHAIN(o_pak, mout, len);
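/*
 * Checksum handling: when the reply is UDP-encapsulated the CRC32c has to
 * be computed in software here (and the UDP checksum set up for the
 * stack), otherwise the CRC32c is left to the offload path by setting the
 * CSUM_SCTP/CSUM_SCTP_IPV6 flags; builds with SCTP_WITH_NO_CSUM skip the
 * checksum entirely.
 */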
11233 switch (dst->sa_family) {
11238 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11243 ip->ip_len = htons(len);
11245 #if defined(SCTP_WITH_NO_CSUM)
11246 SCTP_STAT_INCR(sctps_sendnocrc);
11248 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11249 SCTP_STAT_INCR(sctps_sendswcrc);
11252 SCTP_ENABLE_UDP_CSUM(o_pak);
11255 #if defined(SCTP_WITH_NO_CSUM)
11256 SCTP_STAT_INCR(sctps_sendnocrc);
11258 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11259 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11260 SCTP_STAT_INCR(sctps_sendhwcrc);
11263 #ifdef SCTP_PACKET_LOGGING
11264 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11265 sctp_packet_log(o_pak);
11268 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11273 ip6->ip6_plen = (uint16_t) (len - sizeof(struct ip6_hdr));
11275 #if defined(SCTP_WITH_NO_CSUM)
11276 SCTP_STAT_INCR(sctps_sendnocrc);
11278 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11279 SCTP_STAT_INCR(sctps_sendswcrc);
11281 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11282 udp->uh_sum = 0xffff;
11285 #if defined(SCTP_WITH_NO_CSUM)
11286 SCTP_STAT_INCR(sctps_sendnocrc);
11288 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11289 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11290 SCTP_STAT_INCR(sctps_sendhwcrc);
11293 #ifdef SCTP_PACKET_LOGGING
11294 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11295 sctp_packet_log(o_pak);
11298 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11302 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11304 sctp_m_freem(mout);
11305 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11308 SCTP_STAT_INCR(sctps_sendpackets);
11309 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11310 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11315 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11316 struct sctphdr *sh,
11317 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11318 uint32_t vrf_id, uint16_t port)
11320 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11321 mflowtype, mflowid, fibnum,
11326 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11327 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11332 struct sctp_tmit_chunk *chk;
11333 struct sctp_heartbeat_chunk *hb;
11334 struct timeval now;
11336 SCTP_TCB_LOCK_ASSERT(stcb);
11340 (void)SCTP_GETTIME_TIMEVAL(&now);
11341 switch (net->ro._l_addr.sa.sa_family) {
11353 sctp_alloc_a_chunk(stcb, chk);
11355 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11358 chk->copy_by_ref = 0;
11359 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11360 chk->rec.chunk_id.can_take_data = 1;
11362 chk->asoc = &stcb->asoc;
11363 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11365 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11366 if (chk->data == NULL) {
11367 sctp_free_a_chunk(stcb, chk, so_locked);
11370 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11371 SCTP_BUF_LEN(chk->data) = chk->send_size;
11372 chk->sent = SCTP_DATAGRAM_UNSENT;
11373 chk->snd_count = 0;
11375 atomic_add_int(&chk->whoTo->ref_count, 1);
11376 /* Now we have an mbuf that we can fill in with the details */
11377 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11378 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11379 /* fill out chunk header */
11380 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11381 hb->ch.chunk_flags = 0;
11382 hb->ch.chunk_length = htons(chk->send_size);
11383 /* Fill out hb parameter */
11384 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11385 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11386 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11387 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
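/*
 * The timestamp stored in the heartbeat info parameter is echoed back by
 * the peer in the HEARTBEAT-ACK, which is what allows an RTT estimate to
 * be taken for this path when the ack arrives.
 */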
11388 /* Did our user request this one? Put it in */
11389 hb->heartbeat.hb_info.addr_family = (uint8_t) net->ro._l_addr.sa.sa_family;
11390 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11391 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11393 * we only take from the entropy pool if the address is not
11396 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11397 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11399 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11400 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11402 switch (net->ro._l_addr.sa.sa_family) {
11405 memcpy(hb->heartbeat.hb_info.address,
11406 &net->ro._l_addr.sin.sin_addr,
11407 sizeof(net->ro._l_addr.sin.sin_addr));
11412 memcpy(hb->heartbeat.hb_info.address,
11413 &net->ro._l_addr.sin6.sin6_addr,
11414 sizeof(net->ro._l_addr.sin6.sin6_addr));
11419 sctp_m_freem(chk->data);
11422 sctp_free_a_chunk(stcb, chk, so_locked);
11426 net->hb_responded = 0;
11427 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11428 stcb->asoc.ctrl_queue_cnt++;
11429 SCTP_STAT_INCR(sctps_sendheartbeat);
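/*
 * sctp_send_ecn_echo() keeps at most one ECN-ECHO chunk queued per
 * destination: if one is already on the control queue for this net its
 * TSN is advanced and its packet counter bumped, otherwise a new chunk is
 * allocated and inserted at the head of the queue.
 */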
11434 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11437 struct sctp_association *asoc;
11438 struct sctp_ecne_chunk *ecne;
11439 struct sctp_tmit_chunk *chk;
11444 asoc = &stcb->asoc;
11445 SCTP_TCB_LOCK_ASSERT(stcb);
11446 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11447 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11448 /* found a previous ECN_ECHO, update it if needed */
11449 uint32_t cnt, ctsn;
11451 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11452 ctsn = ntohl(ecne->tsn);
11453 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11454 ecne->tsn = htonl(high_tsn);
11455 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11457 cnt = ntohl(ecne->num_pkts_since_cwr);
11459 ecne->num_pkts_since_cwr = htonl(cnt);
11463 /* nope, could not find one to update, so we must build one */
11464 sctp_alloc_a_chunk(stcb, chk);
11468 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11469 chk->copy_by_ref = 0;
11470 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11471 chk->rec.chunk_id.can_take_data = 0;
11473 chk->asoc = &stcb->asoc;
11474 chk->send_size = sizeof(struct sctp_ecne_chunk);
11475 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11476 if (chk->data == NULL) {
11477 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11480 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11481 SCTP_BUF_LEN(chk->data) = chk->send_size;
11482 chk->sent = SCTP_DATAGRAM_UNSENT;
11483 chk->snd_count = 0;
11485 atomic_add_int(&chk->whoTo->ref_count, 1);
11487 stcb->asoc.ecn_echo_cnt_onq++;
11488 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11489 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11490 ecne->ch.chunk_flags = 0;
11491 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11492 ecne->tsn = htonl(high_tsn);
11493 ecne->num_pkts_since_cwr = htonl(1);
11494 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11495 asoc->ctrl_queue_cnt++;
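/*
 * sctp_send_packet_dropped() reports a dropped/corrupted packet back to
 * the peer.  It is only sent when the peer advertised PKT-DROP support,
 * never in response to an ABORT, INIT-ACK or another PKT-DROP, and the
 * copied packet is truncated so the report still fits in one MTU.  The
 * chunk also carries the local receive-buffer limit and the amount of
 * data currently queued so the peer can judge the bottleneck.
 */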
11499 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11500 struct mbuf *m, int len, int iphlen, int bad_crc)
11502 struct sctp_association *asoc;
11503 struct sctp_pktdrop_chunk *drp;
11504 struct sctp_tmit_chunk *chk;
11510 struct sctp_chunkhdr *ch, chunk_buf;
11511 unsigned int chk_length;
11516 asoc = &stcb->asoc;
11517 SCTP_TCB_LOCK_ASSERT(stcb);
11518 if (asoc->pktdrop_supported == 0) {
11520 * peer must declare support before I send one.
11524 if (stcb->sctp_socket == NULL) {
11527 sctp_alloc_a_chunk(stcb, chk);
11531 chk->copy_by_ref = 0;
11532 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11533 chk->rec.chunk_id.can_take_data = 1;
11536 chk->send_size = len;
11537 /* Validate that we do not have an ABORT in here. */
11538 offset = iphlen + sizeof(struct sctphdr);
11539 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11540 sizeof(*ch), (uint8_t *) & chunk_buf);
11541 while (ch != NULL) {
11542 chk_length = ntohs(ch->chunk_length);
11543 if (chk_length < sizeof(*ch)) {
11544 /* break to abort land */
11547 switch (ch->chunk_type) {
11548 case SCTP_PACKET_DROPPED:
11549 case SCTP_ABORT_ASSOCIATION:
11550 case SCTP_INITIATION_ACK:
11552 * We don't respond with a PKT-DROP to an ABORT
11553 * or PKT-DROP. We also do not respond to an
11554 * INIT-ACK, because we can't know if the initiation
11555 * tag is correct or not.
11557 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11562 offset += SCTP_SIZE32(chk_length);
11563 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11564 sizeof(*ch), (uint8_t *) & chunk_buf);
11567 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11568 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11570 * only send 1 MTU's worth; trim off the excess at the end.
11573 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11576 chk->asoc = &stcb->asoc;
11577 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11578 if (chk->data == NULL) {
11580 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11583 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11584 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11586 sctp_m_freem(chk->data);
11590 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11591 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11592 chk->book_size_scale = 0;
11594 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11595 drp->trunc_len = htons(fullsz);
11597 * Len is already adjusted to size minus overhead above; take
11598 * out the pkt_drop chunk itself from it.
11600 chk->send_size = (uint16_t) (len - sizeof(struct sctp_pktdrop_chunk));
11601 len = chk->send_size;
11603 /* no truncation needed */
11604 drp->ch.chunk_flags = 0;
11605 drp->trunc_len = htons(0);
11608 drp->ch.chunk_flags |= SCTP_BADCRC;
11610 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11611 SCTP_BUF_LEN(chk->data) = chk->send_size;
11612 chk->sent = SCTP_DATAGRAM_UNSENT;
11613 chk->snd_count = 0;
11615 /* we should hit here */
11617 atomic_add_int(&chk->whoTo->ref_count, 1);
11621 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11622 drp->ch.chunk_length = htons(chk->send_size);
11623 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11627 drp->bottle_bw = htonl(spc);
11628 if (asoc->my_rwnd) {
11629 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11630 asoc->size_on_all_streams +
11631 asoc->my_rwnd_control_len +
11632 stcb->sctp_socket->so_rcv.sb_cc);
11635 * If my rwnd is 0, possibly from mbuf depletion as well as
11636 * space used, tell the peer there is NO space aka onq == bw
11638 drp->current_onq = htonl(spc);
11642 m_copydata(m, iphlen, len, (caddr_t)datap);
11643 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11644 asoc->ctrl_queue_cnt++;
11648 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11650 struct sctp_association *asoc;
11651 struct sctp_cwr_chunk *cwr;
11652 struct sctp_tmit_chunk *chk;
11654 SCTP_TCB_LOCK_ASSERT(stcb);
11658 asoc = &stcb->asoc;
11659 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11660 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11662 * found a previous CWR queued to the same destination;
11663 * update it if needed
11667 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11668 ctsn = ntohl(cwr->tsn);
11669 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11670 cwr->tsn = htonl(high_tsn);
11672 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11673 /* Make sure override is carried */
11674 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11679 sctp_alloc_a_chunk(stcb, chk);
11683 chk->copy_by_ref = 0;
11684 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11685 chk->rec.chunk_id.can_take_data = 1;
11687 chk->asoc = &stcb->asoc;
11688 chk->send_size = sizeof(struct sctp_cwr_chunk);
11689 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11690 if (chk->data == NULL) {
11691 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11694 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11695 SCTP_BUF_LEN(chk->data) = chk->send_size;
11696 chk->sent = SCTP_DATAGRAM_UNSENT;
11697 chk->snd_count = 0;
11699 atomic_add_int(&chk->whoTo->ref_count, 1);
11700 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11701 cwr->ch.chunk_type = SCTP_ECN_CWR;
11702 cwr->ch.chunk_flags = override;
11703 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11704 cwr->tsn = htonl(high_tsn);
11705 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11706 asoc->ctrl_queue_cnt++;
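/*
 * The sctp_add_stream_reset_*() helpers below each append one RE-CONFIG
 * parameter to the stream-reset chunk being assembled in chk->data: they
 * locate the current end of the chunk from ch->chunk_length, write the
 * parameter, then update the chunk length and the chunk's book/send size.
 * Any 4-byte padding needed is provided by zeroing the slot following the
 * last stream number.
 */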
11710 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
11711 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11713 uint16_t len, old_len, i;
11714 struct sctp_stream_reset_out_request *req_out;
11715 struct sctp_chunkhdr *ch;
11717 int number_entries = 0;
11719 ch = mtod(chk->data, struct sctp_chunkhdr *);
11720 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11721 /* get to new offset for the param. */
11722 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11723 /* now how long will this param be? */
11724 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11725 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11726 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11727 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11731 if (number_entries == 0) {
11734 if (number_entries == stcb->asoc.streamoutcnt) {
11735 number_entries = 0;
11737 if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
11738 number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
11740 len = (uint16_t) (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11741 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11742 req_out->ph.param_length = htons(len);
11743 req_out->request_seq = htonl(seq);
11744 req_out->response_seq = htonl(resp_seq);
11745 req_out->send_reset_at_tsn = htonl(last_sent);
11747 if (number_entries) {
11748 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11749 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11750 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11751 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11752 req_out->list_of_streams[at] = htons(i);
11754 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11755 if (at >= number_entries) {
11761 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11762 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11765 if (SCTP_SIZE32(len) > len) {
11767 * Need to worry about the pad we may end up adding to the
11768 * end. This is easy since the struct is either aligned to 4
11769 * bytes or 2 bytes off.
11771 req_out->list_of_streams[number_entries] = 0;
11773 /* now fix the chunk length */
11774 ch->chunk_length = htons(len + old_len);
11775 chk->book_size = len + old_len;
11776 chk->book_size_scale = 0;
11777 chk->send_size = SCTP_SIZE32(chk->book_size);
11778 SCTP_BUF_LEN(chk->data) = chk->send_size;
11783 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11784 int number_entries, uint16_t * list,
11787 uint16_t len, old_len, i;
11788 struct sctp_stream_reset_in_request *req_in;
11789 struct sctp_chunkhdr *ch;
11791 ch = mtod(chk->data, struct sctp_chunkhdr *);
11792 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11794 /* get to new offset for the param. */
11795 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11796 /* now how long will this param be? */
11797 len = (uint16_t) (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11798 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11799 req_in->ph.param_length = htons(len);
11800 req_in->request_seq = htonl(seq);
11801 if (number_entries) {
11802 for (i = 0; i < number_entries; i++) {
11803 req_in->list_of_streams[i] = htons(list[i]);
11806 if (SCTP_SIZE32(len) > len) {
11808 * Need to worry about the pad we may end up adding to the
11809 * end. This is easy since the struct is either aligned to 4
11810 * bytes or 2 bytes off.
11812 req_in->list_of_streams[number_entries] = 0;
11814 /* now fix the chunk length */
11815 ch->chunk_length = htons(len + old_len);
11816 chk->book_size = len + old_len;
11817 chk->book_size_scale = 0;
11818 chk->send_size = SCTP_SIZE32(chk->book_size);
11819 SCTP_BUF_LEN(chk->data) = chk->send_size;
11824 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11827 uint16_t len, old_len;
11828 struct sctp_stream_reset_tsn_request *req_tsn;
11829 struct sctp_chunkhdr *ch;
11831 ch = mtod(chk->data, struct sctp_chunkhdr *);
11832 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11834 /* get to new offset for the param. */
11835 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11836 /* now how long will this param be? */
11837 len = sizeof(struct sctp_stream_reset_tsn_request);
11838 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11839 req_tsn->ph.param_length = htons(len);
11840 req_tsn->request_seq = htonl(seq);
11842 /* now fix the chunk length */
11843 ch->chunk_length = htons(len + old_len);
11844 chk->send_size = len + old_len;
11845 chk->book_size = SCTP_SIZE32(chk->send_size);
11846 chk->book_size_scale = 0;
11847 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11852 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11853 uint32_t resp_seq, uint32_t result)
11855 uint16_t len, old_len;
11856 struct sctp_stream_reset_response *resp;
11857 struct sctp_chunkhdr *ch;
11859 ch = mtod(chk->data, struct sctp_chunkhdr *);
11860 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11862 /* get to new offset for the param. */
11863 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11864 /* now how long will this param be? */
11865 len = sizeof(struct sctp_stream_reset_response);
11866 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11867 resp->ph.param_length = htons(len);
11868 resp->response_seq = htonl(resp_seq);
11869 resp->result = htonl(result);
11871 /* now fix the chunk length */
11872 ch->chunk_length = htons(len + old_len);
11873 chk->book_size = len + old_len;
11874 chk->book_size_scale = 0;
11875 chk->send_size = SCTP_SIZE32(chk->book_size);
11876 SCTP_BUF_LEN(chk->data) = chk->send_size;
11881 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
11882 struct sctp_stream_reset_list *ent,
11885 struct sctp_association *asoc;
11886 struct sctp_tmit_chunk *chk;
11887 struct sctp_chunkhdr *ch;
11889 asoc = &stcb->asoc;
11892 * Reset our last reset action to the new one IP -> response
11893 * (PERFORMED probably). This assures that if we fail to send, a
11894 * retran from the peer will get the new response.
11896 asoc->last_reset_action[0] = response;
11897 if (asoc->stream_reset_outstanding) {
11900 sctp_alloc_a_chunk(stcb, chk);
11902 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11905 chk->copy_by_ref = 0;
11906 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11907 chk->rec.chunk_id.can_take_data = 0;
11909 chk->asoc = &stcb->asoc;
11910 chk->book_size = sizeof(struct sctp_chunkhdr);
11911 chk->send_size = SCTP_SIZE32(chk->book_size);
11912 chk->book_size_scale = 0;
11913 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11914 if (chk->data == NULL) {
11915 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11916 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11919 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11920 sctp_add_stream_reset_result(chk, ent->seq, response);
11921 /* setup chunk parameters */
11922 chk->sent = SCTP_DATAGRAM_UNSENT;
11923 chk->snd_count = 0;
11924 if (stcb->asoc.alternate) {
11925 chk->whoTo = stcb->asoc.alternate;
11927 chk->whoTo = stcb->asoc.primary_destination;
11929 ch = mtod(chk->data, struct sctp_chunkhdr *);
11930 ch->chunk_type = SCTP_STREAM_RESET;
11931 ch->chunk_flags = 0;
11932 ch->chunk_length = htons(chk->book_size);
11933 atomic_add_int(&chk->whoTo->ref_count, 1);
11934 SCTP_BUF_LEN(chk->data) = chk->send_size;
11935 /* insert the chunk for sending */
11936 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11939 asoc->ctrl_queue_cnt++;
11943 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11944 uint32_t resp_seq, uint32_t result,
11945 uint32_t send_una, uint32_t recv_next)
11947 uint16_t len, old_len;
11948 struct sctp_stream_reset_response_tsn *resp;
11949 struct sctp_chunkhdr *ch;
11951 ch = mtod(chk->data, struct sctp_chunkhdr *);
11952 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11954 /* get to new offset for the param. */
11955 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11956 /* now how long will this param be? */
11957 len = sizeof(struct sctp_stream_reset_response_tsn);
11958 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11959 resp->ph.param_length = htons(len);
11960 resp->response_seq = htonl(resp_seq);
11961 resp->result = htonl(result);
11962 resp->senders_next_tsn = htonl(send_una);
11963 resp->receivers_next_tsn = htonl(recv_next);
11965 /* now fix the chunk length */
11966 ch->chunk_length = htons(len + old_len);
11967 chk->book_size = len + old_len;
11968 chk->send_size = SCTP_SIZE32(chk->book_size);
11969 chk->book_size_scale = 0;
11970 SCTP_BUF_LEN(chk->data) = chk->send_size;
11975 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11979 uint16_t len, old_len;
11980 struct sctp_chunkhdr *ch;
11981 struct sctp_stream_reset_add_strm *addstr;
11983 ch = mtod(chk->data, struct sctp_chunkhdr *);
11984 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11986 /* get to new offset for the param. */
11987 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11988 /* now how long will this param be? */
11989 len = sizeof(struct sctp_stream_reset_add_strm);
11992 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11993 addstr->ph.param_length = htons(len);
11994 addstr->request_seq = htonl(seq);
11995 addstr->number_of_streams = htons(adding);
11996 addstr->reserved = 0;
11998 /* now fix the chunk length */
11999 ch->chunk_length = htons(len + old_len);
12000 chk->send_size = len + old_len;
12001 chk->book_size = SCTP_SIZE32(chk->send_size);
12002 chk->book_size_scale = 0;
12003 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12008 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12012 uint16_t len, old_len;
12013 struct sctp_chunkhdr *ch;
12014 struct sctp_stream_reset_add_strm *addstr;
12016 ch = mtod(chk->data, struct sctp_chunkhdr *);
12017 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12019 /* get to new offset for the param. */
12020 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12021 /* now how long will this param be? */
12022 len = sizeof(struct sctp_stream_reset_add_strm);
12024 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12025 addstr->ph.param_length = htons(len);
12026 addstr->request_seq = htonl(seq);
12027 addstr->number_of_streams = htons(adding);
12028 addstr->reserved = 0;
12030 /* now fix the chunk length */
12031 ch->chunk_length = htons(len + old_len);
12032 chk->send_size = len + old_len;
12033 chk->book_size = SCTP_SIZE32(chk->send_size);
12034 chk->book_size_scale = 0;
12035 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12040 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
12042 struct sctp_association *asoc;
12043 struct sctp_tmit_chunk *chk;
12044 struct sctp_chunkhdr *ch;
12047 asoc = &stcb->asoc;
12048 asoc->trigger_reset = 0;
12049 if (asoc->stream_reset_outstanding) {
12052 sctp_alloc_a_chunk(stcb, chk);
12054 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12057 chk->copy_by_ref = 0;
12058 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12059 chk->rec.chunk_id.can_take_data = 0;
12061 chk->asoc = &stcb->asoc;
12062 chk->book_size = sizeof(struct sctp_chunkhdr);
12063 chk->send_size = SCTP_SIZE32(chk->book_size);
12064 chk->book_size_scale = 0;
12065 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12066 if (chk->data == NULL) {
12067 sctp_free_a_chunk(stcb, chk, so_locked);
12068 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12071 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12073 /* setup chunk parameters */
12074 chk->sent = SCTP_DATAGRAM_UNSENT;
12075 chk->snd_count = 0;
12076 if (stcb->asoc.alternate) {
12077 chk->whoTo = stcb->asoc.alternate;
12079 chk->whoTo = stcb->asoc.primary_destination;
12081 ch = mtod(chk->data, struct sctp_chunkhdr *);
12082 ch->chunk_type = SCTP_STREAM_RESET;
12083 ch->chunk_flags = 0;
12084 ch->chunk_length = htons(chk->book_size);
12085 atomic_add_int(&chk->whoTo->ref_count, 1);
12086 SCTP_BUF_LEN(chk->data) = chk->send_size;
12087 seq = stcb->asoc.str_reset_seq_out;
12088 if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12090 asoc->stream_reset_outstanding++;
12092 m_freem(chk->data);
12094 sctp_free_a_chunk(stcb, chk, so_locked);
12097 asoc->str_reset = chk;
12098 /* insert the chunk for sending */
12099 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12102 asoc->ctrl_queue_cnt++;
12104 if (stcb->asoc.send_sack) {
12105 sctp_send_sack(stcb, so_locked);
12107 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
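/*
 * sctp_send_str_reset_req() is the user-driven entry point for RE-CONFIG:
 * it refuses to start a new request while one is still outstanding
 * (EBUSY), rejects empty or contradictory combinations (EINVAL), and then
 * builds a single stream-reset chunk carrying whichever OUT/IN/TSN reset
 * and add-stream parameters were asked for, growing the local outgoing
 * stream array first if new outbound streams are being added.
 */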
12112 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12113 uint16_t number_entries, uint16_t * list,
12114 uint8_t send_in_req,
12115 uint8_t send_tsn_req,
12116 uint8_t add_stream,
12118 uint16_t adding_i, uint8_t peer_asked)
12120 struct sctp_association *asoc;
12121 struct sctp_tmit_chunk *chk;
12122 struct sctp_chunkhdr *ch;
12123 int can_send_out_req = 0;
12126 asoc = &stcb->asoc;
12127 if (asoc->stream_reset_outstanding) {
12129 * Already one pending, must get ACK back to clear the flag.
12131 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12134 if ((send_in_req == 0) && (send_tsn_req == 0) &&
12135 (add_stream == 0)) {
12136 /* nothing to do */
12137 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12140 if (send_tsn_req && send_in_req) {
12141 /* error, can't do that */
12142 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12144 } else if (send_in_req) {
12145 can_send_out_req = 1;
12147 if (number_entries > (MCLBYTES -
12148 SCTP_MIN_OVERHEAD -
12149 sizeof(struct sctp_chunkhdr) -
12150 sizeof(struct sctp_stream_reset_out_request)) /
12151 sizeof(uint16_t)) {
12152 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12155 sctp_alloc_a_chunk(stcb, chk);
12157 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12160 chk->copy_by_ref = 0;
12161 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12162 chk->rec.chunk_id.can_take_data = 0;
12164 chk->asoc = &stcb->asoc;
12165 chk->book_size = sizeof(struct sctp_chunkhdr);
12166 chk->send_size = SCTP_SIZE32(chk->book_size);
12167 chk->book_size_scale = 0;
12169 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12170 if (chk->data == NULL) {
12171 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12172 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12175 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12177 /* setup chunk parameters */
12178 chk->sent = SCTP_DATAGRAM_UNSENT;
12179 chk->snd_count = 0;
12180 if (stcb->asoc.alternate) {
12181 chk->whoTo = stcb->asoc.alternate;
12183 chk->whoTo = stcb->asoc.primary_destination;
12185 atomic_add_int(&chk->whoTo->ref_count, 1);
12186 ch = mtod(chk->data, struct sctp_chunkhdr *);
12187 ch->chunk_type = SCTP_STREAM_RESET;
12188 ch->chunk_flags = 0;
12189 ch->chunk_length = htons(chk->book_size);
12190 SCTP_BUF_LEN(chk->data) = chk->send_size;
12192 seq = stcb->asoc.str_reset_seq_out;
12193 if (can_send_out_req) {
12196 ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12199 asoc->stream_reset_outstanding++;
12202 if ((add_stream & 1) &&
12203 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12204 /* Need to allocate more */
12205 struct sctp_stream_out *oldstream;
12206 struct sctp_stream_queue_pending *sp, *nsp;
12209 #if defined(SCTP_DETAILED_STR_STATS)
12214 oldstream = stcb->asoc.strmout;
12215 /* get some more */
12216 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12217 (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12219 if (stcb->asoc.strmout == NULL) {
12222 stcb->asoc.strmout = oldstream;
12223 /* Turn off the bit */
12224 x = add_stream & 0xfe;
12229 * Ok now we proceed with copying the old out stuff and
12230 * initializing the new stuff.
12232 SCTP_TCB_SEND_LOCK(stcb);
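/*
 * The copy from the old stream array is done under the send lock: queued
 * data and scheduler state are migrated per stream, and any association
 * pointers that referenced entries of the old array (last_out_stream,
 * locked_on_sending) are re-targeted before the old array is freed.
 */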
12233 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12234 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12235 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12236 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12237 stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
12238 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12239 stcb->asoc.strmout[i].stream_no = i;
12240 stcb->asoc.strmout[i].state = oldstream[i].state;
12241 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
12242 /* now anything on those queues? */
12243 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12244 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12245 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12247 /* Now move assoc pointers too */
12248 if (stcb->asoc.last_out_stream == &oldstream[i]) {
12249 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
12251 if (stcb->asoc.locked_on_sending == &oldstream[i]) {
12252 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
12255 /* now the new streams */
12256 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
12257 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12258 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12259 stcb->asoc.strmout[i].chunks_on_queues = 0;
12260 #if defined(SCTP_DETAILED_STR_STATS)
12261 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12262 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12263 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12266 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12267 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12269 stcb->asoc.strmout[i].next_sequence_send = 0x0;
12270 stcb->asoc.strmout[i].stream_no = i;
12271 stcb->asoc.strmout[i].last_msg_incomplete = 0;
12272 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
12273 stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
12275 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
12276 SCTP_FREE(oldstream, SCTP_M_STRMO);
12277 SCTP_TCB_SEND_UNLOCK(stcb);
12280 if ((add_stream & 1) && (adding_o > 0)) {
12281 asoc->strm_pending_add_size = adding_o;
12282 asoc->peer_req_out = peer_asked;
12283 sctp_add_an_out_stream(chk, seq, adding_o);
12285 asoc->stream_reset_outstanding++;
12287 if ((add_stream & 2) && (adding_i > 0)) {
12288 sctp_add_an_in_stream(chk, seq, adding_i);
12290 asoc->stream_reset_outstanding++;
12293 sctp_add_stream_reset_in(chk, number_entries, list, seq);
12295 asoc->stream_reset_outstanding++;
12297 if (send_tsn_req) {
12298 sctp_add_stream_reset_tsn(chk, seq);
12299 asoc->stream_reset_outstanding++;
12301 asoc->str_reset = chk;
12302 /* insert the chunk for sending */
12303 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12306 asoc->ctrl_queue_cnt++;
12307 if (stcb->asoc.send_sack) {
12308 sctp_send_sack(stcb, SCTP_SO_LOCKED);
12310 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12315 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
12316 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12317 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12318 uint32_t vrf_id, uint16_t port)
12320 /* Don't respond to an ABORT with an ABORT. */
12321 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
12323 sctp_m_freem(cause);
12326 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
12327 mflowtype, mflowid, fibnum,
12333 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
12334 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12335 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12336 uint32_t vrf_id, uint16_t port)
12338 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
12339 mflowtype, mflowid, fibnum,
12344 static struct mbuf *
12345 sctp_copy_resume(struct uio *uio,
12347 int user_marks_eor,
12350 struct mbuf **new_tail)
12354 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12355 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12357 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12360 *sndout = m_length(m, NULL);
12361 *new_tail = m_last(m);
12367 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12371 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12373 if (sp->data == NULL) {
12374 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12377 sp->tail_mbuf = m_last(sp->data);
12383 static struct sctp_stream_queue_pending *
12384 sctp_copy_it_in(struct sctp_tcb *stcb,
12385 struct sctp_association *asoc,
12386 struct sctp_sndrcvinfo *srcv,
12388 struct sctp_nets *net,
12390 int user_marks_eor,
12394 * This routine must be very careful in its work. Protocol
12395 * processing is up and running so care must be taken to spl...()
12396 * when you need to do something that may affect the stcb/asoc. The
12397 * sb is locked however. When data is copied the protocol processing
12398 * should be enabled since this is a slower operation...
12400 struct sctp_stream_queue_pending *sp = NULL;
12404 /* Now can we send this? */
12405 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12406 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12407 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12408 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12409 /* got data while shutting down */
12410 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12411 *error = ECONNRESET;
12414 sctp_alloc_a_strmoq(stcb, sp);
12416 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12421 sp->sender_all_done = 0;
12422 sp->sinfo_flags = srcv->sinfo_flags;
12423 sp->timetolive = srcv->sinfo_timetolive;
12424 sp->ppid = srcv->sinfo_ppid;
12425 sp->context = srcv->sinfo_context;
12427 sp->msg_id = atomic_fetchadd_int(&stcb->asoc.assoc_msg_id, 1);
12428 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12430 sp->stream = srcv->sinfo_stream;
12431 sp->length = (uint32_t) min(uio->uio_resid, max_send_len);
12432 if ((sp->length == (uint32_t) uio->uio_resid) &&
12433 ((user_marks_eor == 0) ||
12434 (srcv->sinfo_flags & SCTP_EOF) ||
12435 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12436 sp->msg_is_complete = 1;
12438 sp->msg_is_complete = 0;
12440 sp->sender_all_done = 0;
12441 sp->some_taken = 0;
12442 sp->put_last_out = 0;
12443 resv_in_first = sizeof(struct sctp_data_chunk);
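/*
 * Room for a DATA chunk header is reserved at the front of the user-data
 * mbuf chain so the header can later be prepended in place instead of
 * requiring another allocation.
 */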
12444 sp->data = sp->tail_mbuf = NULL;
12445 if (sp->length == 0) {
12449 if (srcv->sinfo_keynumber_valid) {
12450 sp->auth_keyid = srcv->sinfo_keynumber;
12452 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12454 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12455 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12456 sp->holds_key_ref = 1;
12458 *error = sctp_copy_one(sp, uio, resv_in_first);
12461 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12464 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12466 atomic_add_int(&sp->net->ref_count, 1);
12470 sctp_set_prsctp_policy(sp);
12478 sctp_sosend(struct socket *so,
12479 struct sockaddr *addr,
12482 struct mbuf *control,
12487 int error, use_sndinfo = 0;
12488 struct sctp_sndrcvinfo sndrcvninfo;
12489 struct sockaddr *addr_to_use;
12491 #if defined(INET) && defined(INET6)
12492 struct sockaddr_in sin;
12497 /* process cmsg snd/rcv info (maybe an assoc-id) */
12498 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12499 sizeof(sndrcvninfo))) {
12504 addr_to_use = addr;
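/*
 * On dual-stack sockets an IPv4-mapped IPv6 address is converted to a
 * plain sockaddr_in below, so sctp_lower_sosend() only ever sees a
 * genuine IPv4 or IPv6 address.
 */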
12505 #if defined(INET) && defined(INET6)
12506 if ((addr) && (addr->sa_family == AF_INET6)) {
12507 struct sockaddr_in6 *sin6;
12509 sin6 = (struct sockaddr_in6 *)addr;
12510 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12511 in6_sin6_2_sin(&sin, sin6);
12512 addr_to_use = (struct sockaddr *)&sin;
12516 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12519 use_sndinfo ? &sndrcvninfo : NULL
12527 sctp_lower_sosend(struct socket *so,
12528 struct sockaddr *addr,
12530 struct mbuf *i_pak,
12531 struct mbuf *control,
12533 struct sctp_sndrcvinfo *srcv
12538 unsigned int sndlen = 0, max_len;
12540 struct mbuf *top = NULL;
12541 int queue_only = 0, queue_only_for_init = 0;
12542 int free_cnt_applied = 0;
12544 int now_filled = 0;
12545 unsigned int inqueue_bytes = 0;
12546 struct sctp_block_entry be;
12547 struct sctp_inpcb *inp;
12548 struct sctp_tcb *stcb = NULL;
12549 struct timeval now;
12550 struct sctp_nets *net;
12551 struct sctp_association *asoc;
12552 struct sctp_inpcb *t_inp;
12553 int user_marks_eor;
12554 int create_lock_applied = 0;
12555 int nagle_applies = 0;
12556 int some_on_control = 0;
12557 int got_all_of_the_send = 0;
12558 int hold_tcblock = 0;
12559 int non_blocking = 0;
12560 uint32_t local_add_more, local_soresv = 0;
12562 uint16_t sinfo_flags;
12563 sctp_assoc_t sinfo_assoc_id;
12570 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12572 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12575 SCTP_RELEASE_PKT(i_pak);
12579 if ((uio == NULL) && (i_pak == NULL)) {
12580 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12583 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12584 atomic_add_int(&inp->total_sends, 1);
12586 if (uio->uio_resid < 0) {
12587 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12590 sndlen = (unsigned int)uio->uio_resid;
12592 top = SCTP_HEADER_TO_CHAIN(i_pak);
12593 sndlen = SCTP_HEADER_LEN(i_pak);
12595 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12598 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12599 (inp->sctp_socket->so_qlimit)) {
12600 /* The listener can NOT send */
12601 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12606 * Pre-screen address: if one is given, the sin-len
12607 * must be set correctly!
12610 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12612 switch (raddr->sa.sa_family) {
12615 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12616 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12620 port = raddr->sin.sin_port;
12625 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12626 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12630 port = raddr->sin6.sin6_port;
12634 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12635 error = EAFNOSUPPORT;
12642 sinfo_flags = srcv->sinfo_flags;
12643 sinfo_assoc_id = srcv->sinfo_assoc_id;
12644 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12645 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12646 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12650 if (srcv->sinfo_flags)
12651 SCTP_STAT_INCR(sctps_sends_with_flags);
12653 sinfo_flags = inp->def_send.sinfo_flags;
12654 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12656 if (sinfo_flags & SCTP_SENDALL) {
12657 /* it's a sendall */
12658 error = sctp_sendall(inp, uio, top, srcv);
12662 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12663 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12667 /* now we must find the assoc */
12668 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12669 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12670 SCTP_INP_RLOCK(inp);
12671 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12673 SCTP_TCB_LOCK(stcb);
12676 SCTP_INP_RUNLOCK(inp);
12677 } else if (sinfo_assoc_id) {
12678 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
12681 * Since we did not use findep we must
12682 * increment it, and if we don't find a tcb
12685 SCTP_INP_WLOCK(inp);
12686 SCTP_INP_INCR_REF(inp);
12687 SCTP_INP_WUNLOCK(inp);
12688 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12689 if (stcb == NULL) {
12690 SCTP_INP_WLOCK(inp);
12691 SCTP_INP_DECR_REF(inp);
12692 SCTP_INP_WUNLOCK(inp);
12697 if ((stcb == NULL) && (addr)) {
12698 /* Possible implicit send? */
12699 SCTP_ASOC_CREATE_LOCK(inp);
12700 create_lock_applied = 1;
12701 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12702 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12703 /* Should I really unlock? */
12704 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12709 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12710 (addr->sa_family == AF_INET6)) {
12711 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12715 SCTP_INP_WLOCK(inp);
12716 SCTP_INP_INCR_REF(inp);
12717 SCTP_INP_WUNLOCK(inp);
12718 /* With the lock applied, look again */
12719 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12720 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12721 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12723 if (stcb == NULL) {
12724 SCTP_INP_WLOCK(inp);
12725 SCTP_INP_DECR_REF(inp);
12726 SCTP_INP_WUNLOCK(inp);
12733 if (t_inp != inp) {
12734 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12739 if (stcb == NULL) {
12740 if (addr == NULL) {
12741 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12745 /* We must go ahead and start the INIT process */
12748 if ((sinfo_flags & SCTP_ABORT) ||
12749 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12751 * User asks to abort a non-existent assoc,
12752 * or EOF a non-existent assoc with no data
12754 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12758 /* get an asoc/stcb struct */
12759 vrf_id = inp->def_vrf_id;
12761 if (create_lock_applied == 0) {
12762 panic("Error, should hold create lock and I don't?");
12765 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12766 inp->sctp_ep.pre_open_stream_count,
12769 if (stcb == NULL) {
12770 /* Error is setup for us in the call */
12773 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12774 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12776 * Set the connected flag so we can queue
12779 soisconnecting(so);
12782 if (create_lock_applied) {
12783 SCTP_ASOC_CREATE_UNLOCK(inp);
12784 create_lock_applied = 0;
12786 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12789 * Turn on queue only flag to prevent data from
12793 asoc = &stcb->asoc;
12794 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12795 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12797 /* initialize authentication params for the assoc */
12798 sctp_initialize_auth_params(inp, stcb);
12801 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12802 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
12803 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
12809 /* out with the INIT */
12810 queue_only_for_init = 1;
12812 * we may want to dig in after this call and adjust the MTU
12813 * value. It defaulted to 1500 (constant) but the ro
12814 * structure may now have an update and thus we may need to
12815 * change it BEFORE we append the message.
12819 asoc = &stcb->asoc;
12821 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12822 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12824 net = sctp_findnet(stcb, addr);
12827 if ((net == NULL) ||
12828 ((port != 0) && (port != stcb->rport))) {
12829 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12834 if (stcb->asoc.alternate) {
12835 net = stcb->asoc.alternate;
12837 net = stcb->asoc.primary_destination;
12840 atomic_add_int(&stcb->total_sends, 1);
12841 /* Keep the stcb from being freed under our feet */
12842 atomic_add_int(&asoc->refcnt, 1);
12843 free_cnt_applied = 1;
12845 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12846 if (sndlen > asoc->smallest_mtu) {
12847 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12852 if (SCTP_SO_IS_NBIO(so)
12853 || (flags & MSG_NBIO)
12857 /* would we block? */
12858 if (non_blocking) {
12859 if (hold_tcblock == 0) {
12860 SCTP_TCB_LOCK(stcb);
12863 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12864 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12865 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12866 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12867 if (sndlen > SCTP_SB_LIMIT_SND(so))
12870 error = EWOULDBLOCK;
12873 stcb->asoc.sb_send_resv += sndlen;
12874 SCTP_TCB_UNLOCK(stcb);
12877 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12879 local_soresv = sndlen;
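	/*
	 * Illustrative userland sketch (assumption, not part of this file):
	 * on a non-blocking socket the check above fails the whole send with
	 * EWOULDBLOCK instead of sleeping, so a caller typically waits for
	 * POLLOUT and retries ("fd", "buf" and "len" are placeholders).
	 *
	 *	while (sctp_sendmsg(fd, buf, len, NULL, 0, 0, 0, 0, 0, 0) < 0) {
	 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	 *
	 *		if (errno != EWOULDBLOCK && errno != EAGAIN)
	 *			break;			// real error, give up
	 *		(void)poll(&pfd, 1, -1);	// wait for send space
	 *	}
	 */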
12880 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12881 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12882 error = ECONNRESET;
12885 if (create_lock_applied) {
12886 SCTP_ASOC_CREATE_UNLOCK(inp);
12887 create_lock_applied = 0;
12889 /* Is the stream no. valid? */
12890 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12891 /* Invalid stream number */
12892 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12896 if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
12897 (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
12899 * Can't queue any data while stream reset is underway.
12901 if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
12906 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
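	/*
	 * Illustrative userland sketch (assumption, not part of this file):
	 * sinfo_stream must stay below the negotiated number of outbound
	 * streams or the send fails with EINVAL above; the application can
	 * request more outbound streams at setup time via SCTP_INITMSG,
	 * subject to what the peer accepts ("fd", "buf" and "len" are
	 * placeholders).
	 *
	 *	struct sctp_initmsg init;
	 *
	 *	memset(&init, 0, sizeof(init));
	 *	init.sinit_num_ostreams = 10;
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG,
	 *	    &init, sizeof(init));
	 *	// streams 0..9 may then be used as the stream_no argument
	 *	(void)sctp_sendmsg(fd, buf, len, NULL, 0, 0, 0, 3, 0, 0);
	 */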
12909 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12910 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12913 /* we are now done with all control */
12915 sctp_m_freem(control);
12918 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12919 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12920 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12921 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12922 if (srcv->sinfo_flags & SCTP_ABORT) {
12925 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12926 error = ECONNRESET;
12930 /* Ok, we will attempt a msgsnd :> */
12932 p->td_ru.ru_msgsnd++;
12934 /* Are we aborting? */
12935 if (srcv->sinfo_flags & SCTP_ABORT) {
12937 int tot_demand, tot_out = 0, max_out;
12939 SCTP_STAT_INCR(sctps_sends_with_abort);
12940 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12941 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12942 /* It has to be up before we abort */
12943 /* how big is the user initiated abort? */
12944 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12948 if (hold_tcblock) {
12949 SCTP_TCB_UNLOCK(stcb);
12953 struct mbuf *cntm = NULL;
12955 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
12957 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12958 tot_out += SCTP_BUF_LEN(cntm);
12962 /* Must fit in an MTU */
12964 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12965 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12967 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12971 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
12974 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12978 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12979 max_out -= sizeof(struct sctp_abort_msg);
12980 if (tot_out > max_out) {
12984 struct sctp_paramhdr *ph;
12986 /* now move forward the data pointer */
12987 ph = mtod(mm, struct sctp_paramhdr *);
12988 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12989 ph->param_length = htons((uint16_t) (sizeof(struct sctp_paramhdr) + tot_out));
12991 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12993 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12996 * Here, if we can't get the user's data we
12997 * still abort; we just don't get to
12998 * send the user's note :-0
13005 SCTP_BUF_NEXT(mm) = top;
13009 if (hold_tcblock == 0) {
13010 SCTP_TCB_LOCK(stcb);
13012 atomic_add_int(&stcb->asoc.refcnt, -1);
13013 free_cnt_applied = 0;
13014 /* release this lock, otherwise we hang on ourselves */
13015 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
13016 /* now relock the stcb so everything is sane */
13020 * In this case top is already chained to mm, so avoid a double
13021 * free, since we free it below if top != NULL and the driver
13022 * would free it after sending the packet out
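	/*
	 * Illustrative userland sketch (assumption, not part of this file):
	 * a send flagged SCTP_ABORT tears the association down, and any data
	 * supplied is carried in the user-initiated abort cause built above
	 * ("fd" and "dst" are placeholders).
	 *
	 *	const char reason[] = "going away";
	 *
	 *	(void)sctp_sendmsg(fd, reason, sizeof(reason),
	 *	    (struct sockaddr *)&dst, sizeof(dst),
	 *	    0, SCTP_ABORT, 0, 0, 0);
	 */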
13029 /* Calculate the maximum we can send */
13030 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13031 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13032 if (non_blocking) {
13033 /* we already checked for non-blocking above. */
13036 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13041 if (hold_tcblock) {
13042 SCTP_TCB_UNLOCK(stcb);
13045 if (asoc->strmout == NULL) {
13046 /* huh? software error */
13047 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13051 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
13052 if ((user_marks_eor == 0) &&
13053 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13054 /* It will NEVER fit */
13055 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13059 if ((uio == NULL) && user_marks_eor) {
13061 * We do not support eeor mode for
13062 * sending with mbuf chains (like sendfile).
13064 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13068 if (user_marks_eor) {
13069 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13072 * For non-eeor the whole message must fit in
13073 * the socket send buffer.
13075 local_add_more = sndlen;
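	/*
	 * Illustrative userland sketch (assumption, not part of this file):
	 * with explicit EOR enabled, a message larger than the send buffer
	 * can be handed down in several calls and only the last one is
	 * flagged SCTP_EOR; without it the whole message must fit, as
	 * checked above ("fd", "part1", "part2" and the lengths are
	 * placeholders).
	 *
	 *	int on = 1;
	 *
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_EXPLICIT_EOR,
	 *	    &on, sizeof(on));
	 *	(void)sctp_sendmsg(fd, part1, len1, NULL, 0, 0, 0, 0, 0, 0);
	 *	(void)sctp_sendmsg(fd, part2, len2, NULL, 0, 0, SCTP_EOR,
	 *	    0, 0, 0);
	 */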
13078 if (non_blocking) {
13079 goto skip_preblock;
13081 if (((max_len <= local_add_more) &&
13082 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13084 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13085 /* No room right now ! */
13086 SOCKBUF_LOCK(&so->so_snd);
13087 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13088 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13089 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13090 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
13091 (unsigned int)SCTP_SB_LIMIT_SND(so),
13094 stcb->asoc.stream_queue_cnt,
13095 stcb->asoc.chunks_on_out_queue,
13096 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13097 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13098 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
13101 stcb->block_entry = &be;
13102 error = sbwait(&so->so_snd);
13103 stcb->block_entry = NULL;
13104 if (error || so->so_error || be.error) {
13107 error = so->so_error;
13112 SOCKBUF_UNLOCK(&so->so_snd);
13115 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13116 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13117 asoc, stcb->asoc.total_output_queue_size);
13119 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13122 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13124 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13125 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13129 SOCKBUF_UNLOCK(&so->so_snd);
13132 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13136 * sndlen covers the mbuf case, uio_resid covers the non-mbuf
13137 * case. NOTE: uio will be NULL when top/mbuf is passed
13140 if (srcv->sinfo_flags & SCTP_EOF) {
13141 got_all_of_the_send = 1;
13144 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13150 struct sctp_stream_queue_pending *sp;
13151 struct sctp_stream_out *strm;
13154 SCTP_TCB_SEND_LOCK(stcb);
13155 if ((asoc->stream_locked) &&
13156 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13157 SCTP_TCB_SEND_UNLOCK(stcb);
13158 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13162 SCTP_TCB_SEND_UNLOCK(stcb);
13164 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
13165 if (strm->last_msg_incomplete == 0) {
13167 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13168 if ((sp == NULL) || (error)) {
13171 SCTP_TCB_SEND_LOCK(stcb);
13172 if (sp->msg_is_complete) {
13173 strm->last_msg_incomplete = 0;
13174 asoc->stream_locked = 0;
13177 * Just got locked to this guy in case of an
13180 strm->last_msg_incomplete = 1;
13181 if (stcb->asoc.idata_supported == 0) {
13182 asoc->stream_locked = 1;
13183 asoc->stream_locked_on = srcv->sinfo_stream;
13185 sp->sender_all_done = 0;
13187 sctp_snd_sb_alloc(stcb, sp->length);
13188 atomic_add_int(&asoc->stream_queue_cnt, 1);
13189 if (srcv->sinfo_flags & SCTP_UNORDERED) {
13190 SCTP_STAT_INCR(sctps_sends_with_unord);
13192 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13193 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13194 SCTP_TCB_SEND_UNLOCK(stcb);
13196 SCTP_TCB_SEND_LOCK(stcb);
13197 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13198 SCTP_TCB_SEND_UNLOCK(stcb);
13200 /* ???? Huh ??? last msg is gone */
13202 panic("Warning: Last msg marked incomplete, yet nothing left?");
13204 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13205 strm->last_msg_incomplete = 0;
13211 while (uio->uio_resid > 0) {
13212 /* How much room do we have? */
13213 struct mbuf *new_tail, *mm;
13215 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13216 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
13220 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13221 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13222 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13225 if (hold_tcblock) {
13226 SCTP_TCB_UNLOCK(stcb);
13229 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
13230 if ((mm == NULL) || error) {
13236 /* Update the mbuf and count */
13237 SCTP_TCB_SEND_LOCK(stcb);
13238 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13240 * we need to get out. Peer probably
13244 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
13245 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13246 error = ECONNRESET;
13248 SCTP_TCB_SEND_UNLOCK(stcb);
13251 if (sp->tail_mbuf) {
13252 /* tack it to the end */
13253 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13254 sp->tail_mbuf = new_tail;
13256 /* A stolen mbuf */
13258 sp->tail_mbuf = new_tail;
13260 sctp_snd_sb_alloc(stcb, sndout);
13261 atomic_add_int(&sp->length, sndout);
13264 /* Did we reach EOR? */
13265 if ((uio->uio_resid == 0) &&
13266 ((user_marks_eor == 0) ||
13267 (srcv->sinfo_flags & SCTP_EOF) ||
13268 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13269 sp->msg_is_complete = 1;
13271 sp->msg_is_complete = 0;
13273 SCTP_TCB_SEND_UNLOCK(stcb);
13275 if (uio->uio_resid == 0) {
13280 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
13282 * This is ugly but we must assure locking
13285 if (hold_tcblock == 0) {
13286 SCTP_TCB_LOCK(stcb);
13289 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
13290 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13291 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13292 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13298 SCTP_TCB_UNLOCK(stcb);
13301 /* wait for space now */
13302 if (non_blocking) {
13303 /* Non-blocking I/O in effect, so get out */
13306 /* What about the INIT, send it maybe */
13307 if (queue_only_for_init) {
13308 if (hold_tcblock == 0) {
13309 SCTP_TCB_LOCK(stcb);
13312 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13313 /* a collision took us forward? */
13316 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13317 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13321 if ((net->flight_size > net->cwnd) &&
13322 (asoc->sctp_cmt_on_off == 0)) {
13323 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13325 } else if (asoc->ifp_had_enobuf) {
13326 SCTP_STAT_INCR(sctps_ifnomemqueued);
13327 if (net->flight_size > (2 * net->mtu)) {
13330 asoc->ifp_had_enobuf = 0;
13332 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13333 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
13334 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13335 (stcb->asoc.total_flight > 0) &&
13336 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13337 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13340 * Ok, Nagle is set on and we have data outstanding.
13341 * Don't send anything and let SACKs drive out the
13342 * data unless we have a "full" segment to send.
13344 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13345 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13347 SCTP_STAT_INCR(sctps_naglequeued);
13350 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13351 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13352 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13354 SCTP_STAT_INCR(sctps_naglesent);
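	/*
	 * Illustrative userland sketch (assumption, not part of this file):
	 * an application that cannot tolerate this Nagle-style delay for
	 * small messages can switch it off per socket ("fd" is a
	 * placeholder).
	 *
	 *	int on = 1;
	 *
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY,
	 *	    &on, sizeof(on));
	 */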
13357 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13359 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13360 nagle_applies, un_sent);
13361 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13362 stcb->asoc.total_flight,
13363 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13365 if (queue_only_for_init)
13366 queue_only_for_init = 0;
13367 if ((queue_only == 0) && (nagle_applies == 0)) {
13369 * need to start chunk output
13370 * before blocking... note that if
13371 * a lock is already applied, then
13372 * the input via the net is happening
13373 * and I don't need to start output :-D
13375 if (hold_tcblock == 0) {
13376 if (SCTP_TCB_TRYLOCK(stcb)) {
13378 sctp_chunk_output(inp,
13380 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13383 sctp_chunk_output(inp,
13385 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13387 if (hold_tcblock == 1) {
13388 SCTP_TCB_UNLOCK(stcb);
13392 SOCKBUF_LOCK(&so->so_snd);
13394 * This is a bit strange, but I think it will
13395 * work. The total_output_queue_size is locked and
13396 * protected by the TCB_LOCK, which we just released.
13397 * There is a race that can occur between releasing it
13398 * above and getting the socket lock, where SACKs
13399 * come in but we have not put the SB_WAIT on the
13400 * so_snd buffer to get the wakeup. After the LOCK
13401 * is applied, SACK processing will also need to
13402 * LOCK the so->so_snd to do the actual sowwakeup(). So
13403 * once we have the socket buffer lock, if we recheck the
13404 * size we KNOW we will get to sleep safely with the
13405 * wakeup flag in place.
13407 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
13408 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13409 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13410 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13411 asoc, (size_t)uio->uio_resid);
13414 stcb->block_entry = &be;
13415 error = sbwait(&so->so_snd);
13416 stcb->block_entry = NULL;
13418 if (error || so->so_error || be.error) {
13421 error = so->so_error;
13426 SOCKBUF_UNLOCK(&so->so_snd);
13429 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13430 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13431 asoc, stcb->asoc.total_output_queue_size);
13434 SOCKBUF_UNLOCK(&so->so_snd);
13435 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13439 SCTP_TCB_SEND_LOCK(stcb);
13441 if (sp->msg_is_complete == 0) {
13442 strm->last_msg_incomplete = 1;
13443 if (stcb->asoc.idata_supported == 0) {
13444 asoc->stream_locked = 1;
13445 asoc->stream_locked_on = srcv->sinfo_stream;
13448 sp->sender_all_done = 1;
13449 strm->last_msg_incomplete = 0;
13450 asoc->stream_locked = 0;
13453 SCTP_PRINTF("Huh no sp TSNH?\n");
13454 strm->last_msg_incomplete = 0;
13455 asoc->stream_locked = 0;
13457 SCTP_TCB_SEND_UNLOCK(stcb);
13458 if (uio->uio_resid == 0) {
13459 got_all_of_the_send = 1;
13462 /* We send in a 0, since we do NOT have any locks */
13463 error = sctp_msg_append(stcb, net, top, srcv, 0);
13465 if (srcv->sinfo_flags & SCTP_EOF) {
13467 * This should only happen for Panda for the mbuf
13468 * send case, which does NOT yet support EEOR mode.
13469 * Thus, we can just set this flag to do the proper
13472 got_all_of_the_send = 1;
13480 if ((srcv->sinfo_flags & SCTP_EOF) &&
13481 (got_all_of_the_send == 1)) {
13484 SCTP_STAT_INCR(sctps_sends_with_eof);
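	/*
	 * Illustrative userland sketch (assumption, not part of this file):
	 * on a one-to-many socket an association is shut down gracefully by
	 * a zero-length send flagged SCTP_EOF, which ends up in this branch
	 * once all queued data has been accepted ("fd" and "dst" are
	 * placeholders).
	 *
	 *	(void)sctp_sendmsg(fd, NULL, 0, (struct sockaddr *)&dst,
	 *	    sizeof(dst), 0, SCTP_EOF, 0, 0, 0);
	 */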
13486 if (hold_tcblock == 0) {
13487 SCTP_TCB_LOCK(stcb);
13490 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
13491 if (TAILQ_EMPTY(&asoc->send_queue) &&
13492 TAILQ_EMPTY(&asoc->sent_queue) &&
13494 if (asoc->locked_on_sending) {
13497 /* there is nothing queued to send, so I'm done... */
13498 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13499 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13500 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13501 struct sctp_nets *netp;
13503 /* only send SHUTDOWN the first time through */
13504 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
13505 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13507 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
13508 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
13509 sctp_stop_timers_for_shutdown(stcb);
13510 if (stcb->asoc.alternate) {
13511 netp = stcb->asoc.alternate;
13513 netp = stcb->asoc.primary_destination;
13515 sctp_send_shutdown(stcb, netp);
13516 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13518 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13519 asoc->primary_destination);
13523 * we still got (or just got) data to send, so set
13527 * XXX sockets draft says that SCTP_EOF should be
13528 * sent with no data. Currently, we will allow user
13529 * data to be sent first and move to
13532 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13533 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13534 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13535 if (hold_tcblock == 0) {
13536 SCTP_TCB_LOCK(stcb);
13539 if (asoc->locked_on_sending) {
13540 /* Locked to send out the data */
13541 struct sctp_stream_queue_pending *sp;
13543 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
13545 if ((sp->length == 0) && (sp->msg_is_complete == 0))
13546 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
13549 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
13550 if (TAILQ_EMPTY(&asoc->send_queue) &&
13551 TAILQ_EMPTY(&asoc->sent_queue) &&
13552 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13553 struct mbuf *op_err;
13554 char msg[SCTP_DIAG_INFO_LEN];
13557 if (free_cnt_applied) {
13558 atomic_add_int(&stcb->asoc.refcnt, -1);
13559 free_cnt_applied = 0;
13561 snprintf(msg, sizeof(msg),
13562 "%s:%d at %s", __FILE__, __LINE__, __func__);
13563 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
13565 sctp_abort_an_association(stcb->sctp_ep, stcb,
13566 op_err, SCTP_SO_LOCKED);
13568 * now relock the stcb so everything
13575 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13576 asoc->primary_destination);
13577 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13582 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13583 some_on_control = 1;
13585 if (queue_only_for_init) {
13586 if (hold_tcblock == 0) {
13587 SCTP_TCB_LOCK(stcb);
13590 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13591 /* a collision took us forward? */
13594 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13595 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
13599 if ((net->flight_size > net->cwnd) &&
13600 (stcb->asoc.sctp_cmt_on_off == 0)) {
13601 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13603 } else if (asoc->ifp_had_enobuf) {
13604 SCTP_STAT_INCR(sctps_ifnomemqueued);
13605 if (net->flight_size > (2 * net->mtu)) {
13608 asoc->ifp_had_enobuf = 0;
13610 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13611 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
13612 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13613 (stcb->asoc.total_flight > 0) &&
13614 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13615 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13617 * Ok, Nagle is set on and we have data outstanding.
13618 * Don't send anything and let SACKs drive out the
13619 * data unless we have a "full" segment to send.
13621 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13622 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13624 SCTP_STAT_INCR(sctps_naglequeued);
13627 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13628 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13629 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13631 SCTP_STAT_INCR(sctps_naglesent);
13634 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13635 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13636 nagle_applies, un_sent);
13637 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13638 stcb->asoc.total_flight,
13639 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13641 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13642 /* we can attempt to send too. */
13643 if (hold_tcblock == 0) {
13645 * If there is activity recv'ing sacks no need to
13648 if (SCTP_TCB_TRYLOCK(stcb)) {
13649 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13653 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13655 } else if ((queue_only == 0) &&
13656 (stcb->asoc.peers_rwnd == 0) &&
13657 (stcb->asoc.total_flight == 0)) {
13658 /* We get to have a probe outstanding */
13659 if (hold_tcblock == 0) {
13661 SCTP_TCB_LOCK(stcb);
13663 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13664 } else if (some_on_control) {
13665 int num_out, reason, frag_point;
13667 /* Here we do control only */
13668 if (hold_tcblock == 0) {
13670 SCTP_TCB_LOCK(stcb);
13672 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13673 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13674 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13676 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13677 queue_only, stcb->asoc.peers_rwnd, un_sent,
13678 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13679 stcb->asoc.total_output_queue_size, error);
13684 if (local_soresv && stcb) {
13685 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13687 if (create_lock_applied) {
13688 SCTP_ASOC_CREATE_UNLOCK(inp);
13690 if ((stcb) && hold_tcblock) {
13691 SCTP_TCB_UNLOCK(stcb);
13693 if (stcb && free_cnt_applied) {
13694 atomic_add_int(&stcb->asoc.refcnt, -1);
13698 if (mtx_owned(&stcb->tcb_mtx)) {
13699 panic("Leaving with tcb mtx owned?");
13701 if (mtx_owned(&stcb->tcb_send_mtx)) {
13702 panic("Leaving with tcb send mtx owned?");
13710 sctp_m_freem(control);
13717 * generate an AUTHentication chunk, if required
13720 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13721 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
13722 struct sctp_tcb *stcb, uint8_t chunk)
13724 struct mbuf *m_auth;
13725 struct sctp_auth_chunk *auth;
13729 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13733 if (stcb->asoc.auth_supported == 0) {
13736 /* does the requested chunk require auth? */
13737 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13740 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
13741 if (m_auth == NULL) {
13745 /* reserve some space if this will be the first mbuf */
13747 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13748 /* fill in the AUTH chunk details */
13749 auth = mtod(m_auth, struct sctp_auth_chunk *);
13750 bzero(auth, sizeof(*auth));
13751 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13752 auth->ch.chunk_flags = 0;
13753 chunk_len = sizeof(*auth) +
13754 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13755 auth->ch.chunk_length = htons(chunk_len);
13756 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13757 /* key id and hmac digest will be computed and filled in upon send */
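	/*
	 * Illustrative userland sketch (assumption, not part of this file):
	 * which chunk types must be covered by AUTH is negotiated with the
	 * peer; an application can ask for an additional chunk type to be
	 * authenticated before associating ("fd" is a placeholder).
	 *
	 *	struct sctp_authchunk ac;
	 *
	 *	ac.sauth_chunk = SCTP_DATA;
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_CHUNK,
	 *	    &ac, sizeof(ac));
	 */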
13759 /* save the offset where the auth was inserted into the chain */
13761 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13762 *offset += SCTP_BUF_LEN(cn);
13765 /* update length and return pointer to the auth chunk */
13766 SCTP_BUF_LEN(m_auth) = chunk_len;
13767 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13768 if (auth_ret != NULL)
13776 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
13778 struct nd_prefix *pfx = NULL;
13779 struct nd_pfxrouter *pfxrtr = NULL;
13780 struct sockaddr_in6 gw6;
13782 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13785 /* get prefix entry of address */
13786 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13787 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13789 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13790 &src6->sin6_addr, &pfx->ndpr_mask))
13793 /* no prefix entry in the prefix list */
13795 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13796 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13799 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13800 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13802 /* search for the installed gateway in the prefix entry */
13803 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13804 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13805 gw6.sin6_family = AF_INET6;
13806 gw6.sin6_len = sizeof(struct sockaddr_in6);
13807 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13808 sizeof(struct in6_addr));
13809 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13810 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13811 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13812 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13813 if (sctp_cmpaddr((struct sockaddr *)&gw6,
13814 ro->ro_rt->rt_gateway)) {
13815 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13819 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
13826 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
13829 struct sockaddr_in *sin, *mask;
13830 struct ifaddr *ifa;
13831 struct in_addr srcnetaddr, gwnetaddr;
13833 if (ro == NULL || ro->ro_rt == NULL ||
13834 sifa->address.sa.sa_family != AF_INET) {
13837 ifa = (struct ifaddr *)sifa->ifa;
13838 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13839 sin = &sifa->address.sin;
13840 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13841 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13842 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13843 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13845 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13846 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13847 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13848 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13849 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13850 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {