2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_auth.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_asconf.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_bsd_addr.h>
51 #include <netinet/sctp_input.h>
52 #include <netinet/sctp_crc32.h>
53 #include <netinet/udp.h>
54 #include <netinet/udp_var.h>
55 #include <machine/in_cksum.h>
59 #define SCTP_MAX_GAPS_INARRAY 4
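/*
 * sack_track / sack_array below form a 256-entry lookup table indexed by one
 * byte of the association's SACK mapping array.  Each entry pre-computes the
 * gap ack blocks (as bit offsets within that byte) so the SACK builder does
 * not have to walk individual bits at send time.  As the table shows,
 * right_edge is set whenever bit 0x01 is set and left_edge whenever bit 0x80
 * is set, i.e. the first/last block may be merged with the block derived from
 * the neighbouring byte.  Illustrative use only (variable names here are
 * hypothetical, not taken from this file):
 *
 *     const struct sack_track *sel = &sack_array[mapping_byte];
 *     for (j = 0; j < sel->num_entries; j++)
 *             emit_gap(base + sel->gaps[j].start, base + sel->gaps[j].end);
 */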
61 uint8_t right_edge; /* mergeable on the right edge */
62 uint8_t left_edge; /* mergeable on the left edge */
65 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
68 struct sack_track sack_array[256] = {
69 {0, 0, 0, 0, /* 0x00 */
76 {1, 0, 1, 0, /* 0x01 */
83 {0, 0, 1, 0, /* 0x02 */
90 {1, 0, 1, 0, /* 0x03 */
97 {0, 0, 1, 0, /* 0x04 */
104 {1, 0, 2, 0, /* 0x05 */
111 {0, 0, 1, 0, /* 0x06 */
118 {1, 0, 1, 0, /* 0x07 */
125 {0, 0, 1, 0, /* 0x08 */
132 {1, 0, 2, 0, /* 0x09 */
139 {0, 0, 2, 0, /* 0x0a */
146 {1, 0, 2, 0, /* 0x0b */
153 {0, 0, 1, 0, /* 0x0c */
160 {1, 0, 2, 0, /* 0x0d */
167 {0, 0, 1, 0, /* 0x0e */
174 {1, 0, 1, 0, /* 0x0f */
181 {0, 0, 1, 0, /* 0x10 */
188 {1, 0, 2, 0, /* 0x11 */
195 {0, 0, 2, 0, /* 0x12 */
202 {1, 0, 2, 0, /* 0x13 */
209 {0, 0, 2, 0, /* 0x14 */
216 {1, 0, 3, 0, /* 0x15 */
223 {0, 0, 2, 0, /* 0x16 */
230 {1, 0, 2, 0, /* 0x17 */
237 {0, 0, 1, 0, /* 0x18 */
244 {1, 0, 2, 0, /* 0x19 */
251 {0, 0, 2, 0, /* 0x1a */
258 {1, 0, 2, 0, /* 0x1b */
265 {0, 0, 1, 0, /* 0x1c */
272 {1, 0, 2, 0, /* 0x1d */
279 {0, 0, 1, 0, /* 0x1e */
286 {1, 0, 1, 0, /* 0x1f */
293 {0, 0, 1, 0, /* 0x20 */
300 {1, 0, 2, 0, /* 0x21 */
307 {0, 0, 2, 0, /* 0x22 */
314 {1, 0, 2, 0, /* 0x23 */
321 {0, 0, 2, 0, /* 0x24 */
328 {1, 0, 3, 0, /* 0x25 */
335 {0, 0, 2, 0, /* 0x26 */
342 {1, 0, 2, 0, /* 0x27 */
349 {0, 0, 2, 0, /* 0x28 */
356 {1, 0, 3, 0, /* 0x29 */
363 {0, 0, 3, 0, /* 0x2a */
370 {1, 0, 3, 0, /* 0x2b */
377 {0, 0, 2, 0, /* 0x2c */
384 {1, 0, 3, 0, /* 0x2d */
391 {0, 0, 2, 0, /* 0x2e */
398 {1, 0, 2, 0, /* 0x2f */
405 {0, 0, 1, 0, /* 0x30 */
412 {1, 0, 2, 0, /* 0x31 */
419 {0, 0, 2, 0, /* 0x32 */
426 {1, 0, 2, 0, /* 0x33 */
433 {0, 0, 2, 0, /* 0x34 */
440 {1, 0, 3, 0, /* 0x35 */
447 {0, 0, 2, 0, /* 0x36 */
454 {1, 0, 2, 0, /* 0x37 */
461 {0, 0, 1, 0, /* 0x38 */
468 {1, 0, 2, 0, /* 0x39 */
475 {0, 0, 2, 0, /* 0x3a */
482 {1, 0, 2, 0, /* 0x3b */
489 {0, 0, 1, 0, /* 0x3c */
496 {1, 0, 2, 0, /* 0x3d */
503 {0, 0, 1, 0, /* 0x3e */
510 {1, 0, 1, 0, /* 0x3f */
517 {0, 0, 1, 0, /* 0x40 */
524 {1, 0, 2, 0, /* 0x41 */
531 {0, 0, 2, 0, /* 0x42 */
538 {1, 0, 2, 0, /* 0x43 */
545 {0, 0, 2, 0, /* 0x44 */
552 {1, 0, 3, 0, /* 0x45 */
559 {0, 0, 2, 0, /* 0x46 */
566 {1, 0, 2, 0, /* 0x47 */
573 {0, 0, 2, 0, /* 0x48 */
580 {1, 0, 3, 0, /* 0x49 */
587 {0, 0, 3, 0, /* 0x4a */
594 {1, 0, 3, 0, /* 0x4b */
601 {0, 0, 2, 0, /* 0x4c */
608 {1, 0, 3, 0, /* 0x4d */
615 {0, 0, 2, 0, /* 0x4e */
622 {1, 0, 2, 0, /* 0x4f */
629 {0, 0, 2, 0, /* 0x50 */
636 {1, 0, 3, 0, /* 0x51 */
643 {0, 0, 3, 0, /* 0x52 */
650 {1, 0, 3, 0, /* 0x53 */
657 {0, 0, 3, 0, /* 0x54 */
664 {1, 0, 4, 0, /* 0x55 */
671 {0, 0, 3, 0, /* 0x56 */
678 {1, 0, 3, 0, /* 0x57 */
685 {0, 0, 2, 0, /* 0x58 */
692 {1, 0, 3, 0, /* 0x59 */
699 {0, 0, 3, 0, /* 0x5a */
706 {1, 0, 3, 0, /* 0x5b */
713 {0, 0, 2, 0, /* 0x5c */
720 {1, 0, 3, 0, /* 0x5d */
727 {0, 0, 2, 0, /* 0x5e */
734 {1, 0, 2, 0, /* 0x5f */
741 {0, 0, 1, 0, /* 0x60 */
748 {1, 0, 2, 0, /* 0x61 */
755 {0, 0, 2, 0, /* 0x62 */
762 {1, 0, 2, 0, /* 0x63 */
769 {0, 0, 2, 0, /* 0x64 */
776 {1, 0, 3, 0, /* 0x65 */
783 {0, 0, 2, 0, /* 0x66 */
790 {1, 0, 2, 0, /* 0x67 */
797 {0, 0, 2, 0, /* 0x68 */
804 {1, 0, 3, 0, /* 0x69 */
811 {0, 0, 3, 0, /* 0x6a */
818 {1, 0, 3, 0, /* 0x6b */
825 {0, 0, 2, 0, /* 0x6c */
832 {1, 0, 3, 0, /* 0x6d */
839 {0, 0, 2, 0, /* 0x6e */
846 {1, 0, 2, 0, /* 0x6f */
853 {0, 0, 1, 0, /* 0x70 */
860 {1, 0, 2, 0, /* 0x71 */
867 {0, 0, 2, 0, /* 0x72 */
874 {1, 0, 2, 0, /* 0x73 */
881 {0, 0, 2, 0, /* 0x74 */
888 {1, 0, 3, 0, /* 0x75 */
895 {0, 0, 2, 0, /* 0x76 */
902 {1, 0, 2, 0, /* 0x77 */
909 {0, 0, 1, 0, /* 0x78 */
916 {1, 0, 2, 0, /* 0x79 */
923 {0, 0, 2, 0, /* 0x7a */
930 {1, 0, 2, 0, /* 0x7b */
937 {0, 0, 1, 0, /* 0x7c */
944 {1, 0, 2, 0, /* 0x7d */
951 {0, 0, 1, 0, /* 0x7e */
958 {1, 0, 1, 0, /* 0x7f */
965 {0, 1, 1, 0, /* 0x80 */
972 {1, 1, 2, 0, /* 0x81 */
979 {0, 1, 2, 0, /* 0x82 */
986 {1, 1, 2, 0, /* 0x83 */
993 {0, 1, 2, 0, /* 0x84 */
1000 {1, 1, 3, 0, /* 0x85 */
1007 {0, 1, 2, 0, /* 0x86 */
1014 {1, 1, 2, 0, /* 0x87 */
1021 {0, 1, 2, 0, /* 0x88 */
1028 {1, 1, 3, 0, /* 0x89 */
1035 {0, 1, 3, 0, /* 0x8a */
1042 {1, 1, 3, 0, /* 0x8b */
1049 {0, 1, 2, 0, /* 0x8c */
1056 {1, 1, 3, 0, /* 0x8d */
1063 {0, 1, 2, 0, /* 0x8e */
1070 {1, 1, 2, 0, /* 0x8f */
1077 {0, 1, 2, 0, /* 0x90 */
1084 {1, 1, 3, 0, /* 0x91 */
1091 {0, 1, 3, 0, /* 0x92 */
1098 {1, 1, 3, 0, /* 0x93 */
1105 {0, 1, 3, 0, /* 0x94 */
1112 {1, 1, 4, 0, /* 0x95 */
1119 {0, 1, 3, 0, /* 0x96 */
1126 {1, 1, 3, 0, /* 0x97 */
1133 {0, 1, 2, 0, /* 0x98 */
1140 {1, 1, 3, 0, /* 0x99 */
1147 {0, 1, 3, 0, /* 0x9a */
1154 {1, 1, 3, 0, /* 0x9b */
1161 {0, 1, 2, 0, /* 0x9c */
1168 {1, 1, 3, 0, /* 0x9d */
1175 {0, 1, 2, 0, /* 0x9e */
1182 {1, 1, 2, 0, /* 0x9f */
1189 {0, 1, 2, 0, /* 0xa0 */
1196 {1, 1, 3, 0, /* 0xa1 */
1203 {0, 1, 3, 0, /* 0xa2 */
1210 {1, 1, 3, 0, /* 0xa3 */
1217 {0, 1, 3, 0, /* 0xa4 */
1224 {1, 1, 4, 0, /* 0xa5 */
1231 {0, 1, 3, 0, /* 0xa6 */
1238 {1, 1, 3, 0, /* 0xa7 */
1245 {0, 1, 3, 0, /* 0xa8 */
1252 {1, 1, 4, 0, /* 0xa9 */
1259 {0, 1, 4, 0, /* 0xaa */
1266 {1, 1, 4, 0, /* 0xab */
1273 {0, 1, 3, 0, /* 0xac */
1280 {1, 1, 4, 0, /* 0xad */
1287 {0, 1, 3, 0, /* 0xae */
1294 {1, 1, 3, 0, /* 0xaf */
1301 {0, 1, 2, 0, /* 0xb0 */
1308 {1, 1, 3, 0, /* 0xb1 */
1315 {0, 1, 3, 0, /* 0xb2 */
1322 {1, 1, 3, 0, /* 0xb3 */
1329 {0, 1, 3, 0, /* 0xb4 */
1336 {1, 1, 4, 0, /* 0xb5 */
1343 {0, 1, 3, 0, /* 0xb6 */
1350 {1, 1, 3, 0, /* 0xb7 */
1357 {0, 1, 2, 0, /* 0xb8 */
1364 {1, 1, 3, 0, /* 0xb9 */
1371 {0, 1, 3, 0, /* 0xba */
1378 {1, 1, 3, 0, /* 0xbb */
1385 {0, 1, 2, 0, /* 0xbc */
1392 {1, 1, 3, 0, /* 0xbd */
1399 {0, 1, 2, 0, /* 0xbe */
1406 {1, 1, 2, 0, /* 0xbf */
1413 {0, 1, 1, 0, /* 0xc0 */
1420 {1, 1, 2, 0, /* 0xc1 */
1427 {0, 1, 2, 0, /* 0xc2 */
1434 {1, 1, 2, 0, /* 0xc3 */
1441 {0, 1, 2, 0, /* 0xc4 */
1448 {1, 1, 3, 0, /* 0xc5 */
1455 {0, 1, 2, 0, /* 0xc6 */
1462 {1, 1, 2, 0, /* 0xc7 */
1469 {0, 1, 2, 0, /* 0xc8 */
1476 {1, 1, 3, 0, /* 0xc9 */
1483 {0, 1, 3, 0, /* 0xca */
1490 {1, 1, 3, 0, /* 0xcb */
1497 {0, 1, 2, 0, /* 0xcc */
1504 {1, 1, 3, 0, /* 0xcd */
1511 {0, 1, 2, 0, /* 0xce */
1518 {1, 1, 2, 0, /* 0xcf */
1525 {0, 1, 2, 0, /* 0xd0 */
1532 {1, 1, 3, 0, /* 0xd1 */
1539 {0, 1, 3, 0, /* 0xd2 */
1546 {1, 1, 3, 0, /* 0xd3 */
1553 {0, 1, 3, 0, /* 0xd4 */
1560 {1, 1, 4, 0, /* 0xd5 */
1567 {0, 1, 3, 0, /* 0xd6 */
1574 {1, 1, 3, 0, /* 0xd7 */
1581 {0, 1, 2, 0, /* 0xd8 */
1588 {1, 1, 3, 0, /* 0xd9 */
1595 {0, 1, 3, 0, /* 0xda */
1602 {1, 1, 3, 0, /* 0xdb */
1609 {0, 1, 2, 0, /* 0xdc */
1616 {1, 1, 3, 0, /* 0xdd */
1623 {0, 1, 2, 0, /* 0xde */
1630 {1, 1, 2, 0, /* 0xdf */
1637 {0, 1, 1, 0, /* 0xe0 */
1644 {1, 1, 2, 0, /* 0xe1 */
1651 {0, 1, 2, 0, /* 0xe2 */
1658 {1, 1, 2, 0, /* 0xe3 */
1665 {0, 1, 2, 0, /* 0xe4 */
1672 {1, 1, 3, 0, /* 0xe5 */
1679 {0, 1, 2, 0, /* 0xe6 */
1686 {1, 1, 2, 0, /* 0xe7 */
1693 {0, 1, 2, 0, /* 0xe8 */
1700 {1, 1, 3, 0, /* 0xe9 */
1707 {0, 1, 3, 0, /* 0xea */
1714 {1, 1, 3, 0, /* 0xeb */
1721 {0, 1, 2, 0, /* 0xec */
1728 {1, 1, 3, 0, /* 0xed */
1735 {0, 1, 2, 0, /* 0xee */
1742 {1, 1, 2, 0, /* 0xef */
1749 {0, 1, 1, 0, /* 0xf0 */
1756 {1, 1, 2, 0, /* 0xf1 */
1763 {0, 1, 2, 0, /* 0xf2 */
1770 {1, 1, 2, 0, /* 0xf3 */
1777 {0, 1, 2, 0, /* 0xf4 */
1784 {1, 1, 3, 0, /* 0xf5 */
1791 {0, 1, 2, 0, /* 0xf6 */
1798 {1, 1, 2, 0, /* 0xf7 */
1805 {0, 1, 1, 0, /* 0xf8 */
1812 {1, 1, 2, 0, /* 0xf9 */
1819 {0, 1, 2, 0, /* 0xfa */
1826 {1, 1, 2, 0, /* 0xfb */
1833 {0, 1, 1, 0, /* 0xfc */
1840 {1, 1, 2, 0, /* 0xfd */
1847 {0, 1, 1, 0, /* 0xfe */
1854 {1, 1, 1, 0, /* 0xff */
1865 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1866 struct sctp_scoping *scope,
1869 if ((scope->loopback_scope == 0) &&
1870 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1872 * skip loopback if not in scope
1876 switch (ifa->address.sa.sa_family) {
1879 if (scope->ipv4_addr_legal) {
1880 struct sockaddr_in *sin;
1882 sin = (struct sockaddr_in *)&ifa->address.sin;
1883 if (sin->sin_addr.s_addr == 0) {
1884 /* not in scope, unspecified */
1887 if ((scope->ipv4_local_scope == 0) &&
1888 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1889 /* private address not in scope */
1899 if (scope->ipv6_addr_legal) {
1900 struct sockaddr_in6 *sin6;
1903 * Must update the flags, bummer, which means any
1904 * IFA locks must now be applied HERE <->
1907 sctp_gather_internal_ifa_flags(ifa);
1909 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1912 /* ok to use deprecated addresses? */
1913 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1914 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1915 /* skip unspecified addresses */
1918 if ( /* (local_scope == 0) && */
1919 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1922 if ((scope->site_scope == 0) &&
1923 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1937 static struct mbuf *
1938 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t * len)
1940 #if defined(INET) || defined(INET6)
1941 struct sctp_paramhdr *parmh;
1947 switch (ifa->address.sa.sa_family) {
1950 plen = (uint16_t) sizeof(struct sctp_ipv4addr_param);
1955 plen = (uint16_t) sizeof(struct sctp_ipv6addr_param);
1961 #if defined(INET) || defined(INET6)
1962 if (M_TRAILINGSPACE(m) >= plen) {
1963 /* easy side, we just drop it on the end */
1964 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1967 /* Need more space */
1969 while (SCTP_BUF_NEXT(mret) != NULL) {
1970 mret = SCTP_BUF_NEXT(mret);
1972 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_DONTWAIT, 1, MT_DATA);
1973 if (SCTP_BUF_NEXT(mret) == NULL) {
1974 /* We are hosed, can't add more addresses */
1977 mret = SCTP_BUF_NEXT(mret);
1978 parmh = mtod(mret, struct sctp_paramhdr *);
1980 /* now add the parameter */
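/*
 * Both branches below emit a TLV-style address parameter: a sctp_paramhdr
 * (type + length) immediately followed by the raw 4- or 16-byte address,
 * and then bump SCTP_BUF_LEN(mret) by plen.
 */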
1981 switch (ifa->address.sa.sa_family) {
1985 struct sctp_ipv4addr_param *ipv4p;
1986 struct sockaddr_in *sin;
1988 sin = (struct sockaddr_in *)&ifa->address.sin;
1989 ipv4p = (struct sctp_ipv4addr_param *)parmh;
1990 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
1991 parmh->param_length = htons(plen);
1992 ipv4p->addr = sin->sin_addr.s_addr;
1993 SCTP_BUF_LEN(mret) += plen;
2000 struct sctp_ipv6addr_param *ipv6p;
2001 struct sockaddr_in6 *sin6;
2003 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
2004 ipv6p = (struct sctp_ipv6addr_param *)parmh;
2005 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
2006 parmh->param_length = htons(plen);
2007 memcpy(ipv6p->addr, &sin6->sin6_addr,
2008 sizeof(ipv6p->addr));
2009 /* clear embedded scope in the address */
2010 in6_clearscope((struct in6_addr *)ipv6p->addr);
2011 SCTP_BUF_LEN(mret) += plen;
2027 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2028 struct sctp_scoping *scope,
2029 struct mbuf *m_at, int cnt_inits_to,
2030 uint16_t * padding_len, uint16_t * chunk_len)
2032 struct sctp_vrf *vrf = NULL;
2033 int cnt, limit_out = 0, total_count;
2036 vrf_id = inp->def_vrf_id;
2037 SCTP_IPI_ADDR_RLOCK();
2038 vrf = sctp_find_vrf(vrf_id);
2040 SCTP_IPI_ADDR_RUNLOCK();
2043 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2044 struct sctp_ifa *sctp_ifap;
2045 struct sctp_ifn *sctp_ifnp;
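/*
 * Two passes over the VRF's interface list: the first LIST_FOREACH below
 * only counts addresses that would pass the scope, jail and restriction
 * checks; the second pass actually appends them with sctp_add_addr_to_mbuf(),
 * so the INIT/INIT-ACK is not flooded with more than SCTP_ADDRESS_LIMIT
 * addresses.
 */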
2048 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2050 cnt = SCTP_ADDRESS_LIMIT;
2053 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2054 if ((scope->loopback_scope == 0) &&
2055 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2057 * Skip loopback devices if loopback_scope
2062 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2064 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2065 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2066 &sctp_ifap->address.sin.sin_addr) != 0)) {
2071 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2072 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2073 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2077 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2080 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2084 if (cnt > SCTP_ADDRESS_LIMIT) {
2088 if (cnt > SCTP_ADDRESS_LIMIT) {
2095 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2097 if ((scope->loopback_scope == 0) &&
2098 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2100 * Skip loopback devices if
2101 * loopback_scope not set
2105 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2107 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2108 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2109 &sctp_ifap->address.sin.sin_addr) != 0)) {
2114 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2115 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2116 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2120 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2123 if (sctp_is_address_in_scope(sctp_ifap,
2127 if ((chunk_len != NULL) &&
2128 (padding_len != NULL) &&
2129 (*padding_len > 0)) {
2130 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2131 SCTP_BUF_LEN(m_at) += *padding_len;
2132 *chunk_len += *padding_len;
2135 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2146 if (total_count > SCTP_ADDRESS_LIMIT) {
2147 /* No more addresses */
2155 struct sctp_laddr *laddr;
2158 /* First, how many? */
2159 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2160 if (laddr->ifa == NULL) {
2163 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2165 * Address being deleted by the system, don't
2169 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2171 * Address being deleted on this ep, don't
2176 if (sctp_is_address_in_scope(laddr->ifa,
2183 * To get through a NAT we only list addresses if we have
2184 * more than one. That way if you just bind a single address
2185 * we let the source of the init dictate our address.
2189 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2190 if (laddr->ifa == NULL) {
2193 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2196 if (sctp_is_address_in_scope(laddr->ifa,
2200 if ((chunk_len != NULL) &&
2201 (padding_len != NULL) &&
2202 (*padding_len > 0)) {
2203 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2204 SCTP_BUF_LEN(m_at) += *padding_len;
2205 *chunk_len += *padding_len;
2208 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2210 if (cnt >= SCTP_ADDRESS_LIMIT) {
2216 SCTP_IPI_ADDR_RUNLOCK();
2220 static struct sctp_ifa *
2221 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2222 uint8_t dest_is_loop,
2223 uint8_t dest_is_priv,
2226 uint8_t dest_is_global = 0;
2228 /* dest_is_priv is true if destination is a private address */
2229 /* dest_is_loop is true if destination is a loopback address */
2232 * Here we determine if it's a preferred address. A preferred address
2233 * means it is the same scope or a higher scope than the destination.
2234 * L = loopback, P = private, G = global
2235 * -----------------------------------------
2236 * src | dest | result
2237 * ----------------------------------------
2239 * -----------------------------------------
2240 * P | L | yes-v4 no-v6
2241 * -----------------------------------------
2242 * G | L | yes-v4 no-v6
2243 * -----------------------------------------
2245 * -----------------------------------------
2247 * -----------------------------------------
2249 * -----------------------------------------
2251 * -----------------------------------------
2253 * -----------------------------------------
2255 * -----------------------------------------
2258 if (ifa->address.sa.sa_family != fam) {
2259 /* forget mis-matched family */
2262 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
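/* destination is neither private nor loopback, so treat it as global scope */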
2265 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2266 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2267 /* Ok the address may be ok */
2269 if (fam == AF_INET6) {
2270 /* ok to use deprecated addresses? No, let's not! */
2271 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2272 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2275 if (ifa->src_is_priv && !ifa->src_is_loop) {
2277 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2281 if (ifa->src_is_glob) {
2283 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2290 * Now that we know what is what, implement our table. This could in
2291 * theory be done slicker (it used to be), but this is
2292 * straightforward and easier to validate :-)
2294 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2295 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2296 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2297 dest_is_loop, dest_is_priv, dest_is_global);
2299 if ((ifa->src_is_loop) && (dest_is_priv)) {
2300 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2303 if ((ifa->src_is_glob) && (dest_is_priv)) {
2304 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2307 if ((ifa->src_is_loop) && (dest_is_global)) {
2308 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2311 if ((ifa->src_is_priv) && (dest_is_global)) {
2312 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2315 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2316 /* it's a preferred address */
2320 static struct sctp_ifa *
2321 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2322 uint8_t dest_is_loop,
2323 uint8_t dest_is_priv,
2326 uint8_t dest_is_global = 0;
2329 * Here we determine if it's an acceptable address. An acceptable
2330 * address means it is the same scope or a higher scope, but we can
2331 * allow for NAT, which means it's OK to have a global dest and a
2334 * L = loopback, P = private, G = global
2335 * -----------------------------------------
2336 * src | dest | result
2337 * -----------------------------------------
2339 * -----------------------------------------
2340 * P | L | yes-v4 no-v6
2341 * -----------------------------------------
2343 * -----------------------------------------
2345 * -----------------------------------------
2347 * -----------------------------------------
2348 * G | P | yes - May not work
2349 * -----------------------------------------
2351 * -----------------------------------------
2352 * P | G | yes - May not work
2353 * -----------------------------------------
2355 * -----------------------------------------
2358 if (ifa->address.sa.sa_family != fam) {
2359 /* forget non-matching family */
2360 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2361 ifa->address.sa.sa_family, fam);
2364 /* Ok the address may be ok */
2365 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2366 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2367 dest_is_loop, dest_is_priv);
2368 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2372 if (fam == AF_INET6) {
2373 /* ok to use deprecated addresses? */
2374 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2377 if (ifa->src_is_priv) {
2378 /* Special case, linklocal to loop */
2385 * Now that we know what is what, implement our table. This could in
2386 * theory be done slicker (it used to be), but this is
2387 * straightforward and easier to validate :-)
2389 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2392 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2395 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2398 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2401 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2402 /* it's an acceptable address */
2407 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2409 struct sctp_laddr *laddr;
2412 /* There are no restrictions, no TCB :-) */
2415 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2416 if (laddr->ifa == NULL) {
2417 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2421 if (laddr->ifa == ifa) {
2422 /* Yes it is on the list */
2431 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2433 struct sctp_laddr *laddr;
2437 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2438 if (laddr->ifa == NULL) {
2439 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2443 if ((laddr->ifa == ifa) && laddr->action == 0)
2452 static struct sctp_ifa *
2453 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2456 int non_asoc_addr_ok,
2457 uint8_t dest_is_priv,
2458 uint8_t dest_is_loop,
2461 struct sctp_laddr *laddr, *starting_point;
2464 struct sctp_ifn *sctp_ifn;
2465 struct sctp_ifa *sctp_ifa, *sifa;
2466 struct sctp_vrf *vrf;
2469 vrf = sctp_find_vrf(vrf_id);
2473 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2474 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2475 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2477 * First question: is the ifn we will emit on in our list? If so, we
2478 * want such an address. Note that we first looked for a preferred
2482 /* is a preferred one on the interface we route out? */
2483 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2485 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2486 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2487 &sctp_ifa->address.sin.sin_addr) != 0)) {
2492 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2493 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2494 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2498 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2499 (non_asoc_addr_ok == 0))
2501 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2506 if (sctp_is_addr_in_ep(inp, sifa)) {
2507 atomic_add_int(&sifa->refcount, 1);
2513 * OK, now we need to find one on the list of the addresses. We
2514 * can't get one on the emitting interface, so let's first find a
2515 * preferred one. If not that, an acceptable one; otherwise... we
2518 starting_point = inp->next_addr_touse;
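/*
 * Rotate through the endpoint's bound address list: begin at the remembered
 * next_addr_touse; if the scan for a preferred address reaches the end of
 * the list, wrap once to the head before falling back to the second scan
 * for merely acceptable addresses.
 */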
2520 if (inp->next_addr_touse == NULL) {
2521 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2524 for (laddr = inp->next_addr_touse; laddr;
2525 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2526 if (laddr->ifa == NULL) {
2527 /* address has been removed */
2530 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2531 /* address is being deleted */
2534 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2538 atomic_add_int(&sifa->refcount, 1);
2541 if (resettotop == 0) {
2542 inp->next_addr_touse = NULL;
2545 inp->next_addr_touse = starting_point;
2548 if (inp->next_addr_touse == NULL) {
2549 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2552 /* ok, what about an acceptable address in the inp */
2553 for (laddr = inp->next_addr_touse; laddr;
2554 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2555 if (laddr->ifa == NULL) {
2556 /* address has been removed */
2559 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2560 /* address is being deleted */
2563 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2567 atomic_add_int(&sifa->refcount, 1);
2570 if (resettotop == 0) {
2571 inp->next_addr_touse = NULL;
2572 goto once_again_too;
2575 * no address bound can be a source for the destination we are in
2583 static struct sctp_ifa *
2584 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2585 struct sctp_tcb *stcb,
2588 uint8_t dest_is_priv,
2589 uint8_t dest_is_loop,
2590 int non_asoc_addr_ok,
2593 struct sctp_laddr *laddr, *starting_point;
2595 struct sctp_ifn *sctp_ifn;
2596 struct sctp_ifa *sctp_ifa, *sifa;
2597 uint8_t start_at_beginning = 0;
2598 struct sctp_vrf *vrf;
2602 * First question: is the ifn we will emit on in our list? If so, we
2605 vrf = sctp_find_vrf(vrf_id);
2609 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2610 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2611 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2614 * first question, is the ifn we will emit on in our list? If so,
2615 * we want that one. First we look for a preferred. Second, we go
2616 * for an acceptable.
2619 /* first try for a preferred address on the ep */
2620 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2622 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2623 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2624 &sctp_ifa->address.sin.sin_addr) != 0)) {
2629 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2630 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2631 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2635 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2637 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2638 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2641 if (((non_asoc_addr_ok == 0) &&
2642 (sctp_is_addr_restricted(stcb, sifa))) ||
2643 (non_asoc_addr_ok &&
2644 (sctp_is_addr_restricted(stcb, sifa)) &&
2645 (!sctp_is_addr_pending(stcb, sifa)))) {
2646 /* on the no-no list */
2649 atomic_add_int(&sifa->refcount, 1);
2653 /* next try for an acceptable address on the ep */
2654 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2656 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2657 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2658 &sctp_ifa->address.sin.sin_addr) != 0)) {
2663 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2664 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2665 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2669 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2671 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2672 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2675 if (((non_asoc_addr_ok == 0) &&
2676 (sctp_is_addr_restricted(stcb, sifa))) ||
2677 (non_asoc_addr_ok &&
2678 (sctp_is_addr_restricted(stcb, sifa)) &&
2679 (!sctp_is_addr_pending(stcb, sifa)))) {
2680 /* on the no-no list */
2683 atomic_add_int(&sifa->refcount, 1);
2690 * If we can't find one like that, then we must look at all addresses
2691 * bound, picking first a preferred one and then an acceptable one.
2693 starting_point = stcb->asoc.last_used_address;
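/*
 * Same rotation idea as the inp case, but keyed off the association: the
 * scan starts at asoc.last_used_address, wraps once to the head of the
 * endpoint's address list, and is then repeated (sctp_from_the_top2) looking
 * for merely acceptable instead of preferred addresses.
 */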
2695 if (stcb->asoc.last_used_address == NULL) {
2696 start_at_beginning = 1;
2697 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2699 /* search beginning with the last used address */
2700 for (laddr = stcb->asoc.last_used_address; laddr;
2701 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2702 if (laddr->ifa == NULL) {
2703 /* address has been removed */
2706 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2707 /* address is being deleted */
2710 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2713 if (((non_asoc_addr_ok == 0) &&
2714 (sctp_is_addr_restricted(stcb, sifa))) ||
2715 (non_asoc_addr_ok &&
2716 (sctp_is_addr_restricted(stcb, sifa)) &&
2717 (!sctp_is_addr_pending(stcb, sifa)))) {
2718 /* on the no-no list */
2721 stcb->asoc.last_used_address = laddr;
2722 atomic_add_int(&sifa->refcount, 1);
2725 if (start_at_beginning == 0) {
2726 stcb->asoc.last_used_address = NULL;
2727 goto sctp_from_the_top;
2729 /* now try for any higher scope than the destination */
2730 stcb->asoc.last_used_address = starting_point;
2731 start_at_beginning = 0;
2733 if (stcb->asoc.last_used_address == NULL) {
2734 start_at_beginning = 1;
2735 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2737 /* search beginning with the last used address */
2738 for (laddr = stcb->asoc.last_used_address; laddr;
2739 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2740 if (laddr->ifa == NULL) {
2741 /* address has been removed */
2744 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2745 /* address is being deleted */
2748 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2752 if (((non_asoc_addr_ok == 0) &&
2753 (sctp_is_addr_restricted(stcb, sifa))) ||
2754 (non_asoc_addr_ok &&
2755 (sctp_is_addr_restricted(stcb, sifa)) &&
2756 (!sctp_is_addr_pending(stcb, sifa)))) {
2757 /* on the no-no list */
2760 stcb->asoc.last_used_address = laddr;
2761 atomic_add_int(&sifa->refcount, 1);
2764 if (start_at_beginning == 0) {
2765 stcb->asoc.last_used_address = NULL;
2766 goto sctp_from_the_top2;
2771 static struct sctp_ifa *
2772 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2773 struct sctp_inpcb *inp,
2774 struct sctp_tcb *stcb,
2775 int non_asoc_addr_ok,
2776 uint8_t dest_is_loop,
2777 uint8_t dest_is_priv,
2783 struct sctp_ifa *ifa, *sifa;
2784 int num_eligible_addr = 0;
2787 struct sockaddr_in6 sin6, lsa6;
2789 if (fam == AF_INET6) {
2790 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2791 (void)sa6_recoverscope(&sin6);
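/*
 * Recover the embedded scope id of the link-local destination so the
 * scope-id comparison against candidate source addresses below works.
 */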
2794 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2796 if ((ifa->address.sa.sa_family == AF_INET) &&
2797 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2798 &ifa->address.sin.sin_addr) != 0)) {
2803 if ((ifa->address.sa.sa_family == AF_INET6) &&
2804 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2805 &ifa->address.sin6.sin6_addr) != 0)) {
2809 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2810 (non_asoc_addr_ok == 0))
2812 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2817 if (fam == AF_INET6 &&
2819 sifa->src_is_loop && sifa->src_is_priv) {
2821 * don't allow fe80::1 to be a src on loop ::1, we
2822 * don't list it to the peer so we will get an
2827 if (fam == AF_INET6 &&
2828 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2829 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2831 * link-local <-> link-local must belong to the same
2834 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2835 (void)sa6_recoverscope(&lsa6);
2836 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2843 * Check if the IPv6 address matches the next-hop. In the
2844 * mobile case, an old IPv6 address may not be deleted from the
2845 * interface. Then, the interface has previous and new
2846 * addresses. We should use one corresponding to the
2847 * next-hop. (by micchie)
2850 if (stcb && fam == AF_INET6 &&
2851 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2852 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2859 /* Avoid topologically incorrect IPv4 address */
2860 if (stcb && fam == AF_INET &&
2861 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2862 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2868 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2871 if (((non_asoc_addr_ok == 0) &&
2872 (sctp_is_addr_restricted(stcb, sifa))) ||
2873 (non_asoc_addr_ok &&
2874 (sctp_is_addr_restricted(stcb, sifa)) &&
2875 (!sctp_is_addr_pending(stcb, sifa)))) {
2877 * It is restricted for some reason..
2878 * probably not yet added.
2883 if (num_eligible_addr >= addr_wanted) {
2886 num_eligible_addr++;
2893 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2894 struct sctp_inpcb *inp,
2895 struct sctp_tcb *stcb,
2896 int non_asoc_addr_ok,
2897 uint8_t dest_is_loop,
2898 uint8_t dest_is_priv,
2901 struct sctp_ifa *ifa, *sifa;
2902 int num_eligible_addr = 0;
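/*
 * Apply the same jail, defer-use, preferred-address, scope and restriction
 * filters used by sctp_select_nth_preferred_addr_from_ifn_boundall() above,
 * so the count returned tracks what that selector can actually hand out.
 */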
2904 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2906 if ((ifa->address.sa.sa_family == AF_INET) &&
2907 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2908 &ifa->address.sin.sin_addr) != 0)) {
2913 if ((ifa->address.sa.sa_family == AF_INET6) &&
2915 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2916 &ifa->address.sin6.sin6_addr) != 0)) {
2920 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2921 (non_asoc_addr_ok == 0)) {
2924 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2930 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2933 if (((non_asoc_addr_ok == 0) &&
2934 (sctp_is_addr_restricted(stcb, sifa))) ||
2935 (non_asoc_addr_ok &&
2936 (sctp_is_addr_restricted(stcb, sifa)) &&
2937 (!sctp_is_addr_pending(stcb, sifa)))) {
2939 * It is restricted for some reason..
2940 * probably not yet added.
2945 num_eligible_addr++;
2947 return (num_eligible_addr);
2950 static struct sctp_ifa *
2951 sctp_choose_boundall(struct sctp_inpcb *inp,
2952 struct sctp_tcb *stcb,
2953 struct sctp_nets *net,
2956 uint8_t dest_is_priv,
2957 uint8_t dest_is_loop,
2958 int non_asoc_addr_ok,
2961 int cur_addr_num = 0, num_preferred = 0;
2963 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2964 struct sctp_ifa *sctp_ifa, *sifa;
2966 struct sctp_vrf *vrf;
2974 * For boundall we can use any address in the association.
2975 * If non_asoc_addr_ok is set we can use any address (at least in
2976 * theory). So we look for preferred addresses first. If we find one,
2977 * we use it. Otherwise we next try to get an address on the
2978 * interface, which we should be able to do (unless non_asoc_addr_ok
2979 * is false and we are routed out that way). In these cases where we
2980 * can't use the address of the interface we go through all the
2981 * ifn's looking for an address we can use and fill that in. Punting
2982 * means we send back address 0, which will probably cause problems
2983 * actually since then IP will fill in the address of the route ifn,
2984 * which means we probably already rejected it.. i.e. here comes an
2987 vrf = sctp_find_vrf(vrf_id);
2991 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2992 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2993 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
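/*
 * Plan of attack: plan A looks for a preferred address on the interface the
 * route goes out; plan B (bound_all_plan_b) for a preferred address on any
 * interface; plan C for an acceptable address on the emitting interface;
 * plan D for an acceptable address on any interface; and, as a last resort,
 * the private-address scope is relaxed and the search repeated
 * (again_with_private_addresses_allowed).
 */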
2994 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2995 if (sctp_ifn == NULL) {
2996 /* ?? We don't have this guy ?? */
2997 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2998 goto bound_all_plan_b;
3000 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
3001 ifn_index, sctp_ifn->ifn_name);
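/*
 * Each net remembers the index of the preferred source address it was handed
 * last (indx_of_eligible_next_to_use), so successive sends to this
 * destination rotate through the eligible addresses on the emit interface.
 */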
3004 cur_addr_num = net->indx_of_eligible_next_to_use;
3006 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3011 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3012 num_preferred, sctp_ifn->ifn_name);
3013 if (num_preferred == 0) {
3015 * no eligible addresses, we must use some other interface
3016 * address if we can find one.
3018 goto bound_all_plan_b;
3021 * Ok we have num_eligible_addr set with how many we can use, this
3022 * may vary from call to call due to addresses being deprecated
3025 if (cur_addr_num >= num_preferred) {
3029 * select the nth address from the list (where cur_addr_num is the
3030 * nth) and 0 is the first one, 1 is the second one etc...
3032 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3034 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3035 dest_is_priv, cur_addr_num, fam, ro);
3037 /* if sctp_ifa is NULL, something changed; fall back to plan B. */
3039 atomic_add_int(&sctp_ifa->refcount, 1);
3041 /* save off which address we will want next */
3042 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3047 * plan_b: Look at all interfaces and find a preferred address. If
3048 * no preferred address is found, fall through to plan_c.
3051 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3052 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3053 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3054 sctp_ifn->ifn_name);
3055 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3056 /* wrong base scope */
3057 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3060 if ((sctp_ifn == looked_at) && looked_at) {
3061 /* already looked at this guy */
3062 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3065 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3066 dest_is_loop, dest_is_priv, fam);
3067 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3068 "Found ifn:%p %d preferred source addresses\n",
3069 ifn, num_preferred);
3070 if (num_preferred == 0) {
3071 /* None on this interface. */
3072 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3075 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3076 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3077 num_preferred, (void *)sctp_ifn, cur_addr_num);
3080 * Ok we have num_eligible_addr set with how many we can
3081 * use, this may vary from call to call due to addresses
3082 * being deprecated etc..
3084 if (cur_addr_num >= num_preferred) {
3087 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3088 dest_is_priv, cur_addr_num, fam, ro);
3092 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3093 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3095 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3096 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3097 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3098 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3100 atomic_add_int(&sifa->refcount, 1);
3104 again_with_private_addresses_allowed:
3106 /* plan_c: do we have an acceptable address on the emit interface */
3108 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3109 if (emit_ifn == NULL) {
3110 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3113 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3114 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3116 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3117 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3118 &sctp_ifa->address.sin.sin_addr) != 0)) {
3119 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3124 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3125 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3126 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3127 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3131 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3132 (non_asoc_addr_ok == 0)) {
3133 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3136 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3139 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3143 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3144 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3148 if (((non_asoc_addr_ok == 0) &&
3149 (sctp_is_addr_restricted(stcb, sifa))) ||
3150 (non_asoc_addr_ok &&
3151 (sctp_is_addr_restricted(stcb, sifa)) &&
3152 (!sctp_is_addr_pending(stcb, sifa)))) {
3154 * It is restricted for some reason..
3155 * probably not yet added.
3157 SCTPDBG(SCTP_DEBUG_OUTPUT2, "It's restricted\n");
3162 SCTP_PRINTF("Stcb is null - no print\n");
3164 atomic_add_int(&sifa->refcount, 1);
3169 * plan_d: We are in trouble. No preferred address on the emit
3170 * interface. And not even a preferred address on all interfaces. Go
3171 * out and see if we can find an acceptable address somewhere
3172 * amongst all interfaces.
3174 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3175 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3176 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3177 /* wrong base scope */
3180 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3182 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3183 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3184 &sctp_ifa->address.sin.sin_addr) != 0)) {
3189 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3190 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3191 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3195 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3196 (non_asoc_addr_ok == 0))
3198 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3204 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3208 if (((non_asoc_addr_ok == 0) &&
3209 (sctp_is_addr_restricted(stcb, sifa))) ||
3210 (non_asoc_addr_ok &&
3211 (sctp_is_addr_restricted(stcb, sifa)) &&
3212 (!sctp_is_addr_pending(stcb, sifa)))) {
3214 * It is restricted for some
3215 * reason.. probably not yet added.
3225 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3226 stcb->asoc.scope.ipv4_local_scope = 1;
3228 goto again_with_private_addresses_allowed;
3229 } else if (retried == 1) {
3230 stcb->asoc.scope.ipv4_local_scope = 0;
3237 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3238 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3239 /* wrong base scope */
3242 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3243 struct sctp_ifa *tmp_sifa;
3246 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3247 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3248 &sctp_ifa->address.sin.sin_addr) != 0)) {
3253 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3254 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3255 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3259 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3260 (non_asoc_addr_ok == 0))
3262 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3265 if (tmp_sifa == NULL) {
3268 if (tmp_sifa == sifa) {
3272 if (sctp_is_address_in_scope(tmp_sifa,
3273 &stcb->asoc.scope, 0) == 0) {
3276 if (((non_asoc_addr_ok == 0) &&
3277 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3278 (non_asoc_addr_ok &&
3279 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3280 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3290 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3291 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3292 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3297 atomic_add_int(&sifa->refcount, 1);
3305 /* tcb may be NULL */
3307 sctp_source_address_selection(struct sctp_inpcb *inp,
3308 struct sctp_tcb *stcb,
3310 struct sctp_nets *net,
3311 int non_asoc_addr_ok, uint32_t vrf_id)
3313 struct sctp_ifa *answer;
3314 uint8_t dest_is_priv, dest_is_loop;
3318 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3322 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3327 * Rules: - Find the route if needed, cache if I can. - Look at
3328 * interface address in route; is it in the bound list? If so we
3329 * have the best source. - If not we must rotate amongst the
3334 * Do we need to pay attention to scope? We can have a private address
3335 * or a global address we are sourcing or sending to. So if we draw
3337 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3339 * ------------------------------------------
3340 * source * dest * result
3341 * -----------------------------------------
3342 * <a> Private * Global * NAT
3343 * -----------------------------------------
3344 * <b> Private * Private * No problem
3345 * -----------------------------------------
3346 * <c> Global * Private * Huh, How will this work?
3347 * -----------------------------------------
3348 * <d> Global * Global * No Problem
3349 *------------------------------------------
3350 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3352 *------------------------------------------
3353 * source * dest * result
3354 * -----------------------------------------
3355 * <a> Linklocal * Global *
3356 * -----------------------------------------
3357 * <b> Linklocal * Linklocal * No problem
3358 * -----------------------------------------
3359 * <c> Global * Linklocal * Huh, How will this work?
3360 * -----------------------------------------
3361 * <d> Global * Global * No Problem
3362 *------------------------------------------
3363 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3365 * And then we add to that what happens if there are multiple addresses
3366 * assigned to an interface. Remember the ifa on an ifn is a linked
3367 * list of addresses. So one interface can have more than one IP
3368 * address. What happens if we have both a private and a global
3369 * address? Do we then use context of destination to sort out which
3370 * one is best? And what about NATs? Sending P->G may get you a NAT
3371 * translation, or should you select the G that's on the interface in
3376 * - count the number of addresses on the interface.
3377 * - if it is one, no problem except case <c>.
3378 * For <a> we will assume a NAT out there.
3379 * - if there are more than one, then we need to worry about scope P
3380 * or G. We should prefer G -> G and P -> P if possible.
3381 * Then as a secondary fall back to mixed types G->P being a last
3383 * - The above all works for bound all, but for bound specific we need
3384 * to use the same concept, except that we only consider the bound
3385 * addresses. If the bound set is NOT assigned to the interface then
3386 * we must use rotation amongst the bound addresses..
3388 if (ro->ro_rt == NULL) {
3390 * Need a route to cache.
3392 SCTP_RTALLOC(ro, vrf_id);
3394 if (ro->ro_rt == NULL) {
3397 fam = ro->ro_dst.sa_family;
3398 dest_is_priv = dest_is_loop = 0;
3399 /* Setup our scopes for the destination */
3403 /* Scope based on outbound address */
3404 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3407 /* mark it as local */
3408 net->addr_is_local = 1;
3410 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3417 /* Scope based on outbound address */
3418 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3419 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3421 * If the address is a loopback address, which
3422 * consists of "::1" OR "fe80::1%lo0", we are
3423 * loopback scope. But we don't use dest_is_priv
3424 * (link local addresses).
3428 /* mark it as local */
3429 net->addr_is_local = 1;
3431 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3437 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3438 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3439 SCTP_IPI_ADDR_RLOCK();
3440 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3444 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3445 dest_is_priv, dest_is_loop,
3446 non_asoc_addr_ok, fam);
3447 SCTP_IPI_ADDR_RUNLOCK();
3454 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3455 vrf_id, dest_is_priv,
3457 non_asoc_addr_ok, fam);
3459 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3464 SCTP_IPI_ADDR_RUNLOCK();
3469 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3472 int tlen, at, found;
3473 struct sctp_sndinfo sndinfo;
3474 struct sctp_prinfo prinfo;
3475 struct sctp_authinfo authinfo;
3477 tlen = SCTP_BUF_LEN(control);
3481 * Independent of how many mbufs, find the c_type inside the control
3482 * structure and copy out the data.
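 * When the caller asks for SCTP_SNDRCV, the loop also accepts the newer
 * SCTP_SNDINFO / SCTP_PRINFO / SCTP_AUTHINFO cmsgs and folds them into a
 * single struct sctp_sndrcvinfo for the legacy path.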
3485 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3486 /* There is not enough room for one more. */
3489 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3490 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3491 /* We don't have a complete CMSG header. */
3494 if (((int)cmh.cmsg_len + at) > tlen) {
3495 /* We don't have the complete CMSG. */
3498 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3499 ((c_type == cmh.cmsg_type) ||
3500 ((c_type == SCTP_SNDRCV) &&
3501 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3502 (cmh.cmsg_type == SCTP_PRINFO) ||
3503 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3504 if (c_type == cmh.cmsg_type) {
3505 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
3508 /* It is exactly what we want. Copy it out. */
3509 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data);
3512 struct sctp_sndrcvinfo *sndrcvinfo;
3514 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3516 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3519 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3521 switch (cmh.cmsg_type) {
3523 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
3526 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3527 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3528 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3529 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3530 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3531 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3534 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
3537 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3538 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3539 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3541 sndrcvinfo->sinfo_timetolive = 0;
3543 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3546 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
3549 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3550 sndrcvinfo->sinfo_keynumber_valid = 1;
3551 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3559 at += CMSG_ALIGN(cmh.cmsg_len);
3565 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3569 struct sctp_initmsg initmsg;
3572 struct sockaddr_in sin;
3576 struct sockaddr_in6 sin6;
3580 tlen = SCTP_BUF_LEN(control);
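/*
 * Walk the IPPROTO_SCTP-level cmsgs supplied with an implicit setup:
 * SCTP_INIT seeds association parameters (stream counts, max init attempts,
 * init timeout) and SCTP_DSTADDRV4/SCTP_DSTADDRV6 add further remote
 * addresses for the association.
 */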
3583 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3584 /* There is not enough room for one more. */
3588 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3589 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3590 /* We don't have a complete CMSG header. */
3594 if (((int)cmh.cmsg_len + at) > tlen) {
3595 /* We don't have the complete CMSG. */
3599 if (cmh.cmsg_level == IPPROTO_SCTP) {
3600 switch (cmh.cmsg_type) {
3602 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
3606 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3607 if (initmsg.sinit_max_attempts)
3608 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3609 if (initmsg.sinit_num_ostreams)
3610 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3611 if (initmsg.sinit_max_instreams)
3612 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3613 if (initmsg.sinit_max_init_timeo)
3614 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3615 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3616 struct sctp_stream_out *tmp_str;
3619 /* Default is NOT correct */
3620 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3621 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3622 SCTP_TCB_UNLOCK(stcb);
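/*
 * The TCB lock is dropped around the allocation (which may sleep) and
 * retaken before the larger stream-out array is swapped in below.
 */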
3623 SCTP_MALLOC(tmp_str,
3624 struct sctp_stream_out *,
3625 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3627 SCTP_TCB_LOCK(stcb);
3628 if (tmp_str != NULL) {
3629 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3630 stcb->asoc.strmout = tmp_str;
3631 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3633 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3635 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3636 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3637 stcb->asoc.strmout[i].chunks_on_queues = 0;
3638 stcb->asoc.strmout[i].next_sequence_send = 0;
3639 stcb->asoc.strmout[i].stream_no = i;
3640 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3641 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
3646 case SCTP_DSTADDRV4:
3647 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3651 memset(&sin, 0, sizeof(struct sockaddr_in));
3652 sin.sin_family = AF_INET;
3653 sin.sin_len = sizeof(struct sockaddr_in);
3654 sin.sin_port = stcb->rport;
3655 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3656 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3657 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3658 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3662 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3663 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3670 case SCTP_DSTADDRV6:
3671 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3675 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3676 sin6.sin6_family = AF_INET6;
3677 sin6.sin6_len = sizeof(struct sockaddr_in6);
3678 sin6.sin6_port = stcb->rport;
3679 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3680 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3681 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3686 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3687 in6_sin6_2_sin(&sin, &sin6);
3688 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3689 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3690 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3694 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3695 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3701 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
3702 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3712 at += CMSG_ALIGN(cmh.cmsg_len);
3717 static struct sctp_tcb *
3718 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3720 struct mbuf *control,
3721 struct sctp_nets **net_p,
3726 struct sctp_tcb *stcb;
3727 struct sockaddr *addr;
3730 struct sockaddr_in sin;
3734 struct sockaddr_in6 sin6;
3738 tlen = SCTP_BUF_LEN(control);
3741 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3742 /* There is not enough room for one more. */
3746 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3747 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3748 /* We don't have a complete CMSG header. */
3752 if (((int)cmh.cmsg_len + at) > tlen) {
3753 /* We don't have the complete CMSG. */
3757 if (cmh.cmsg_level == IPPROTO_SCTP) {
3758 switch (cmh.cmsg_type) {
3760 case SCTP_DSTADDRV4:
3761 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3765 memset(&sin, 0, sizeof(struct sockaddr_in));
3766 sin.sin_family = AF_INET;
3767 sin.sin_len = sizeof(struct sockaddr_in);
3768 sin.sin_port = port;
3769 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3770 addr = (struct sockaddr *)&sin;
3774 case SCTP_DSTADDRV6:
3775 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3779 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3780 sin6.sin6_family = AF_INET6;
3781 sin6.sin6_len = sizeof(struct sockaddr_in6);
3782 sin6.sin6_port = port;
3783 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3785 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3786 in6_sin6_2_sin(&sin, &sin6);
3787 addr = (struct sockaddr *)&sin;
3790 addr = (struct sockaddr *)&sin6;
3798 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3804 at += CMSG_ALIGN(cmh.cmsg_len);
3809 static struct mbuf *
3810 sctp_add_cookie(struct mbuf *init, int init_offset,
3811 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
3813 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3814 struct sctp_state_cookie *stc;
3815 struct sctp_paramhdr *ph;
3820 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3821 sizeof(struct sctp_paramhdr)), 0,
3822 M_DONTWAIT, 1, MT_DATA);
3826 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
3827 if (copy_init == NULL) {
3831 #ifdef SCTP_MBUF_LOGGING
3832 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3835 for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
3836 if (SCTP_BUF_IS_EXTENDED(mat)) {
3837 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3842 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3844 if (copy_initack == NULL) {
3846 sctp_m_freem(copy_init);
3849 #ifdef SCTP_MBUF_LOGGING
3850 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3853 for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
3854 if (SCTP_BUF_IS_EXTENDED(mat)) {
3855 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3860 /* easy side, we just drop it on the end */
3861 ph = mtod(mret, struct sctp_paramhdr *);
3862 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3863 sizeof(struct sctp_paramhdr);
3864 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3865 sizeof(struct sctp_paramhdr));
3866 ph->param_type = htons(SCTP_STATE_COOKIE);
3867 ph->param_length = 0; /* fill in at the end */
3868 /* Fill in the stc cookie data */
3869 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3871 /* tack the INIT and then the INIT-ACK onto the chain */
3873 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3874 cookie_sz += SCTP_BUF_LEN(m_at);
3875 if (SCTP_BUF_NEXT(m_at) == NULL) {
3876 SCTP_BUF_NEXT(m_at) = copy_init;
3880 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3881 cookie_sz += SCTP_BUF_LEN(m_at);
3882 if (SCTP_BUF_NEXT(m_at) == NULL) {
3883 SCTP_BUF_NEXT(m_at) = copy_initack;
3887 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3888 cookie_sz += SCTP_BUF_LEN(m_at);
3889 if (SCTP_BUF_NEXT(m_at) == NULL) {
3893 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
3895 /* no space, so free the entire chain */
3899 SCTP_BUF_LEN(sig) = 0;
3900 SCTP_BUF_NEXT(m_at) = sig;
3902 foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
3903 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3905 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3906 cookie_sz += SCTP_SIGNATURE_SIZE;
3907 ph->param_length = htons(cookie_sz);
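/*
 * Resulting STATE-COOKIE parameter layout (illustrative sketch only):
 *
 *   [sctp_paramhdr][sctp_state_cookie][copy of INIT][copy of INIT-ACK][signature]
 *
 * cookie_sz covers all of the above.  The signature bytes are zeroed here
 * and are filled in later by sctp_hmac_m() from sctp_send_initiate_ack(),
 * which signs everything except the trailing signature itself.
 */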
3913 sctp_get_ect(struct sctp_tcb *stcb)
3915 if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
3916 return (SCTP_ECT0_BIT);
3922 #if defined(INET) || defined(INET6)
3924 sctp_handle_no_route(struct sctp_tcb *stcb,
3925 struct sctp_nets *net,
3928 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3931 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3932 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3933 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3934 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3935 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
3936 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3940 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3941 net->dest_state &= ~SCTP_ADDR_PF;
3945 if (net == stcb->asoc.primary_destination) {
3946 /* need a new primary */
3947 struct sctp_nets *alt;
3949 alt = sctp_find_alternate_net(stcb, net, 0);
3951 if (stcb->asoc.alternate) {
3952 sctp_free_remote_addr(stcb->asoc.alternate);
3954 stcb->asoc.alternate = alt;
3955 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3956 if (net->ro._s_addr) {
3957 sctp_free_ifa(net->ro._s_addr);
3958 net->ro._s_addr = NULL;
3960 net->src_addr_selected = 0;
3970 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3971 struct sctp_tcb *stcb, /* may be NULL */
3972 struct sctp_nets *net,
3973 struct sockaddr *to,
3975 uint32_t auth_offset,
3976 struct sctp_auth_chunk *auth,
3977 uint16_t auth_keyid,
3978 int nofragment_flag,
3985 union sctp_sockstore *over_addr,
3986 uint8_t use_mflowid, uint32_t mflowid,
3987 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3988 int so_locked SCTP_UNUSED
3993 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3996 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3997 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3998 * - fill in the HMAC digest of any AUTH chunk in the packet.
3999 * - calculate and fill in the SCTP checksum.
4000 * - prepend an IP address header.
4001 * - if boundall use INADDR_ANY.
4002 * - if boundspecific do source address selection.
4003 * - set fragmentation option for ipV4.
4004 * - On return from IP output, check/adjust mtu size of output
4005 * interface and smallest_mtu size as well.
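 *
 * Illustrative sketch (not part of the build): with IPv4 and UDP
 * encapsulation (port != 0) the packet assembled below is laid out as
 *
 *   [struct ip][struct udphdr][struct sctphdr][chunks ...]
 *
 * so the CRC32c is filled in over everything after the encapsulating
 * headers, as done further down:
 *
 *   sctphdr->checksum = 0;
 *   sctphdr->checksum = sctp_calculate_cksum(m,
 *       sizeof(struct ip) + sizeof(struct udphdr));
 *
 * In the non-encapsulated case the checksum is instead delegated via
 * m->m_pkthdr.csum_flags = CSUM_SCTP (delayed/offloaded CRC32c).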
4007 /* Will need ifdefs around this */
4009 struct sctphdr *sctphdr;
4013 #if defined(INET) || defined(INET6)
4017 #if defined(INET) || defined(INET6)
4019 sctp_route_t *ro = NULL;
4020 struct udphdr *udp = NULL;
4025 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4026 struct socket *so = NULL;
4030 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4031 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4035 #if defined(INET) || defined(INET6)
4037 vrf_id = stcb->asoc.vrf_id;
4039 vrf_id = inp->def_vrf_id;
4042 /* fill in the HMAC digest for any AUTH chunk in the packet */
4043 if ((auth != NULL) && (stcb != NULL)) {
4044 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4047 tos_value = net->dscp;
4049 tos_value = stcb->asoc.default_dscp;
4051 tos_value = inp->sctp_ep.default_dscp;
4054 switch (to->sa_family) {
4058 struct ip *ip = NULL;
4059 sctp_route_t iproute;
4062 len = sizeof(struct ip) + sizeof(struct sctphdr);
4064 len += sizeof(struct udphdr);
4066 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
4069 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4072 SCTP_ALIGN_TO_END(newm, len);
4073 SCTP_BUF_LEN(newm) = len;
4074 SCTP_BUF_NEXT(newm) = m;
4078 if (net->flowidset == 0) {
4079 panic("Flow ID not set");
4082 m->m_pkthdr.flowid = net->flowid;
4083 m->m_flags |= M_FLOWID;
4085 if (use_mflowid != 0) {
4086 m->m_pkthdr.flowid = mflowid;
4087 m->m_flags |= M_FLOWID;
4090 packet_length = sctp_calculate_len(m);
4091 ip = mtod(m, struct ip *);
4092 ip->ip_v = IPVERSION;
4093 ip->ip_hl = (sizeof(struct ip) >> 2);
4094 if (tos_value == 0) {
4096 * This means, in particular, that it is not set
4097 * at the SCTP layer. So use the value from
4100 tos_value = inp->ip_inp.inp.inp_ip_tos;
4104 tos_value |= sctp_get_ect(stcb);
4106 if ((nofragment_flag) && (port == 0)) {
4111 /* FreeBSD has a function for ip_id's */
4112 ip->ip_id = ip_newid();
4114 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4115 ip->ip_len = packet_length;
4116 ip->ip_tos = tos_value;
4118 ip->ip_p = IPPROTO_UDP;
4120 ip->ip_p = IPPROTO_SCTP;
4125 memset(&iproute, 0, sizeof(iproute));
4126 memcpy(&ro->ro_dst, to, to->sa_len);
4128 ro = (sctp_route_t *) & net->ro;
4130 /* Now the address selection part */
4131 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4133 /* call the routine to select the src address */
4134 if (net && out_of_asoc_ok == 0) {
4135 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4136 sctp_free_ifa(net->ro._s_addr);
4137 net->ro._s_addr = NULL;
4138 net->src_addr_selected = 0;
4144 if (net->src_addr_selected == 0) {
4145 /* Cache the source address */
4146 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4149 net->src_addr_selected = 1;
4151 if (net->ro._s_addr == NULL) {
4152 /* No route to host */
4153 net->src_addr_selected = 0;
4154 sctp_handle_no_route(stcb, net, so_locked);
4155 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4157 return (EHOSTUNREACH);
4159 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4161 if (over_addr == NULL) {
4162 struct sctp_ifa *_lsrc;
4164 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4168 if (_lsrc == NULL) {
4169 sctp_handle_no_route(stcb, net, so_locked);
4170 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4172 return (EHOSTUNREACH);
4174 ip->ip_src = _lsrc->address.sin.sin_addr;
4175 sctp_free_ifa(_lsrc);
4177 ip->ip_src = over_addr->sin.sin_addr;
4178 SCTP_RTALLOC(ro, vrf_id);
4182 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4183 sctp_handle_no_route(stcb, net, so_locked);
4184 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4186 return (EHOSTUNREACH);
4188 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4189 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4190 udp->uh_dport = port;
4191 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
4193 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4197 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4199 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4202 sctphdr->src_port = src_port;
4203 sctphdr->dest_port = dest_port;
4204 sctphdr->v_tag = v_tag;
4205 sctphdr->checksum = 0;
4208 * If source address selection fails and we find no
4209 * route then the ip_output should fail as well with
4210 * a NO_ROUTE_TO_HOST type error. We probably should
4211 * catch that somewhere and abort the association
4212 * right away (assuming this is an INIT being sent).
4214 if (ro->ro_rt == NULL) {
4216 * src addr selection failed to find a route
4217 * (or valid source addr), so we can't get
4218 * there from here (yet)!
4220 sctp_handle_no_route(stcb, net, so_locked);
4221 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4223 return (EHOSTUNREACH);
4225 if (ro != &iproute) {
4226 memcpy(&iproute, ro, sizeof(*ro));
4228 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4229 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4230 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4231 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
4232 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4235 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4236 /* failed to prepend data, give up */
4237 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4241 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4243 #if defined(SCTP_WITH_NO_CSUM)
4244 SCTP_STAT_INCR(sctps_sendnocrc);
4246 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4247 SCTP_STAT_INCR(sctps_sendswcrc);
4250 SCTP_ENABLE_UDP_CSUM(o_pak);
4253 #if defined(SCTP_WITH_NO_CSUM)
4254 SCTP_STAT_INCR(sctps_sendnocrc);
4256 m->m_pkthdr.csum_flags = CSUM_SCTP;
4257 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4258 SCTP_STAT_INCR(sctps_sendhwcrc);
4261 #ifdef SCTP_PACKET_LOGGING
4262 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4263 sctp_packet_log(o_pak);
4265 /* send it out. table id is taken from stcb */
4266 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4267 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4268 so = SCTP_INP_SO(inp);
4269 SCTP_SOCKET_UNLOCK(so, 0);
4272 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4273 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4274 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4275 atomic_add_int(&stcb->asoc.refcnt, 1);
4276 SCTP_TCB_UNLOCK(stcb);
4277 SCTP_SOCKET_LOCK(so, 0);
4278 SCTP_TCB_LOCK(stcb);
4279 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4282 SCTP_STAT_INCR(sctps_sendpackets);
4283 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4285 SCTP_STAT_INCR(sctps_senderrors);
4287 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4289 /* free temporary routes */
4293 * PMTU check versus smallest asoc MTU goes
4296 if ((ro->ro_rt != NULL) &&
4297 (net->ro._s_addr)) {
4300 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4302 mtu -= sizeof(struct udphdr);
4304 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4305 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4308 } else if (ro->ro_rt == NULL) {
4309 /* route was freed */
4310 if (net->ro._s_addr &&
4311 net->src_addr_selected) {
4312 sctp_free_ifa(net->ro._s_addr);
4313 net->ro._s_addr = NULL;
4315 net->src_addr_selected = 0;
4324 uint32_t flowlabel, flowinfo;
4325 struct ip6_hdr *ip6h;
4326 struct route_in6 ip6route;
4328 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4330 struct sockaddr_in6 lsa6_storage;
4332 u_short prev_port = 0;
4336 flowlabel = net->flowlabel;
4338 flowlabel = stcb->asoc.default_flowlabel;
4340 flowlabel = inp->sctp_ep.default_flowlabel;
4342 if (flowlabel == 0) {
4344 * This means, in particular, that it is not set
4345 * at the SCTP layer. So use the value from
4348 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4350 flowlabel &= 0x000fffff;
4351 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4353 len += sizeof(struct udphdr);
4355 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
4358 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4361 SCTP_ALIGN_TO_END(newm, len);
4362 SCTP_BUF_LEN(newm) = len;
4363 SCTP_BUF_NEXT(newm) = m;
4367 if (net->flowidset == 0) {
4368 panic("Flow ID not set");
4371 m->m_pkthdr.flowid = net->flowid;
4372 m->m_flags |= M_FLOWID;
4374 if (use_mflowid != 0) {
4375 m->m_pkthdr.flowid = mflowid;
4376 m->m_flags |= M_FLOWID;
4379 packet_length = sctp_calculate_len(m);
4381 ip6h = mtod(m, struct ip6_hdr *);
4382 /* protect *sin6 from overwrite */
4383 sin6 = (struct sockaddr_in6 *)to;
4387 /* KAME hack: embed scopeid */
4388 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4389 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4393 memset(&ip6route, 0, sizeof(ip6route));
4394 ro = (sctp_route_t *) & ip6route;
4395 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4397 ro = (sctp_route_t *) & net->ro;
4400 * We assume here that inp_flow is in host byte
4401 * order within the TCB!
4403 if (tos_value == 0) {
4405 * This means, in particular, that it is not set
4406 * at the SCTP layer. So use the value from
4409 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4413 tos_value |= sctp_get_ect(stcb);
4417 flowinfo |= tos_value;
4419 flowinfo |= flowlabel;
4420 ip6h->ip6_flow = htonl(flowinfo);
4422 ip6h->ip6_nxt = IPPROTO_UDP;
4424 ip6h->ip6_nxt = IPPROTO_SCTP;
4426 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4427 ip6h->ip6_dst = sin6->sin6_addr;
4430 * Add SRC address selection here: we can only reuse
4431 * to a limited degree the kame src-addr-sel, since
4432 * we can try their selection but it may not be
4435 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4436 lsa6_tmp.sin6_family = AF_INET6;
4437 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4439 if (net && out_of_asoc_ok == 0) {
4440 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4441 sctp_free_ifa(net->ro._s_addr);
4442 net->ro._s_addr = NULL;
4443 net->src_addr_selected = 0;
4449 if (net->src_addr_selected == 0) {
4450 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4451 /* KAME hack: embed scopeid */
4452 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4453 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4456 /* Cache the source address */
4457 net->ro._s_addr = sctp_source_address_selection(inp,
4463 (void)sa6_recoverscope(sin6);
4464 net->src_addr_selected = 1;
4466 if (net->ro._s_addr == NULL) {
4467 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4468 net->src_addr_selected = 0;
4469 sctp_handle_no_route(stcb, net, so_locked);
4470 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4472 return (EHOSTUNREACH);
4474 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4476 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4477 /* KAME hack: embed scopeid */
4478 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4479 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4482 if (over_addr == NULL) {
4483 struct sctp_ifa *_lsrc;
4485 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4489 if (_lsrc == NULL) {
4490 sctp_handle_no_route(stcb, net, so_locked);
4491 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4493 return (EHOSTUNREACH);
4495 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4496 sctp_free_ifa(_lsrc);
4498 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4499 SCTP_RTALLOC(ro, vrf_id);
4501 (void)sa6_recoverscope(sin6);
4503 lsa6->sin6_port = inp->sctp_lport;
4505 if (ro->ro_rt == NULL) {
4507 * src addr selection failed to find a route
4508 * (or valid source addr), so we can't get
4511 sctp_handle_no_route(stcb, net, so_locked);
4512 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4514 return (EHOSTUNREACH);
4517 * XXX: sa6 may not have a valid sin6_scope_id in
4518 * the non-SCOPEDROUTING case.
4520 bzero(&lsa6_storage, sizeof(lsa6_storage));
4521 lsa6_storage.sin6_family = AF_INET6;
4522 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4523 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4524 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4525 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4530 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4531 lsa6_storage.sin6_port = inp->sctp_lport;
4532 lsa6 = &lsa6_storage;
4533 ip6h->ip6_src = lsa6->sin6_addr;
4536 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4537 sctp_handle_no_route(stcb, net, so_locked);
4538 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4540 return (EHOSTUNREACH);
4542 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4543 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4544 udp->uh_dport = port;
4545 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4547 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4549 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4552 sctphdr->src_port = src_port;
4553 sctphdr->dest_port = dest_port;
4554 sctphdr->v_tag = v_tag;
4555 sctphdr->checksum = 0;
4558 * We set the hop limit now since there is a good
4559 * chance that our ro pointer is now filled
4561 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4562 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4565 /* Copy to be sure something bad is not happening */
4566 sin6->sin6_addr = ip6h->ip6_dst;
4567 lsa6->sin6_addr = ip6h->ip6_src;
4570 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4571 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4572 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4573 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4574 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4576 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4578 * preserve the port and scope for link
4581 prev_scope = sin6->sin6_scope_id;
4582 prev_port = sin6->sin6_port;
4584 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4585 /* failed to prepend data, give up */
4587 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4590 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4592 #if defined(SCTP_WITH_NO_CSUM)
4593 SCTP_STAT_INCR(sctps_sendnocrc);
4595 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4596 SCTP_STAT_INCR(sctps_sendswcrc);
4598 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4599 udp->uh_sum = 0xffff;
4602 #if defined(SCTP_WITH_NO_CSUM)
4603 SCTP_STAT_INCR(sctps_sendnocrc);
4605 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4606 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4607 SCTP_STAT_INCR(sctps_sendhwcrc);
4610 /* send it out. table id is taken from stcb */
4611 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4612 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4613 so = SCTP_INP_SO(inp);
4614 SCTP_SOCKET_UNLOCK(so, 0);
4617 #ifdef SCTP_PACKET_LOGGING
4618 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4619 sctp_packet_log(o_pak);
4621 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4622 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4623 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4624 atomic_add_int(&stcb->asoc.refcnt, 1);
4625 SCTP_TCB_UNLOCK(stcb);
4626 SCTP_SOCKET_LOCK(so, 0);
4627 SCTP_TCB_LOCK(stcb);
4628 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4632 /* for link local this must be done */
4633 sin6->sin6_scope_id = prev_scope;
4634 sin6->sin6_port = prev_port;
4636 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4637 SCTP_STAT_INCR(sctps_sendpackets);
4638 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4640 SCTP_STAT_INCR(sctps_senderrors);
4643 /* Now if we had a temp route free it */
4647 * PMTU check versus smallest asoc MTU goes
4650 if (ro->ro_rt == NULL) {
4651 /* Route was freed */
4652 if (net->ro._s_addr &&
4653 net->src_addr_selected) {
4654 sctp_free_ifa(net->ro._s_addr);
4655 net->ro._s_addr = NULL;
4657 net->src_addr_selected = 0;
4659 if ((ro->ro_rt != NULL) &&
4660 (net->ro._s_addr)) {
4663 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4665 (stcb->asoc.smallest_mtu > mtu)) {
4666 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4669 net->mtu -= sizeof(struct udphdr);
4673 if (ND_IFINFO(ifp)->linkmtu &&
4674 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4675 sctp_mtu_size_reset(inp,
4677 ND_IFINFO(ifp)->linkmtu);
4685 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4686 ((struct sockaddr *)to)->sa_family);
4688 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4695 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4696 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4702 struct sctp_nets *net;
4703 struct sctp_init_chunk *init;
4704 struct sctp_supported_addr_param *sup_addr;
4705 struct sctp_adaptation_layer_indication *ali;
4706 struct sctp_supported_chunk_types_param *pr_supported;
4707 struct sctp_paramhdr *ph;
4708 int cnt_inits_to = 0;
4710 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4712 /* INITs always go to the primary (and usually the ONLY address) */
4713 net = stcb->asoc.primary_destination;
4715 net = TAILQ_FIRST(&stcb->asoc.nets);
4720 /* we confirm any address we send an INIT to */
4721 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4722 (void)sctp_set_primary_addr(stcb, NULL, net);
4724 /* we confirm any address we send an INIT to */
4725 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4727 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4729 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4731 * special hook: if we are sending to link local, it will not
4732 * show up in our private address count.
4734 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4738 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4739 /* This case should not happen */
4740 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4743 /* start the INIT timer */
4744 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4746 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
4748 /* No memory, INIT timer will re-attempt. */
4749 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4752 chunk_len = (uint16_t) sizeof(struct sctp_init_chunk);
4755 * assume peer supports asconf in order to be able to queue local
4756 * address changes while an INIT is in flight and before the assoc
4759 stcb->asoc.peer_supports_asconf = 1;
4760 /* Now let's put the chunk header in place */
4761 init = mtod(m, struct sctp_init_chunk *);
4762 /* now the chunk header */
4763 init->ch.chunk_type = SCTP_INITIATION;
4764 init->ch.chunk_flags = 0;
4765 /* fill in later from mbuf we build */
4766 init->ch.chunk_length = 0;
4767 /* place in my tag */
4768 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4769 /* set up some of the credits. */
4770 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4771 SCTP_MINIMAL_RWND));
4772 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4773 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4774 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4776 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4779 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4780 if (stcb->asoc.scope.ipv4_addr_legal) {
4781 parameter_len += (uint16_t) sizeof(uint16_t);
4783 if (stcb->asoc.scope.ipv6_addr_legal) {
4784 parameter_len += (uint16_t) sizeof(uint16_t);
4786 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4787 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4788 sup_addr->ph.param_length = htons(parameter_len);
4790 if (stcb->asoc.scope.ipv4_addr_legal) {
4791 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4793 if (stcb->asoc.scope.ipv6_addr_legal) {
4794 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4796 padding_len = 4 - 2 * i;
4797 chunk_len += parameter_len;
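/*
 * Note on the pattern repeated for the remaining parameters (sketch):
 * every parameter must start on a 32-bit boundary, so before the next
 * parameter is appended any padding owed by the previous one is
 * zero-filled first:
 *
 *   if (padding_len > 0) {
 *       memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
 *       chunk_len += padding_len;
 *   }
 *
 * and padding_len is then recomputed from the new parameter, e.g.
 * padding_len = SCTP_SIZE32(parameter_len) - parameter_len.
 */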
4799 /* Adaptation layer indication parameter */
4800 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4801 if (padding_len > 0) {
4802 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4803 chunk_len += padding_len;
4806 parameter_len = (uint16_t) sizeof(struct sctp_adaptation_layer_indication);
4807 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4808 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4809 ali->ph.param_length = htons(parameter_len);
4810 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4811 chunk_len += parameter_len;
4813 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4814 /* Add NAT friendly parameter. */
4815 if (padding_len > 0) {
4816 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4817 chunk_len += padding_len;
4820 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4821 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4822 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4823 ph->param_length = htons(parameter_len);
4824 chunk_len += parameter_len;
4826 /* now any cookie time extensions */
4827 if (stcb->asoc.cookie_preserve_req) {
4828 struct sctp_cookie_perserve_param *cookie_preserve;
4830 if (padding_len > 0) {
4831 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4832 chunk_len += padding_len;
4835 parameter_len = (uint16_t) sizeof(struct sctp_cookie_perserve_param);
4836 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4837 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4838 cookie_preserve->ph.param_length = htons(parameter_len);
4839 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4840 stcb->asoc.cookie_preserve_req = 0;
4841 chunk_len += parameter_len;
4844 if (stcb->asoc.ecn_allowed == 1) {
4845 if (padding_len > 0) {
4846 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4847 chunk_len += padding_len;
4850 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4851 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4852 ph->param_type = htons(SCTP_ECN_CAPABLE);
4853 ph->param_length = htons(parameter_len);
4854 chunk_len += parameter_len;
4856 /* And now tell the peer we do support PR-SCTP. */
4857 if (padding_len > 0) {
4858 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4859 chunk_len += padding_len;
4862 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4863 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4864 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4865 ph->param_length = htons(parameter_len);
4866 chunk_len += parameter_len;
4868 /* And now tell the peer we do all the extensions */
4869 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4870 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4872 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4873 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4874 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4875 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4876 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4877 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4878 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4880 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
4881 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4883 parameter_len = (uint16_t) sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4884 pr_supported->ph.param_length = htons(parameter_len);
4885 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4886 chunk_len += parameter_len;
4888 /* add authentication parameters */
4889 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4890 /* attach RANDOM parameter, if available */
4891 if (stcb->asoc.authinfo.random != NULL) {
4892 struct sctp_auth_random *randp;
4894 if (padding_len > 0) {
4895 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4896 chunk_len += padding_len;
4899 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4900 parameter_len = (uint16_t) sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4901 /* random key already contains the header */
4902 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4903 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4904 chunk_len += parameter_len;
4906 /* add HMAC_ALGO parameter */
4907 if ((stcb->asoc.local_hmacs != NULL) &&
4908 (stcb->asoc.local_hmacs->num_algo > 0)) {
4909 struct sctp_auth_hmac_algo *hmacs;
4911 if (padding_len > 0) {
4912 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4913 chunk_len += padding_len;
4916 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4917 parameter_len = (uint16_t) (sizeof(struct sctp_auth_hmac_algo) +
4918 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4919 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4920 hmacs->ph.param_length = htons(parameter_len);
4921 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *) hmacs->hmac_ids);
4922 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4923 chunk_len += parameter_len;
4925 /* add CHUNKS parameter */
4926 if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) {
4927 struct sctp_auth_chunk_list *chunks;
4929 if (padding_len > 0) {
4930 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4931 chunk_len += padding_len;
4934 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4935 parameter_len = (uint16_t) (sizeof(struct sctp_auth_chunk_list) +
4936 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
4937 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4938 chunks->ph.param_length = htons(parameter_len);
4939 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
4940 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4941 chunk_len += parameter_len;
4944 SCTP_BUF_LEN(m) = chunk_len;
4946 /* now the addresses */
4948 * To optimize this we could put the scoping stuff into a structure
4949 * and remove the individual uint8's from the assoc structure. Then
4950 * we could just sifa in the address within the stcb. But for now
4951 * this is a quick hack to get the address stuff teased apart.
4953 sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, m, cnt_inits_to, &padding_len, &chunk_len);
4955 init->ch.chunk_length = htons(chunk_len);
4956 if (padding_len > 0) {
4957 struct mbuf *m_at, *mp_last;
4960 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4961 if (SCTP_BUF_NEXT(m_at) == NULL)
4964 if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) {
4969 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4970 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4971 (struct sockaddr *)&net->ro._l_addr,
4972 m, 0, NULL, 0, 0, 0, 0,
4973 inp->sctp_lport, stcb->rport, htonl(0),
4977 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4978 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4979 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4983 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4984 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4987 * Given a mbuf containing an INIT or INIT-ACK, with param_offset
4988 * pointing to the beginning of the params, i.e. (iphlen +
4989 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4990 * end of the mbuf, verifying that all parameters are known.
4992 * For unknown parameters build and return a mbuf with
4993 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4994 * processing this chunk stop, and set *abort_processing to 1.
4996 * By having param_offset be pre-set to where parameters begin it is
4997 * hoped that this routine may be reused in the future by new
5000 struct sctp_paramhdr *phdr, params;
5002 struct mbuf *mat, *op_err;
5003 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
5004 int at, limit, pad_needed;
5005 uint16_t ptype, plen, padded_size;
5008 *abort_processing = 0;
5011 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
5014 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
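/*
 * Unknown parameter types are handled below according to the upper two
 * bits of the type field (sketch of the checks on ptype further down):
 *
 *   (ptype & 0x4000) != 0  ->  report: append an SCTP_UNRECOG_PARAM
 *                              entry to op_err
 *   (ptype & 0x8000) == 0  ->  stop processing the remaining parameters
 *   otherwise              ->  skip this parameter and continue
 */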
5015 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5016 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5017 ptype = ntohs(phdr->param_type);
5018 plen = ntohs(phdr->param_length);
5019 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5020 /* wacked parameter */
5021 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5024 limit -= SCTP_SIZE32(plen);
5026 * All parameters for all chunks that we know/understand are
5027 * listed here. We process them in other places and take the
5028 * appropriate stop actions per the upper bits. However, this
5029 * is the generic routine that processors can call to get back
5030 * an operr to either incorporate (init-ack) or send.
5032 padded_size = SCTP_SIZE32(plen);
5034 /* Params with variable size */
5035 case SCTP_HEARTBEAT_INFO:
5036 case SCTP_STATE_COOKIE:
5037 case SCTP_UNRECOG_PARAM:
5038 case SCTP_ERROR_CAUSE_IND:
5042 /* Params with variable size within a range */
5043 case SCTP_CHUNK_LIST:
5044 case SCTP_SUPPORTED_CHUNK_EXT:
5045 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5046 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5051 case SCTP_SUPPORTED_ADDRTYPE:
5052 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5053 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5059 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5060 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5065 case SCTP_SET_PRIM_ADDR:
5066 case SCTP_DEL_IP_ADDRESS:
5067 case SCTP_ADD_IP_ADDRESS:
5068 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5069 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5070 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5075 /* Params with a fixed size */
5076 case SCTP_IPV4_ADDRESS:
5077 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5078 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5083 case SCTP_IPV6_ADDRESS:
5084 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5085 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5090 case SCTP_COOKIE_PRESERVE:
5091 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5092 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5097 case SCTP_HAS_NAT_SUPPORT:
5100 case SCTP_PRSCTP_SUPPORTED:
5102 if (padded_size != sizeof(struct sctp_paramhdr)) {
5103 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5108 case SCTP_ECN_CAPABLE:
5109 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
5110 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5115 case SCTP_ULP_ADAPTATION:
5116 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5117 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5122 case SCTP_SUCCESS_REPORT:
5123 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5124 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5129 case SCTP_HOSTNAME_ADDRESS:
5131 /* We can NOT handle HOST NAME addresses!! */
5134 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5135 *abort_processing = 1;
5136 if (op_err == NULL) {
5137 /* Ok need to try to get an mbuf */
5139 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5141 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5144 l_len += sizeof(struct sctp_paramhdr);
5145 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5147 SCTP_BUF_LEN(op_err) = 0;
5149 * pre-reserve space for ip
5150 * and sctp header and
5154 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5156 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5158 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5159 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5163 /* If we have space */
5164 struct sctp_paramhdr s;
5167 uint32_t cpthis = 0;
5169 pad_needed = 4 - (err_at % 4);
5170 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5171 err_at += pad_needed;
5173 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5174 s.param_length = htons(sizeof(s) + plen);
5175 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5176 err_at += sizeof(s);
5177 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5179 sctp_m_freem(op_err);
5181 * we are out of memory but
5182 * we still need to have a
5183 * look at what to do (the
5184 * system is in trouble
5189 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5196 * we do not recognize the parameter; figure out what
5199 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5200 if ((ptype & 0x4000) == 0x4000) {
5201 /* Report bit is set?? */
5202 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5203 if (op_err == NULL) {
5206 /* Ok need to try to get an mbuf */
5208 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5210 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5213 l_len += sizeof(struct sctp_paramhdr);
5214 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5216 SCTP_BUF_LEN(op_err) = 0;
5218 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5220 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5222 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5223 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5227 /* If we have space */
5228 struct sctp_paramhdr s;
5231 uint32_t cpthis = 0;
5233 pad_needed = 4 - (err_at % 4);
5234 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5235 err_at += pad_needed;
5237 s.param_type = htons(SCTP_UNRECOG_PARAM);
5238 s.param_length = htons(sizeof(s) + plen);
5239 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5240 err_at += sizeof(s);
5241 if (plen > sizeof(tempbuf)) {
5242 plen = sizeof(tempbuf);
5244 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5246 sctp_m_freem(op_err);
5248 * we are out of memory but
5249 * we still need to have a
5250 * look at what to do (the
5251 * system is in trouble
5255 goto more_processing;
5257 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5262 if ((ptype & 0x8000) == 0x0000) {
5263 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5266 /* skip this chunk and continue processing */
5267 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5268 at += SCTP_SIZE32(plen);
5273 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5277 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5278 *abort_processing = 1;
5279 if ((op_err == NULL) && phdr) {
5283 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5285 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5287 l_len += (2 * sizeof(struct sctp_paramhdr));
5288 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5290 SCTP_BUF_LEN(op_err) = 0;
5292 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5294 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5296 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5297 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5300 if ((op_err) && phdr) {
5301 struct sctp_paramhdr s;
5304 uint32_t cpthis = 0;
5306 pad_needed = 4 - (err_at % 4);
5307 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5308 err_at += pad_needed;
5310 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5311 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5312 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5313 err_at += sizeof(s);
5314 /* Only copy back the p-hdr that caused the issue */
5315 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5321 sctp_are_there_new_addresses(struct sctp_association *asoc,
5322 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5325 * Given an INIT packet, look through the packet to verify that there
5326 * are NO new addresses. As we go through the parameters, add reports
5327 * of any un-understood parameters that require an error. Also we
5328 * must return (1) to drop the packet if we see an un-understood
5329 * parameter that tells us to drop the chunk.
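 *
 * In other words (sketch of the checks below): the packet's source
 * address and every IPv4/IPv6 address parameter in the INIT are compared
 * against the addresses already known in asoc->nets; any address not
 * found there counts as a new address.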
5331 struct sockaddr *sa_touse;
5332 struct sockaddr *sa;
5333 struct sctp_paramhdr *phdr, params;
5334 uint16_t ptype, plen;
5336 struct sctp_nets *net;
5339 struct sockaddr_in sin4, *sa4;
5343 struct sockaddr_in6 sin6, *sa6;
5348 memset(&sin4, 0, sizeof(sin4));
5349 sin4.sin_family = AF_INET;
5350 sin4.sin_len = sizeof(sin4);
5353 memset(&sin6, 0, sizeof(sin6));
5354 sin6.sin6_family = AF_INET6;
5355 sin6.sin6_len = sizeof(sin6);
5357 /* First what about the src address of the pkt ? */
5359 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5360 sa = (struct sockaddr *)&net->ro._l_addr;
5361 if (sa->sa_family == src->sa_family) {
5363 if (sa->sa_family == AF_INET) {
5364 struct sockaddr_in *src4;
5366 sa4 = (struct sockaddr_in *)sa;
5367 src4 = (struct sockaddr_in *)src;
5368 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5375 if (sa->sa_family == AF_INET6) {
5376 struct sockaddr_in6 *src6;
5378 sa6 = (struct sockaddr_in6 *)sa;
5379 src6 = (struct sockaddr_in6 *)src;
5380 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5389 /* New address added! no need to look further. */
5392 /* Ok so far lets munge through the rest of the packet */
5393 offset += sizeof(struct sctp_init_chunk);
5394 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5397 ptype = ntohs(phdr->param_type);
5398 plen = ntohs(phdr->param_length);
5401 case SCTP_IPV4_ADDRESS:
5403 struct sctp_ipv4addr_param *p4, p4_buf;
5405 phdr = sctp_get_next_param(in_initpkt, offset,
5406 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5407 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5411 p4 = (struct sctp_ipv4addr_param *)phdr;
5412 sin4.sin_addr.s_addr = p4->addr;
5413 sa_touse = (struct sockaddr *)&sin4;
5418 case SCTP_IPV6_ADDRESS:
5420 struct sctp_ipv6addr_param *p6, p6_buf;
5422 phdr = sctp_get_next_param(in_initpkt, offset,
5423 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5424 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5428 p6 = (struct sctp_ipv6addr_param *)phdr;
5429 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5431 sa_touse = (struct sockaddr *)&sin6;
5440 /* ok, sa_touse points to one to check */
5442 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5443 sa = (struct sockaddr *)&net->ro._l_addr;
5444 if (sa->sa_family != sa_touse->sa_family) {
5448 if (sa->sa_family == AF_INET) {
5449 sa4 = (struct sockaddr_in *)sa;
5450 if (sa4->sin_addr.s_addr ==
5451 sin4.sin_addr.s_addr) {
5458 if (sa->sa_family == AF_INET6) {
5459 sa6 = (struct sockaddr_in6 *)sa;
5460 if (SCTP6_ARE_ADDR_EQUAL(
5469 /* New addr added! no need to look further */
5473 offset += SCTP_SIZE32(plen);
5474 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5480 * Given an MBUF chain that was sent into us containing an INIT, build an
5481 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has done
5482 * a pullup to include the IPv6/4 header, SCTP header and initial part of the INIT
5483 * message (i.e. the struct sctp_init_msg).
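 *
 * Rough layout of the INIT-ACK assembled below (illustrative sketch):
 *
 *   [sctp_init_ack_chunk]
 *   [adaptation / ECN / PR-SCTP / supported-extension / AUTH parameters]
 *   [address parameters from sctp_add_addresses_to_i_ia()]
 *   [any UNRECOGNIZED_PARAMETER errors (op_err)]
 *   [STATE-COOKIE parameter built by sctp_add_cookie(), signed last]
 *
 * The chunk length is patched in once the final size is known.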
5486 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5487 struct mbuf *init_pkt, int iphlen, int offset,
5488 struct sockaddr *src, struct sockaddr *dst,
5489 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5490 uint8_t use_mflowid, uint32_t mflowid,
5491 uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5493 struct sctp_association *asoc;
5494 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
5495 struct sctp_init_ack_chunk *initack;
5496 struct sctp_adaptation_layer_indication *ali;
5497 struct sctp_ecn_supported_param *ecn;
5498 struct sctp_prsctp_supported_param *prsctp;
5499 struct sctp_supported_chunk_types_param *pr_supported;
5500 union sctp_sockstore *over_addr;
5503 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5504 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5505 struct sockaddr_in *sin;
5509 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5510 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5511 struct sockaddr_in6 *sin6;
5514 struct sockaddr *to;
5515 struct sctp_state_cookie stc;
5516 struct sctp_nets *net = NULL;
5517 uint8_t *signature = NULL;
5518 int cnt_inits_to = 0;
5519 uint16_t his_limit, i_want;
5520 int abort_flag, padval;
5523 int nat_friendly = 0;
5532 if ((asoc != NULL) &&
5533 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5534 (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) {
5535 /* new addresses, out of here in non-cookie-wait states */
5537 * Send an ABORT; we don't add the new address error clause,
5538 * though we even set the T bit and copy in the 0 tag. This
5539 * looks no different than if no listener was present.
5541 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5543 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5544 use_mflowid, mflowid,
5549 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5550 (offset + sizeof(struct sctp_init_chunk)),
5551 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5554 if (op_err == NULL) {
5555 char msg[SCTP_DIAG_INFO_LEN];
5557 snprintf(msg, sizeof(msg), "%s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
5558 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5561 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5562 init_chk->init.initiate_tag, op_err,
5563 use_mflowid, mflowid,
5567 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
5569 /* No memory, INIT timer will re-attempt. */
5571 sctp_m_freem(op_err);
5574 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5577 * We might not overwrite the identification[] completely and on
5578 * some platforms time_entered will contain some padding. Therefore
5579 * zero out the cookie to avoid putting uninitialized memory on the
5582 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5584 /* the time I built cookie */
5585 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5587 /* populate any tie tags */
5589 /* unlock before tag selections */
5590 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5591 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5592 stc.cookie_life = asoc->cookie_life;
5593 net = asoc->primary_destination;
5595 stc.tie_tag_my_vtag = 0;
5596 stc.tie_tag_peer_vtag = 0;
5597 /* life I will award this cookie */
5598 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5601 /* copy in the ports for later check */
5602 stc.myport = sh->dest_port;
5603 stc.peerport = sh->src_port;
5606 * If we wanted to honor cookie life extensions, we would add to
5607 * stc.cookie_life. For now we should NOT honor any extension
5609 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5610 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5611 stc.ipv6_addr_legal = 1;
5612 if (SCTP_IPV6_V6ONLY(inp)) {
5613 stc.ipv4_addr_legal = 0;
5615 stc.ipv4_addr_legal = 1;
5618 stc.ipv6_addr_legal = 0;
5619 stc.ipv4_addr_legal = 1;
5621 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5628 switch (dst->sa_family) {
5632 /* lookup address */
5633 stc.address[0] = src4->sin_addr.s_addr;
5637 stc.addr_type = SCTP_IPV4_ADDRESS;
5638 /* local from address */
5639 stc.laddress[0] = dst4->sin_addr.s_addr;
5640 stc.laddress[1] = 0;
5641 stc.laddress[2] = 0;
5642 stc.laddress[3] = 0;
5643 stc.laddr_type = SCTP_IPV4_ADDRESS;
5644 /* scope_id is only for v6 */
5646 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5647 if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) {
5652 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5653 /* Must use the address in this case */
5654 if (sctp_is_address_on_local_host(src, vrf_id)) {
5655 stc.loopback_scope = 1;
5658 stc.local_scope = 0;
5666 stc.addr_type = SCTP_IPV6_ADDRESS;
5667 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5668 stc.scope_id = in6_getscope(&src6->sin6_addr);
5669 if (sctp_is_address_on_local_host(src, vrf_id)) {
5670 stc.loopback_scope = 1;
5671 stc.local_scope = 0;
5674 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) {
5676 * If the new destination is a
5677 * LINK_LOCAL, we must have both
5678 * site and local scope in common.
5679 * Don't set local scope though,
5680 * since we must depend on the
5681 * source to be added implicitly.
5682 * We cannot assume that all links
5683 * are common just because we share one.
5685 stc.local_scope = 0;
5689 * we start counting for the private
5690 * address stuff at 1, since the
5691 * link local we source from won't
5692 * show up in our scoped count.
5696 * pull out the scope_id from
5699 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) {
5701 * If the new destination is
5702 * SITE_LOCAL then we must have site
5707 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5708 stc.laddr_type = SCTP_IPV6_ADDRESS;
5718 /* set the scope per the existing tcb */
5721 struct sctp_nets *lnet;
5725 stc.loopback_scope = asoc->scope.loopback_scope;
5726 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5727 stc.site_scope = asoc->scope.site_scope;
5728 stc.local_scope = asoc->scope.local_scope;
5730 /* Why do we not consider IPv4 LL addresses? */
5731 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5732 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5733 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5735 * if we have a LL address, start
5743 /* use the net pointer */
5744 to = (struct sockaddr *)&net->ro._l_addr;
5745 switch (to->sa_family) {
5748 sin = (struct sockaddr_in *)to;
5749 stc.address[0] = sin->sin_addr.s_addr;
5753 stc.addr_type = SCTP_IPV4_ADDRESS;
5754 if (net->src_addr_selected == 0) {
5756 * strange case here, the INIT should have
5757 * done the selection.
5759 net->ro._s_addr = sctp_source_address_selection(inp,
5760 stcb, (sctp_route_t *) & net->ro,
5762 if (net->ro._s_addr == NULL)
5765 net->src_addr_selected = 1;
5768 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5769 stc.laddress[1] = 0;
5770 stc.laddress[2] = 0;
5771 stc.laddress[3] = 0;
5772 stc.laddr_type = SCTP_IPV4_ADDRESS;
5773 /* scope_id is only for v6 */
5779 sin6 = (struct sockaddr_in6 *)to;
5780 memcpy(&stc.address, &sin6->sin6_addr,
5781 sizeof(struct in6_addr));
5782 stc.addr_type = SCTP_IPV6_ADDRESS;
5783 stc.scope_id = sin6->sin6_scope_id;
5784 if (net->src_addr_selected == 0) {
5786 * strange case here, the INIT should have
5787 * done the selection.
5789 net->ro._s_addr = sctp_source_address_selection(inp,
5790 stcb, (sctp_route_t *) & net->ro,
5792 if (net->ro._s_addr == NULL)
5795 net->src_addr_selected = 1;
5797 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5798 sizeof(struct in6_addr));
5799 stc.laddr_type = SCTP_IPV6_ADDRESS;
5804 /* Now let's put the SCTP header in place */
5805 initack = mtod(m, struct sctp_init_ack_chunk *);
5806 /* Save it off for quick ref */
5807 stc.peers_vtag = init_chk->init.initiate_tag;
5809 memcpy(stc.identification, SCTP_VERSION_STRING,
5810 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5811 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5812 /* now the chunk header */
5813 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5814 initack->ch.chunk_flags = 0;
5815 /* fill in later from mbuf we build */
5816 initack->ch.chunk_length = 0;
5817 /* place in my tag */
5818 if ((asoc != NULL) &&
5819 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5820 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5821 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5822 /* re-use the v-tags and init-seq here */
5823 initack->init.initiate_tag = htonl(asoc->my_vtag);
5824 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5826 uint32_t vtag, itsn;
5828 if (hold_inp_lock) {
5829 SCTP_INP_INCR_REF(inp);
5830 SCTP_INP_RUNLOCK(inp);
5833 atomic_add_int(&asoc->refcnt, 1);
5834 SCTP_TCB_UNLOCK(stcb);
5836 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5837 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5839 * Got a duplicate vtag on some guy behind a
5840 * NAT; make sure we don't use it.
5844 initack->init.initiate_tag = htonl(vtag);
5845 /* get a TSN to use too */
5846 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5847 initack->init.initial_tsn = htonl(itsn);
5848 SCTP_TCB_LOCK(stcb);
5849 atomic_add_int(&asoc->refcnt, -1);
5851 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5852 initack->init.initiate_tag = htonl(vtag);
5853 /* get a TSN to use too */
5854 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5856 if (hold_inp_lock) {
5857 SCTP_INP_RLOCK(inp);
5858 SCTP_INP_DECR_REF(inp);
5861 /* save away my tag in the state cookie */
5862 stc.my_vtag = initack->init.initiate_tag;
5864 /* set up some of the credits. */
5865 so = inp->sctp_socket;
5867 /* memory problem */
5871 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5873 /* set what I want */
5874 his_limit = ntohs(init_chk->init.num_inbound_streams);
5875 /* choose what I want */
5877 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5878 i_want = asoc->streamoutcnt;
5880 i_want = inp->sctp_ep.pre_open_stream_count;
5883 i_want = inp->sctp_ep.pre_open_stream_count;
5885 if (his_limit < i_want) {
5886 /* I Want more :< */
5887 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5889 /* I can have what I want :> */
5890 initack->init.num_outbound_streams = htons(i_want);
5892 /* tell him his limit. */
5893 initack->init.num_inbound_streams =
5894 htons(inp->sctp_ep.max_open_streams_intome);
5896 /* adaptation layer indication parameter */
5897 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5898 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5899 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5900 ali->ph.param_length = htons(sizeof(*ali));
5901 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5902 SCTP_BUF_LEN(m) += sizeof(*ali);
5903 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5905 ecn = (struct sctp_ecn_supported_param *)((caddr_t)initack + sizeof(*initack));
5909 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
5910 (inp->sctp_ecn_enable == 1)) {
5911 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5912 ecn->ph.param_length = htons(sizeof(*ecn));
5913 SCTP_BUF_LEN(m) += sizeof(*ecn);
5915 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5918 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5920 /* And now tell the peer we do pr-sctp */
5921 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5922 prsctp->ph.param_length = htons(sizeof(*prsctp));
5923 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5925 /* Add NAT friendly parameter */
5926 struct sctp_paramhdr *ph;
5928 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5929 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5930 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5931 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5933 /* And now tell the peer we do all the extensions */
5934 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5935 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5937 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5938 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5939 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5940 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5941 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5942 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5943 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5944 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5945 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5946 p_len = sizeof(*pr_supported) + num_ext;
5947 pr_supported->ph.param_length = htons(p_len);
5948 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5949 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5951 /* add authentication parameters */
5952 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5953 struct sctp_auth_random *randp;
5954 struct sctp_auth_hmac_algo *hmacs;
5955 struct sctp_auth_chunk_list *chunks;
5956 uint16_t random_len;
5958 /* generate and add RANDOM parameter */
5959 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5960 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5961 randp->ph.param_type = htons(SCTP_RANDOM);
5962 p_len = sizeof(*randp) + random_len;
5963 randp->ph.param_length = htons(p_len);
5964 SCTP_READ_RANDOM(randp->random_data, random_len);
5965 /* zero out any padding required */
5966 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5967 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5969 /* add HMAC_ALGO parameter */
5970 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5971 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5972 (uint8_t *) hmacs->hmac_ids);
5974 p_len += sizeof(*hmacs);
5975 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5976 hmacs->ph.param_length = htons(p_len);
5977 /* zero out any padding required */
5978 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5979 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5981 /* add CHUNKS parameter */
5982 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5983 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5984 chunks->chunk_types);
5986 p_len += sizeof(*chunks);
5987 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5988 chunks->ph.param_length = htons(p_len);
5989 /* zero out any padding required */
5990 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5991 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5995 /* now the addresses */
5997 struct sctp_scoping scp;
6000 * To optimize this we could put the scoping stuff into a
6001 * structure and remove the individual uint8's from the stc
6002 * structure. Then we could just fill in the address within
6003 * the stc; but for now this is a quick hack to get the
6004 * address stuff teased apart.
6006 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6007 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6008 scp.loopback_scope = stc.loopback_scope;
6009 scp.ipv4_local_scope = stc.ipv4_scope;
6010 scp.local_scope = stc.local_scope;
6011 scp.site_scope = stc.site_scope;
6012 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL);
6015 /* tack on the operational error if present */
6024 llen += SCTP_BUF_LEN(ol);
6025 ol = SCTP_BUF_NEXT(ol);
6028 /* must add a pad to the param */
6029 uint32_t cpthis = 0;
6032 padlen = 4 - (llen % 4);
6033 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
6035 while (SCTP_BUF_NEXT(m_at) != NULL) {
6036 m_at = SCTP_BUF_NEXT(m_at);
6038 SCTP_BUF_NEXT(m_at) = op_err;
6039 while (SCTP_BUF_NEXT(m_at) != NULL) {
6040 m_at = SCTP_BUF_NEXT(m_at);
6043 /* pre-calculate the size and update pkt header and chunk header */
6045 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6046 p_len += SCTP_BUF_LEN(m_tmp);
6047 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6048 /* m_tmp should now point to last one */
6053 /* Now we must build a cookie */
6054 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6055 if (m_cookie == NULL) {
6056 /* memory problem */
6060 /* Now append the cookie to the end and update the space/size */
6061 SCTP_BUF_NEXT(m_tmp) = m_cookie;
6063 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6064 p_len += SCTP_BUF_LEN(m_tmp);
6065 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6066 /* m_tmp should now point to last one */
6072 * Place in the size, but we don't include the last pad (if any) in
6075 initack->ch.chunk_length = htons(p_len);
6078 * Time to sign the cookie, we don't sign over the cookie signature
6079 * though thus we set trailer.
6081 (void)sctp_hmac_m(SCTP_HMAC,
6082 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6083 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6084 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
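/*
 * Sketch of the signing call above (argument meanings assumed from the
 * surrounding comment): the offset of sizeof(struct sctp_paramhdr)
 * skips the cookie's own parameter header, and the trailing
 * SCTP_SIGNATURE_SIZE bytes (the signature field itself) are treated
 * as a trailer, so they are left out of the digest written into them.
 */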
6086 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6087 * here since the timer will drive a retransmission.
6090 if ((padval) && (mp_last)) {
6091 /* see my previous comments on mp_last */
6092 if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
6093 /* Houston we have a problem, no space */
6098 if (stc.loopback_scope) {
6099 over_addr = (union sctp_sockstore *)dst;
6104 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6106 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6108 use_mflowid, mflowid,
6109 SCTP_SO_NOT_LOCKED);
6110 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6115 sctp_prune_prsctp(struct sctp_tcb *stcb,
6116 struct sctp_association *asoc,
6117 struct sctp_sndrcvinfo *srcv,
6121 struct sctp_tmit_chunk *chk, *nchk;
6123 SCTP_TCB_LOCK_ASSERT(stcb);
6124 if ((asoc->peer_supports_prsctp) &&
6125 (asoc->sent_queue_cnt_removeable > 0)) {
6126 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6128 * Look for chunks marked with the PR_SCTP flag AND
6129 * the buffer space flag. If the one being sent is
6130 * equal or greater priority then purge the old one
6131 * and free some space.
6133 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6135 * This one is PR-SCTP AND buffer space
6138 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6140 * Lower numbers equate to higher
6141 * priority so if the one we are
6142 * looking at has a larger or equal
6143 * priority we want to drop the data
6144 * and NOT retransmit it.
6148 * We release the book_size
6149 * if the mbuf is here
6154 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6158 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6161 freed_spc += ret_spc;
6162 if (freed_spc >= dataout) {
6165 } /* if chunk was present */
6166 } /* if of sufficient priority */
6167 } /* if chunk has enabled */
6168 } /* tailqforeach */
6170 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6171 /* Here we must move to the sent queue and mark */
6172 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6173 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6176 * We release the book_size
6177 * if the mbuf is here
6181 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6184 freed_spc += ret_spc;
6185 if (freed_spc >= dataout) {
6188 } /* end if chk->data */
6189 } /* end if right class */
6190 } /* end if chk pr-sctp */
6191 } /* tailqforeachsafe (chk) */
6192 } /* if enabled in asoc */
6196 sctp_get_frag_point(struct sctp_tcb *stcb,
6197 struct sctp_association *asoc)
6202 * For endpoints that have both v6 and v4 addresses we must reserve
6203 * room for the IPv6 header; for those that are only dealing with v4
6204 * we use a larger frag point.
6206 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6207 ovh = SCTP_MED_OVERHEAD;
6209 ovh = SCTP_MED_V4_OVERHEAD;
6212 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6213 siz = asoc->smallest_mtu - ovh;
6215 siz = (stcb->asoc.sctp_frag_point - ovh);
6217 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6219 /* A data chunk MUST fit in a cluster */
6220 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6223 /* adjust for an AUTH chunk if DATA requires auth */
6224 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6225 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6228 /* make it an even word boundary please */
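/*
 * Worked example (illustrative values only): if the usable size after
 * subtracting the overhead and, when DATA must be authenticated, the
 * AUTH chunk length came to 1437 bytes, rounding down to an even word
 * boundary would yield 1436, so the DATA payload target is always a
 * multiple of 4.
 */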
6235 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6238 * We assume that the user wants PR_SCTP_TTL if the user provides a
6239 * positive lifetime but does not specify any PR_SCTP policy.
6241 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6242 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6243 } else if (sp->timetolive > 0) {
6244 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6245 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6249 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6250 case CHUNK_FLAGS_PR_SCTP_BUF:
6252 * Time to live is a priority stored in tv_sec when doing
6253 * the buffer drop thing.
6255 sp->ts.tv_sec = sp->timetolive;
6258 case CHUNK_FLAGS_PR_SCTP_TTL:
6262 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6263 tv.tv_sec = sp->timetolive / 1000;
6264 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6266 * TODO sctp_constants.h needs alternative time
6267 * macros when _KERNEL is undefined.
6269 timevaladd(&sp->ts, &tv);
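/*
 * For example (illustrative value): a timetolive of 1500 msec gives
 * tv.tv_sec == 1 and tv.tv_usec == (1500 * 1000) % 1000000 == 500000,
 * so the drop deadline stored in sp->ts becomes "now" plus 1.5 seconds.
 */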
6272 case CHUNK_FLAGS_PR_SCTP_RTX:
6274 * Time to live is the number of retransmissions stored in
6277 sp->ts.tv_sec = sp->timetolive;
6281 SCTPDBG(SCTP_DEBUG_USRREQ1,
6282 "Unknown PR_SCTP policy %u.\n",
6283 PR_SCTP_POLICY(sp->sinfo_flags));
6289 sctp_msg_append(struct sctp_tcb *stcb,
6290 struct sctp_nets *net,
6292 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6296 struct sctp_stream_queue_pending *sp = NULL;
6297 struct sctp_stream_out *strm;
6300 * Given an mbuf chain, put it into the association send queue and
6301 * place it on the wheel
6303 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6304 /* Invalid stream number */
6305 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6309 if ((stcb->asoc.stream_locked) &&
6310 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6311 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6315 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6316 /* Now can we send this? */
6317 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6318 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6319 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6320 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6321 /* got data while shutting down */
6322 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6326 sctp_alloc_a_strmoq(stcb, sp);
6328 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6332 sp->sinfo_flags = srcv->sinfo_flags;
6333 sp->timetolive = srcv->sinfo_timetolive;
6334 sp->ppid = srcv->sinfo_ppid;
6335 sp->context = srcv->sinfo_context;
6336 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6338 atomic_add_int(&sp->net->ref_count, 1);
6342 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6343 sp->stream = srcv->sinfo_stream;
6344 sp->msg_is_complete = 1;
6345 sp->sender_all_done = 1;
6348 sp->tail_mbuf = NULL;
6349 sctp_set_prsctp_policy(sp);
6351 * We could in theory (for sendall) pass the length in, but we would
6352 * still have to hunt through the chain since we need to set up the
6356 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6357 if (SCTP_BUF_NEXT(at) == NULL)
6359 sp->length += SCTP_BUF_LEN(at);
6361 if (srcv->sinfo_keynumber_valid) {
6362 sp->auth_keyid = srcv->sinfo_keynumber;
6364 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6366 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6367 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6368 sp->holds_key_ref = 1;
6370 if (hold_stcb_lock == 0) {
6371 SCTP_TCB_SEND_LOCK(stcb);
6373 sctp_snd_sb_alloc(stcb, sp->length);
6374 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6375 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6376 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6378 if (hold_stcb_lock == 0) {
6379 SCTP_TCB_SEND_UNLOCK(stcb);
6389 static struct mbuf *
6390 sctp_copy_mbufchain(struct mbuf *clonechain,
6391 struct mbuf *outchain,
6392 struct mbuf **endofchain,
6395 uint8_t copy_by_ref)
6398 struct mbuf *appendchain;
6402 if (endofchain == NULL) {
6406 sctp_m_freem(outchain);
6409 if (can_take_mbuf) {
6410 appendchain = clonechain;
6413 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6415 /* It's not in a cluster */
6416 if (*endofchain == NULL) {
6417 /* let's get an mbuf cluster */
6418 if (outchain == NULL) {
6419 /* This is the general case */
6421 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6422 if (outchain == NULL) {
6425 SCTP_BUF_LEN(outchain) = 0;
6426 *endofchain = outchain;
6427 /* get the prepend space */
6428 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6431 * We really should not get a NULL
6437 if (SCTP_BUF_NEXT(m) == NULL) {
6441 m = SCTP_BUF_NEXT(m);
6444 if (*endofchain == NULL) {
6446 * huh, TSNH XXX maybe we
6449 sctp_m_freem(outchain);
6453 /* get the new end of length */
6454 len = M_TRAILINGSPACE(*endofchain);
6456 /* how much is left at the end? */
6457 len = M_TRAILINGSPACE(*endofchain);
6459 /* Find the end of the data, for appending */
6460 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6462 /* Now let's copy it out */
6463 if (len >= sizeofcpy) {
6464 /* It all fits, copy it in */
6465 m_copydata(clonechain, 0, sizeofcpy, cp);
6466 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6468 /* fill up the end of the chain */
6470 m_copydata(clonechain, 0, len, cp);
6471 SCTP_BUF_LEN((*endofchain)) += len;
6472 /* now we need another one */
6475 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6480 SCTP_BUF_NEXT((*endofchain)) = m;
6482 cp = mtod((*endofchain), caddr_t);
6483 m_copydata(clonechain, len, sizeofcpy, cp);
6484 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6488 /* copy the old fashion way */
6489 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
6490 #ifdef SCTP_MBUF_LOGGING
6491 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6494 for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
6495 if (SCTP_BUF_IS_EXTENDED(mat)) {
6496 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6503 if (appendchain == NULL) {
6506 sctp_m_freem(outchain);
6510 /* tack on to the end */
6511 if (*endofchain != NULL) {
6512 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6516 if (SCTP_BUF_NEXT(m) == NULL) {
6517 SCTP_BUF_NEXT(m) = appendchain;
6520 m = SCTP_BUF_NEXT(m);
6524 * save off the end and update the end-chain position
6528 if (SCTP_BUF_NEXT(m) == NULL) {
6532 m = SCTP_BUF_NEXT(m);
6536 /* save off the end and update the end-chain position */
6539 if (SCTP_BUF_NEXT(m) == NULL) {
6543 m = SCTP_BUF_NEXT(m);
6545 return (appendchain);
6550 sctp_med_chunk_output(struct sctp_inpcb *inp,
6551 struct sctp_tcb *stcb,
6552 struct sctp_association *asoc,
6555 int control_only, int from_where,
6556 struct timeval *now, int *now_filled, int frag_point, int so_locked
6557 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6563 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6564 uint32_t val SCTP_UNUSED)
6566 struct sctp_copy_all *ca;
6569 int added_control = 0;
6570 int un_sent, do_chunk_output = 1;
6571 struct sctp_association *asoc;
6572 struct sctp_nets *net;
6574 ca = (struct sctp_copy_all *)ptr;
6575 if (ca->m == NULL) {
6578 if (ca->inp != inp) {
6582 if (ca->sndlen > 0) {
6583 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
6585 /* can't copy so we are done */
6589 #ifdef SCTP_MBUF_LOGGING
6590 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6593 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6594 if (SCTP_BUF_IS_EXTENDED(mat)) {
6595 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6603 SCTP_TCB_LOCK_ASSERT(stcb);
6604 if (stcb->asoc.alternate) {
6605 net = stcb->asoc.alternate;
6607 net = stcb->asoc.primary_destination;
6609 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6610 /* Abort this assoc with m as the user defined reason */
6612 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6614 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6615 0, M_NOWAIT, 1, MT_DATA);
6616 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6619 struct sctp_paramhdr *ph;
6621 ph = mtod(m, struct sctp_paramhdr *);
6622 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6623 ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen);
6626 * We add one here to keep the assoc from disappearing on
6629 atomic_add_int(&stcb->asoc.refcnt, 1);
6630 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6632 * sctp_abort_an_association calls sctp_free_asoc(); freeing the
6633 * association will NOT actually free it since we incremented the
6634 * refcnt. We do this to prevent it being freed and things
6635 * getting tricky, since we could end up (from free_asoc)
6636 * calling inpcb_free, which would take a recursive lock
6637 * on the iterator lock. As a consequence of that the
6638 * stcb will return to us un-locked; since free_asoc
6639 * returns with either no TCB or the TCB unlocked, we must
6640 * relock it in order to unlock it in the iterator timer :-0
6642 SCTP_TCB_LOCK(stcb);
6643 atomic_add_int(&stcb->asoc.refcnt, -1);
6644 goto no_chunk_output;
6647 ret = sctp_msg_append(stcb, net, m,
6651 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6652 /* shutdown this assoc */
6655 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
6657 if (TAILQ_EMPTY(&asoc->send_queue) &&
6658 TAILQ_EMPTY(&asoc->sent_queue) &&
6660 if (asoc->locked_on_sending) {
6664 * there is nothing queued to send, so I'm
6667 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6668 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6669 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6671 * only send SHUTDOWN the first time
6674 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6675 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6677 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6678 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6679 sctp_stop_timers_for_shutdown(stcb);
6680 sctp_send_shutdown(stcb, net);
6681 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6683 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6684 asoc->primary_destination);
6686 do_chunk_output = 0;
6690 * we still got (or just got) data to send,
6691 * so set SHUTDOWN_PENDING
6694 * XXX sockets draft says that SCTP_EOF
6695 * should be sent with no data. Currently,
6696 * we will allow user data to be sent first
6697 * and move to SHUTDOWN-PENDING
6699 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6700 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6701 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6702 if (asoc->locked_on_sending) {
6704 * Locked to send out the
6707 struct sctp_stream_queue_pending *sp;
6709 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6711 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6712 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6715 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6716 if (TAILQ_EMPTY(&asoc->send_queue) &&
6717 TAILQ_EMPTY(&asoc->sent_queue) &&
6718 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6720 atomic_add_int(&stcb->asoc.refcnt, 1);
6721 sctp_abort_an_association(stcb->sctp_ep, stcb,
6722 NULL, SCTP_SO_NOT_LOCKED);
6723 atomic_add_int(&stcb->asoc.refcnt, -1);
6724 goto no_chunk_output;
6726 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6727 asoc->primary_destination);
6733 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6734 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6736 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6737 (stcb->asoc.total_flight > 0) &&
6738 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6739 do_chunk_output = 0;
6741 if (do_chunk_output)
6742 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6743 else if (added_control) {
6744 int num_out = 0, reason = 0, now_filled = 0;
6748 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6749 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6750 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6761 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6763 struct sctp_copy_all *ca;
6765 ca = (struct sctp_copy_all *)ptr;
6767 * Do a notify here? Kacheong suggests that the notify be done at
6768 * the send time.. so you would push up a notification if any send
6769 * failed. Don't know if this is feasible since the only failures we
6770 * have are "memory" related, and if you cannot get an mbuf to send
6771 * the data you surely can't get an mbuf to send up to notify the
6772 * user you can't send the data :->
6775 /* now free everything */
6776 sctp_m_freem(ca->m);
6777 SCTP_FREE(ca, SCTP_M_COPYAL);
6781 #define MC_ALIGN(m, len) do { \
6782 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
6787 static struct mbuf *
6788 sctp_copy_out_all(struct uio *uio, int len)
6790 struct mbuf *ret, *at;
6791 int left, willcpy, cancpy, error;
6793 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
6799 SCTP_BUF_LEN(ret) = 0;
6800 /* save space for the data chunk header */
6801 cancpy = M_TRAILINGSPACE(ret);
6802 willcpy = min(cancpy, left);
6805 /* Align data to the end */
6806 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6812 SCTP_BUF_LEN(at) = willcpy;
6813 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6816 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
6817 if (SCTP_BUF_NEXT(at) == NULL) {
6820 at = SCTP_BUF_NEXT(at);
6821 SCTP_BUF_LEN(at) = 0;
6822 cancpy = M_TRAILINGSPACE(at);
6823 willcpy = min(cancpy, left);
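/*
 * Sketch of the loop above (sizes are illustrative): each pass moves
 * min(trailing space, bytes left) from the uio into the current mbuf
 * and, once that mbuf fills, allocates a fresh one sized for the
 * remainder, so a 5000 byte send typically ends up spread over a few
 * cluster-backed mbufs.
 */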
6830 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6831 struct sctp_sndrcvinfo *srcv)
6834 struct sctp_copy_all *ca;
6836 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6840 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6843 memset(ca, 0, sizeof(struct sctp_copy_all));
6847 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6850 * take off the sendall flag, it would be bad if we failed to do
6853 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6854 /* get length and mbuf chain */
6856 ca->sndlen = uio->uio_resid;
6857 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6858 if (ca->m == NULL) {
6859 SCTP_FREE(ca, SCTP_M_COPYAL);
6860 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6864 /* Gather the length of the send */
6868 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6869 ca->sndlen += SCTP_BUF_LEN(mat);
6872 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6873 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6874 SCTP_ASOC_ANY_STATE,
6876 sctp_sendall_completes, inp, 1);
6878 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6879 SCTP_FREE(ca, SCTP_M_COPYAL);
6880 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6888 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6890 struct sctp_tmit_chunk *chk, *nchk;
6892 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6893 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6894 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6896 sctp_m_freem(chk->data);
6899 asoc->ctrl_queue_cnt--;
6900 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6906 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6908 struct sctp_association *asoc;
6909 struct sctp_tmit_chunk *chk, *nchk;
6910 struct sctp_asconf_chunk *acp;
6913 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6914 /* find SCTP_ASCONF chunk in queue */
6915 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6917 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6918 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6923 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6925 sctp_m_freem(chk->data);
6928 asoc->ctrl_queue_cnt--;
6929 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6936 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6937 struct sctp_association *asoc,
6938 struct sctp_tmit_chunk **data_list,
6940 struct sctp_nets *net)
6943 struct sctp_tmit_chunk *tp1;
6945 for (i = 0; i < bundle_at; i++) {
6946 /* off of the send queue */
6947 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6948 asoc->send_queue_cnt--;
6951 * For any chunk NOT 0 you zap the time; chunk 0 gets
6952 * zapped or set based on whether an RTO measurement is
6955 data_list[i]->do_rtt = 0;
6958 data_list[i]->sent_rcv_time = net->last_sent_time;
6959 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6960 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6961 if (data_list[i]->whoTo == NULL) {
6962 data_list[i]->whoTo = net;
6963 atomic_add_int(&net->ref_count, 1);
6965 /* on to the sent queue */
6966 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6967 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6968 struct sctp_tmit_chunk *tpp;
6970 /* need to move back */
6972 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6974 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6978 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6981 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6983 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6988 /* This does not lower until the cum-ack passes it */
6989 asoc->sent_queue_cnt++;
6990 if ((asoc->peers_rwnd <= 0) &&
6991 (asoc->total_flight == 0) &&
6993 /* Mark the chunk as being a window probe */
6994 SCTP_STAT_INCR(sctps_windowprobed);
6996 #ifdef SCTP_AUDITING_ENABLED
6997 sctp_audit_log(0xC2, 3);
6999 data_list[i]->sent = SCTP_DATAGRAM_SENT;
7000 data_list[i]->snd_count = 1;
7001 data_list[i]->rec.data.chunk_was_revoked = 0;
7002 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7003 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7004 data_list[i]->whoTo->flight_size,
7005 data_list[i]->book_size,
7006 (uintptr_t) data_list[i]->whoTo,
7007 data_list[i]->rec.data.TSN_seq);
7009 sctp_flight_size_increase(data_list[i]);
7010 sctp_total_flight_increase(stcb, data_list[i]);
7011 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7012 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7013 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7015 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7016 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7017 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7018 /* SWS sender side engages */
7019 asoc->peers_rwnd = 0;
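/*
 * In other words (a sketch of the intent): once the advertised window
 * falls below the sender-side silly window threshold
 * (sctp_ep.sctp_sws_sender), we treat it as zero so we send window
 * probes rather than a stream of tiny packets until the peer reopens
 * the window.
 */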
7022 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7023 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
7028 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7029 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7034 struct sctp_tmit_chunk *chk, *nchk;
7036 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7037 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7038 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7039 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7040 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7041 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7042 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7043 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7044 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7045 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7046 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7047 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7048 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7049 /* Stray chunks must be cleaned up */
7051 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7053 sctp_m_freem(chk->data);
7056 asoc->ctrl_queue_cnt--;
7057 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
7058 asoc->fwd_tsn_cnt--;
7059 sctp_free_a_chunk(stcb, chk, so_locked);
7060 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7061 /* special handling, we must look into the param */
7062 if (chk != asoc->str_reset) {
7063 goto clean_up_anyway;
7071 sctp_can_we_split_this(struct sctp_tcb *stcb,
7073 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7076 * Make a decision on whether I should split a msg into multiple parts.
7077 * This is only asked of incomplete messages.
7081 * If we are doing EEOR we need to always send it if it's the
7082 * entire thing, since it might be all the guy is putting in
7085 if (goal_mtu >= length) {
7087 * If we have data outstanding,
7088 * we get another chance when the sack
7089 * arrives to transmit - wait for more data
7091 if (stcb->asoc.total_flight == 0) {
7093 * If nothing is in flight, we zero the
7101 /* You can fill the rest */
7106 * For those strange folk that make the send buffer
7107 * smaller than our fragmentation point, we can't
7108 * get a full msg in so we have to allow splitting.
7110 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7113 if ((length <= goal_mtu) ||
7114 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7115 /* Sub-optimal residual, don't split in non-eeor mode. */
7119 * If we reach here, length is larger than the goal_mtu. Do we wish
7120 * to split it for the sake of packing packets together?
7122 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7123 /* It's ok to split it */
7124 return (min(goal_mtu, frag_point));
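/*
 * Illustrative example (hypothetical numbers): with length == 9000,
 * goal_mtu == 1200 and frag_point == 1448, goal_mtu is at least the
 * configured minimum split point, so min(1200, 1448) == 1200 bytes are
 * split off for this packet and the rest stays queued for a later pass.
 */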
7126 /* Nope, can't split */
7132 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7133 struct sctp_stream_out *strq,
7135 uint32_t frag_point,
7141 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7146 /* Move from the stream to the send_queue keeping track of the total */
7147 struct sctp_association *asoc;
7148 struct sctp_stream_queue_pending *sp;
7149 struct sctp_tmit_chunk *chk;
7150 struct sctp_data_chunk *dchkh;
7151 uint32_t to_move, length;
7152 uint8_t rcv_flags = 0;
7154 uint8_t send_lock_up = 0;
7156 SCTP_TCB_LOCK_ASSERT(stcb);
7159 /* sa_ignore FREED_MEMORY */
7160 sp = TAILQ_FIRST(&strq->outqueue);
7163 if (send_lock_up == 0) {
7164 SCTP_TCB_SEND_LOCK(stcb);
7167 sp = TAILQ_FIRST(&strq->outqueue);
7171 if (strq->last_msg_incomplete) {
7172 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7174 strq->last_msg_incomplete);
7175 strq->last_msg_incomplete = 0;
7179 SCTP_TCB_SEND_UNLOCK(stcb);
7184 if ((sp->msg_is_complete) && (sp->length == 0)) {
7185 if (sp->sender_all_done) {
7187 * We are doing deferred cleanup. Last time through
7188 * when we took all the data the sender_all_done was
7191 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7192 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7193 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7194 sp->sender_all_done,
7196 sp->msg_is_complete,
7200 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7201 SCTP_TCB_SEND_LOCK(stcb);
7204 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7205 TAILQ_REMOVE(&strq->outqueue, sp, next);
7206 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7208 sctp_free_remote_addr(sp->net);
7212 sctp_m_freem(sp->data);
7215 sctp_free_a_strmoq(stcb, sp, so_locked);
7216 /* we can't be locked to it */
7218 stcb->asoc.locked_on_sending = NULL;
7220 SCTP_TCB_SEND_UNLOCK(stcb);
7223 /* back to get the next msg */
7227 * sender just finished this but still holds a
7236 /* is there some to get */
7237 if (sp->length == 0) {
7243 } else if (sp->discard_rest) {
7244 if (send_lock_up == 0) {
7245 SCTP_TCB_SEND_LOCK(stcb);
7248 /* Whack down the size */
7249 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7250 if ((stcb->sctp_socket != NULL) &&
7251 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7252 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7253 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7256 sctp_m_freem(sp->data);
7258 sp->tail_mbuf = NULL;
7268 some_taken = sp->some_taken;
7269 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7270 sp->msg_is_complete = 1;
7273 length = sp->length;
7274 if (sp->msg_is_complete) {
7275 /* The message is complete */
7276 to_move = min(length, frag_point);
7277 if (to_move == length) {
7278 /* All of it fits in the MTU */
7279 if (sp->some_taken) {
7280 rcv_flags |= SCTP_DATA_LAST_FRAG;
7281 sp->put_last_out = 1;
7283 rcv_flags |= SCTP_DATA_NOT_FRAG;
7284 sp->put_last_out = 1;
7287 /* Not all of it fits, we fragment */
7288 if (sp->some_taken == 0) {
7289 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7294 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7297 * We use a snapshot of length in case it
7298 * is expanding during the compare.
7303 if (to_move >= llen) {
7305 if (send_lock_up == 0) {
7307 * We are taking all of an incomplete msg
7308 * thus we need a send lock.
7310 SCTP_TCB_SEND_LOCK(stcb);
7312 if (sp->msg_is_complete) {
7314 * the sender finished the
7321 if (sp->some_taken == 0) {
7322 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7326 /* Nothing to take. */
7327 if (sp->some_taken) {
7336 /* If we reach here, we can copy out a chunk */
7337 sctp_alloc_a_chunk(stcb, chk);
7339 /* No chunk memory */
7345 * Setup for unordered if needed by looking at the user sent info
7348 if (sp->sinfo_flags & SCTP_UNORDERED) {
7349 rcv_flags |= SCTP_DATA_UNORDERED;
7351 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7352 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7353 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7355 /* clear out the chunk before setting up */
7356 memset(chk, 0, sizeof(*chk));
7357 chk->rec.data.rcv_flags = rcv_flags;
7359 if (to_move >= length) {
7360 /* we think we can steal the whole thing */
7361 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7362 SCTP_TCB_SEND_LOCK(stcb);
7365 if (to_move < sp->length) {
7366 /* bail, it changed */
7369 chk->data = sp->data;
7370 chk->last_mbuf = sp->tail_mbuf;
7371 /* register the stealing */
7372 sp->data = sp->tail_mbuf = NULL;
7377 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
7378 chk->last_mbuf = NULL;
7379 if (chk->data == NULL) {
7380 sp->some_taken = some_taken;
7381 sctp_free_a_chunk(stcb, chk, so_locked);
7386 #ifdef SCTP_MBUF_LOGGING
7387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7390 for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
7391 if (SCTP_BUF_IS_EXTENDED(mat)) {
7392 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7397 /* Pull off the data */
7398 m_adj(sp->data, to_move);
7399 /* Now let's work our way down and compact it */
7401 while (m && (SCTP_BUF_LEN(m) == 0)) {
7402 sp->data = SCTP_BUF_NEXT(m);
7403 SCTP_BUF_NEXT(m) = NULL;
7404 if (sp->tail_mbuf == m) {
7406 * Freeing tail? TSNH since
7407 * we supposedly were taking less
7408 * than the sp->length.
7411 panic("Huh, freing tail? - TSNH");
7413 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7414 sp->tail_mbuf = sp->data = NULL;
7423 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7424 chk->copy_by_ref = 1;
7426 chk->copy_by_ref = 0;
7429 * get last_mbuf and counts of mb usage. This is ugly but hopefully
7430 * it's only one mbuf.
7432 if (chk->last_mbuf == NULL) {
7433 chk->last_mbuf = chk->data;
7434 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7435 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7438 if (to_move > length) {
7439 /*- This should not happen either
7440 * since we always lower to_move to the size
7441 * of sp->length if it's larger.
7444 panic("Huh, how can to_move be larger?");
7446 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7450 atomic_subtract_int(&sp->length, to_move);
7452 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7453 /* Not enough room for a chunk header, get some */
7456 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
7459 * we're in trouble here. _PREPEND below will free
7460 * all the data if there is no leading space, so we
7461 * must put the data back and restore.
7463 if (send_lock_up == 0) {
7464 SCTP_TCB_SEND_LOCK(stcb);
7467 if (chk->data == NULL) {
7468 /* unsteal the data */
7469 sp->data = chk->data;
7470 sp->tail_mbuf = chk->last_mbuf;
7474 /* reassemble the data */
7476 sp->data = chk->data;
7477 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7479 sp->some_taken = some_taken;
7480 atomic_add_int(&sp->length, to_move);
7483 sctp_free_a_chunk(stcb, chk, so_locked);
7487 SCTP_BUF_LEN(m) = 0;
7488 SCTP_BUF_NEXT(m) = chk->data;
7490 M_ALIGN(chk->data, 4);
7493 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
7494 if (chk->data == NULL) {
7495 /* HELP, TSNH since we assured it would not above? */
7497 panic("prepend failes HELP?");
7499 SCTP_PRINTF("prepend fails HELP?\n");
7500 sctp_free_a_chunk(stcb, chk, so_locked);
7506 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7507 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
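/*
 * Example (illustrative, assuming the usual 16-byte DATA chunk
 * header): moving 1001 user bytes gives book_size == send_size ==
 * 1001 + 16 == 1017; the 3 bytes of 32-bit padding are added to
 * send_size further down, after the DATA chunk header has been
 * prepended and sctp_pad_lastmbuf() has run.
 */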
7508 chk->book_size_scale = 0;
7509 chk->sent = SCTP_DATAGRAM_UNSENT;
7512 chk->asoc = &stcb->asoc;
7513 chk->pad_inplace = 0;
7514 chk->no_fr_allowed = 0;
7515 chk->rec.data.stream_seq = strq->next_sequence_send;
7516 if ((rcv_flags & SCTP_DATA_LAST_FRAG) &&
7517 !(rcv_flags & SCTP_DATA_UNORDERED)) {
7518 strq->next_sequence_send++;
7520 chk->rec.data.stream_number = sp->stream;
7521 chk->rec.data.payloadtype = sp->ppid;
7522 chk->rec.data.context = sp->context;
7523 chk->rec.data.doing_fast_retransmit = 0;
7525 chk->rec.data.timetodrop = sp->ts;
7526 chk->flags = sp->act_flags;
7529 chk->whoTo = sp->net;
7530 atomic_add_int(&chk->whoTo->ref_count, 1);
7534 if (sp->holds_key_ref) {
7535 chk->auth_keyid = sp->auth_keyid;
7536 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7537 chk->holds_key_ref = 1;
7539 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7540 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7541 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7542 (uintptr_t) stcb, sp->length,
7543 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7544 chk->rec.data.TSN_seq);
7546 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7548 * Put the rest of the things in place now. Size was done earlier in
7549 * previous loop prior to padding.
7552 #ifdef SCTP_ASOCLOG_OF_TSNS
7553 SCTP_TCB_LOCK_ASSERT(stcb);
7554 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7555 asoc->tsn_out_at = 0;
7556 asoc->tsn_out_wrapped = 1;
7558 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7559 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7560 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7561 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7562 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7563 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7564 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7565 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7569 dchkh->ch.chunk_type = SCTP_DATA;
7570 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7571 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7572 dchkh->dp.stream_id = htons(strq->stream_no);
7573 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7574 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7575 dchkh->ch.chunk_length = htons(chk->send_size);
7576 /* Now advance the chk->send_size by the actual pad needed. */
7577 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7582 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7583 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7584 chk->pad_inplace = 1;
7586 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7587 /* pad added an mbuf */
7588 chk->last_mbuf = lm;
7590 chk->send_size += pads;
7592 if (PR_SCTP_ENABLED(chk->flags)) {
7593 asoc->pr_sctp_cnt++;
7595 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7596 /* All done pull and kill the message */
7597 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7598 if (sp->put_last_out == 0) {
7599 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7600 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7601 sp->sender_all_done,
7603 sp->msg_is_complete,
7607 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7608 SCTP_TCB_SEND_LOCK(stcb);
7611 TAILQ_REMOVE(&strq->outqueue, sp, next);
7612 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7614 sctp_free_remote_addr(sp->net);
7618 sctp_m_freem(sp->data);
7621 sctp_free_a_strmoq(stcb, sp, so_locked);
7623 /* we can't be locked to it */
7625 stcb->asoc.locked_on_sending = NULL;
7627 /* more to go, we are locked */
7630 asoc->chunks_on_out_queue++;
7631 strq->chunks_on_queues++;
7632 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7633 asoc->send_queue_cnt++;
7636 SCTP_TCB_SEND_UNLOCK(stcb);
7643 sctp_fill_outqueue(struct sctp_tcb *stcb,
7644 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7645 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7650 struct sctp_association *asoc;
7651 struct sctp_stream_out *strq;
7652 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7655 SCTP_TCB_LOCK_ASSERT(stcb);
7657 switch (net->ro._l_addr.sa.sa_family) {
7660 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7665 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7670 goal_mtu = net->mtu;
7673 /* Need an allowance for the data chunk header too */
7674 goal_mtu -= sizeof(struct sctp_data_chunk);
7676 /* must make even word boundary */
7677 goal_mtu &= 0xfffffffc;
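/*
 * Example (illustrative): with net->mtu == 1500 on IPv4 the goal is
 * 1500 minus the minimum v4 overhead and the DATA chunk header; the
 * mask with 0xfffffffc then rounds any odd result down to the next
 * multiple of 4 (1453 would become 1452), so chunks land on even word
 * boundaries.
 */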
7678 if (asoc->locked_on_sending) {
7679 /* We are stuck on one stream until the message completes. */
7680 strq = asoc->locked_on_sending;
7683 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7686 while ((goal_mtu > 0) && strq) {
7689 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7690 &giveup, eeor_mode, &bail, so_locked);
7692 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7695 asoc->locked_on_sending = strq;
7696 if ((moved_how_much == 0) || (giveup) || bail)
7697 /* no more to move for now */
7700 asoc->locked_on_sending = NULL;
7701 if ((giveup) || bail) {
7704 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7709 total_moved += moved_how_much;
7710 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7711 goal_mtu &= 0xfffffffc;
7716 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7718 if (total_moved == 0) {
7719 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7720 (net == stcb->asoc.primary_destination)) {
7721 /* ran dry for primary network net */
7722 SCTP_STAT_INCR(sctps_primary_randry);
7723 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7724 /* ran dry with CMT on */
7725 SCTP_STAT_INCR(sctps_cmt_randry);
7731 sctp_fix_ecn_echo(struct sctp_association *asoc)
7733 struct sctp_tmit_chunk *chk;
7735 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7736 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7737 chk->sent = SCTP_DATAGRAM_UNSENT;
7743 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7745 struct sctp_association *asoc;
7746 struct sctp_tmit_chunk *chk;
7747 struct sctp_stream_queue_pending *sp;
7754 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7755 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7756 if (sp->net == net) {
7757 sctp_free_remote_addr(sp->net);
7762 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7763 if (chk->whoTo == net) {
7764 sctp_free_remote_addr(chk->whoTo);
7771 sctp_med_chunk_output(struct sctp_inpcb *inp,
7772 struct sctp_tcb *stcb,
7773 struct sctp_association *asoc,
7776 int control_only, int from_where,
7777 struct timeval *now, int *now_filled, int frag_point, int so_locked
7778 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7784 * Ok this is the generic chunk service queue. We must do the
7785 * following: - Service the stream queue that is next, moving any
7786 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7787 * LAST to the out queue in one pass) and assigning TSNs. - Check to
7788 * see if the cwnd/rwnd allows any output; if so, we go ahead and
7789 * formulate and send the low level chunks, making sure to combine
7790 * any control in the control chunk queue also.
7792 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7793 struct mbuf *outchain, *endoutchain;
7794 struct sctp_tmit_chunk *chk, *nchk;
7796 /* temp arrays for unlinking */
7797 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7798 int no_fragmentflg, error;
7799 unsigned int max_rwnd_per_dest, max_send_per_dest;
7800 int one_chunk, hbflag, skip_data_for_this_net;
7801 int asconf, cookie, no_out_cnt;
7802 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7803 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7805 uint32_t auth_offset = 0;
7806 struct sctp_auth_chunk *auth = NULL;
7807 uint16_t auth_keyid;
7808 int override_ok = 1;
7809 int skip_fill_up = 0;
7810 int data_auth_reqd = 0;
7813 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7819 auth_keyid = stcb->asoc.authinfo.active_keyid;
7821 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7822 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7823 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7828 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7830 * First let's prime the pump. For each destination, if there is room
7831 * in the flight size, attempt to pull an MTU's worth out of the
7832 * stream queues into the general send_queue
7834 #ifdef SCTP_AUDITING_ENABLED
7835 sctp_audit_log(0xC2, 2);
7837 SCTP_TCB_LOCK_ASSERT(stcb);
7839 if ((control_only) || (asoc->stream_reset_outstanding))
7844 /* Nothing possible to send? */
7845 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7846 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7847 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7848 TAILQ_EMPTY(&asoc->send_queue) &&
7849 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
7854 if (asoc->peers_rwnd == 0) {
7855 /* No room in peers rwnd */
7857 if (asoc->total_flight > 0) {
7858 /* we are allowed one chunk in flight */
7862 if (stcb->asoc.ecn_echo_cnt_onq) {
7863 /* Record where a sack goes, if any */
7864 if (no_data_chunks &&
7865 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7866 /* Nothing but ECNe to send - we don't do that */
7867 goto nothing_to_send;
7869 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7870 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7871 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7872 sack_goes_to = chk->whoTo;
7877 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7878 if (stcb->sctp_socket)
7879 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7881 max_send_per_dest = 0;
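/*
 * Illustrative split (hypothetical numbers): with a peer rwnd of
 * 60000, 4000 bytes in flight and 2 destinations, each net gets a
 * budget of (60000 + 4000) / 2 == 32000 bytes of receiver window, and
 * likewise an equal share of the local send-buffer limit when a socket
 * is attached.
 */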
7882 if (no_data_chunks == 0) {
7883 /* How many non-directed chunks are there? */
7884 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7885 if (chk->whoTo == NULL) {
7887 * We already have non-directed chunks on
7888 * the queue, no need to do a fill-up.
7896 if ((no_data_chunks == 0) &&
7897 (skip_fill_up == 0) &&
7898 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7899 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7901 * The for loop we are in takes each net; if
7902 * it's got space in cwnd and has data sent to it
7903 * (when CMT is off) then it calls
7904 * sctp_fill_outqueue for the net. This gets data on
7905 * the send queue for that network.
7907 * In sctp_fill_outqueue TSNs are assigned and data is
7908 * copied out of the stream buffers. Note mostly
7909 * copy by reference (we hope).
7911 net->window_probe = 0;
7912 if ((net != stcb->asoc.alternate) &&
7913 ((net->dest_state & SCTP_ADDR_PF) ||
7914 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7915 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7916 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7917 sctp_log_cwnd(stcb, net, 1,
7918 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7922 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7923 (net->flight_size == 0)) {
7924 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7926 if (net->flight_size >= net->cwnd) {
7927 /* skip this network, no room - can't fill */
7928 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7929 sctp_log_cwnd(stcb, net, 3,
7930 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7934 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7935 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7937 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7939 /* memory alloc failure */
7945 /* now service each destination and send out what we can for it */
7946 /* Nothing to send? */
7947 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7948 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7949 TAILQ_EMPTY(&asoc->send_queue)) {
7953 if (asoc->sctp_cmt_on_off > 0) {
7954 /* get the last start point */
7955 start_at = asoc->last_net_cmt_send_started;
7956 if (start_at == NULL) {
7957 /* NULL, so start at the beginning */
7958 start_at = TAILQ_FIRST(&asoc->nets);
7960 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7961 if (start_at == NULL) {
7962 start_at = TAILQ_FIRST(&asoc->nets);
7965 asoc->last_net_cmt_send_started = start_at;
7967 start_at = TAILQ_FIRST(&asoc->nets);
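/*
 * Sketch of the intent: with CMT enabled the destination walk below
 * resumes one net past where the previous call stopped, giving a
 * simple round robin over asoc->nets; with CMT off the walk always
 * starts at the head of the nets list.
 */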
7969 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7970 if (chk->whoTo == NULL) {
7971 if (asoc->alternate) {
7972 chk->whoTo = asoc->alternate;
7974 chk->whoTo = asoc->primary_destination;
7976 atomic_add_int(&chk->whoTo->ref_count, 1);
7979 old_start_at = NULL;
7980 again_one_more_time:
7981 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7982 /* how much can we send? */
7983 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7984 if (old_start_at && (old_start_at == net)) {
7985 /* through the list completely. */
7989 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7990 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7991 (net->flight_size >= net->cwnd)) {
7993 * Nothing on control or asconf and flight is full,
7994 * we can skip even in the CMT case.
7999 endoutchain = outchain = NULL;
8002 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8003 skip_data_for_this_net = 1;
8005 skip_data_for_this_net = 0;
8007 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
8009 * if we have a route and an ifp, check to see if we
8010 * have room to send to this guy
8014 ifp = net->ro.ro_rt->rt_ifp;
8015 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
8016 SCTP_STAT_INCR(sctps_ifnomemqueued);
8017 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
8018 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
8023 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8026 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8031 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8041 if (mtu > asoc->peers_rwnd) {
8042 if (asoc->total_flight > 0) {
8043 /* We have a packet in flight somewhere */
8044 r_mtu = asoc->peers_rwnd;
8046 /* We are always allowed to send one MTU out */
8053 /************************/
8054 /* ASCONF transmission */
8055 /************************/
8056 /* Now first let's go through the asconf queue */
8057 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8058 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8061 if (chk->whoTo == NULL) {
8062 if (asoc->alternate == NULL) {
8063 if (asoc->primary_destination != net) {
8067 if (asoc->alternate != net) {
8072 if (chk->whoTo != net) {
8076 if (chk->data == NULL) {
8079 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8080 chk->sent != SCTP_DATAGRAM_RESEND) {
8084 * if no AUTH is yet included and this chunk
8085 * requires it, make sure to account for it. We
8086 * don't apply the size until the AUTH chunk is
8087 * actually added below in case there is no room for
8088 * this chunk. NOTE: we overload the use of "omtu"
8091 if ((auth == NULL) &&
8092 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8093 stcb->asoc.peer_auth_chunks)) {
8094 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8097 /* Here we do NOT factor the r_mtu */
8098 if ((chk->send_size < (int)(mtu - omtu)) ||
8099 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8101 * We probably should glom the mbuf chain
8102 * from the chk->data for control but the
8103 * problem is it becomes yet one more level
8104 * of tracking to do if for some reason
8105 * output fails. Then I have got to
8106 * reconstruct the merged control chain.. el
8107 * yucko.. for now we take the easy way and
8111 * Add an AUTH chunk, if chunk requires it
8112 * save the offset into the chain for AUTH
8114 if ((auth == NULL) &&
8115 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8116 stcb->asoc.peer_auth_chunks))) {
8117 outchain = sctp_add_auth_chunk(outchain,
8122 chk->rec.chunk_id.id);
8123 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8125 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8126 (int)chk->rec.chunk_id.can_take_data,
8127 chk->send_size, chk->copy_by_ref);
8128 if (outchain == NULL) {
8130 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8133 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8134 /* update our MTU size */
8135 if (mtu > (chk->send_size + omtu))
8136 mtu -= (chk->send_size + omtu);
8139 to_out += (chk->send_size + omtu);
8140 /* Do clear IP_DF ? */
8141 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8144 if (chk->rec.chunk_id.can_take_data)
8147 * set hb flag since we can use these for
8153 * should sysctl this: don't bundle data
8154 * with ASCONF since it requires AUTH
8157 chk->sent = SCTP_DATAGRAM_SENT;
8158 if (chk->whoTo == NULL) {
8160 atomic_add_int(&net->ref_count, 1);
8165 * Ok we are out of room but we can
8166 * output without affecting the
8167 * flight size since this little guy
8168 * is a control only packet.
8170 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8172 * do NOT clear the asconf flag as
8173 * it is used to do appropriate
8174 * source address selection.
8176 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8177 (struct sockaddr *)&net->ro._l_addr,
8178 outchain, auth_offset, auth,
8179 stcb->asoc.authinfo.active_keyid,
8180 no_fragmentflg, 0, asconf,
8181 inp->sctp_lport, stcb->rport,
8182 htonl(stcb->asoc.peer_vtag),
8186 if (error == ENOBUFS) {
8187 asoc->ifp_had_enobuf = 1;
8188 SCTP_STAT_INCR(sctps_lowlevelerr);
8190 if (from_where == 0) {
8191 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8193 if (*now_filled == 0) {
8194 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8196 *now = net->last_sent_time;
8198 net->last_sent_time = *now;
8201 /* error, could not output */
8202 if (error == EHOSTUNREACH) {
8208 sctp_move_chunks_from_net(stcb, net);
8213 asoc->ifp_had_enobuf = 0;
8214 if (*now_filled == 0) {
8215 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8217 *now = net->last_sent_time;
8219 net->last_sent_time = *now;
8223 * increase the number we sent, if a
8224 * cookie is sent we don't tell them
8227 outchain = endoutchain = NULL;
8231 *num_out += ctl_cnt;
8232 /* recalc a clean slate and setup */
8233 switch (net->ro._l_addr.sa.sa_family) {
8236 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8241 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8254 /************************/
8255 /* Control transmission */
8256 /************************/
8257 /* Now first let's go through the control queue */
8258 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8259 if ((sack_goes_to) &&
8260 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8261 (chk->whoTo != sack_goes_to)) {
8263 * if we have a sack in queue, and we are
8264 * looking at an ecn echo that is NOT queued
8265 * to where the sack is going..
8267 if (chk->whoTo == net) {
8269 * Don't transmit it to where it's
8270 * going (current net)
8273 } else if (sack_goes_to == net) {
8275 * But do transmit it to this
8278 goto skip_net_check;
8281 if (chk->whoTo == NULL) {
8282 if (asoc->alternate == NULL) {
8283 if (asoc->primary_destination != net) {
8287 if (asoc->alternate != net) {
8292 if (chk->whoTo != net) {
8297 if (chk->data == NULL) {
8300 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8302 * It must be unsent. Cookies and ASCONF's
8303 * hang around but their timers will force
8304 * when marked for resend.
8309 * if no AUTH is yet included and this chunk
8310 * requires it, make sure to account for it. We
8311 * don't apply the size until the AUTH chunk is
8312 * actually added below in case there is no room for
8313 * this chunk. NOTE: we overload the use of "omtu"
8316 if ((auth == NULL) &&
8317 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8318 stcb->asoc.peer_auth_chunks)) {
8319 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8322 /* Here we do NOT factor the r_mtu */
8323 if ((chk->send_size <= (int)(mtu - omtu)) ||
8324 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8326 * We probably should glom the mbuf chain
8327 * from the chk->data for control but the
8328 * problem is it becomes yet one more level
8329 * of tracking to do if for some reason
8330 * output fails. Then I have got to
8331 * reconstruct the merged control chain.. el
8332 * yucko.. for now we take the easy way and
8336 * Add an AUTH chunk, if chunk requires it
8337 * save the offset into the chain for AUTH
8339 if ((auth == NULL) &&
8340 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8341 stcb->asoc.peer_auth_chunks))) {
8342 outchain = sctp_add_auth_chunk(outchain,
8347 chk->rec.chunk_id.id);
8348 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8350 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8351 (int)chk->rec.chunk_id.can_take_data,
8352 chk->send_size, chk->copy_by_ref);
8353 if (outchain == NULL) {
8355 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8358 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8359 /* update our MTU size */
8360 if (mtu > (chk->send_size + omtu))
8361 mtu -= (chk->send_size + omtu);
8364 to_out += (chk->send_size + omtu);
8365 /* Do clear IP_DF ? */
8366 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8369 if (chk->rec.chunk_id.can_take_data)
8371 /* Mark things to be removed, if needed */
8372 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8373 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8374 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8375 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8376 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8377 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8378 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8379 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8380 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8381 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8382 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8383 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8386 /* remove these chunks at the end */
8387 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8388 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8389 /* turn off the timer */
8390 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8391 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8392 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8398 * Other chunks, since they have
8399 * timers running (i.e. COOKIE) we
8400 * just "trust" that it gets sent or
8404 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8407 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8409 * Increment the ECNE send count
8410 * here; this means we may be
8411 * over-zealous in our
8412 * counting if the send
8413 * fails, but it's the best
8414 * place to do it (we used
8415 * to do it in the queue of
8416 * the chunk, but that did
8417 * not tell how many times
8420 SCTP_STAT_INCR(sctps_sendecne);
8422 chk->sent = SCTP_DATAGRAM_SENT;
8423 if (chk->whoTo == NULL) {
8425 atomic_add_int(&net->ref_count, 1);
8431 * Ok we are out of room but we can
8432 * output without affecting the
8433 * flight size since this little guy
8434 * is a control only packet.
8437 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8439 * do NOT clear the asconf
8440 * flag as it is used to do
8441 * appropriate source
8442 * address selection.
8446 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8449 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8450 (struct sockaddr *)&net->ro._l_addr,
8453 stcb->asoc.authinfo.active_keyid,
8454 no_fragmentflg, 0, asconf,
8455 inp->sctp_lport, stcb->rport,
8456 htonl(stcb->asoc.peer_vtag),
8460 if (error == ENOBUFS) {
8461 asoc->ifp_had_enobuf = 1;
8462 SCTP_STAT_INCR(sctps_lowlevelerr);
8464 if (from_where == 0) {
8465 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8467 /* error, could not output */
8469 if (*now_filled == 0) {
8470 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8472 *now = net->last_sent_time;
8474 net->last_sent_time = *now;
8478 if (error == EHOSTUNREACH) {
8484 sctp_move_chunks_from_net(stcb, net);
8489 asoc->ifp_had_enobuf = 0;
8490 /* Only HB or ASCONF advances time */
8492 if (*now_filled == 0) {
8493 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8495 *now = net->last_sent_time;
8497 net->last_sent_time = *now;
8502 * increase the number we sent, if a
8503 * cookie is sent we don't tell them
8506 outchain = endoutchain = NULL;
8510 *num_out += ctl_cnt;
8511 /* recalc a clean slate and setup */
8512 switch (net->ro._l_addr.sa.sa_family) {
8515 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8520 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8533 /* JRI: if dest is in PF state, do not send data to it */
8534 if ((asoc->sctp_cmt_on_off > 0) &&
8535 (net != stcb->asoc.alternate) &&
8536 (net->dest_state & SCTP_ADDR_PF)) {
8539 if (net->flight_size >= net->cwnd) {
8542 if ((asoc->sctp_cmt_on_off > 0) &&
8543 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8544 (net->flight_size > max_rwnd_per_dest)) {
8548 * We need a specific accounting for the usage of the send
8549 * buffer. We also need to check the number of messages per
8550 * net. For now, this is better than nothing and it disabled
8553 if ((asoc->sctp_cmt_on_off > 0) &&
8554 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8555 (max_send_per_dest > 0) &&
8556 (net->flight_size > max_send_per_dest)) {
8559 /*********************/
8560 /* Data transmission */
8561 /*********************/
8563 * if AUTH for DATA is required and no AUTH has been added
8564 * yet, account for this in the mtu now... if no data can be
8565 * bundled, this adjustment won't matter anyway since the
8566 * packet will be going out...
8568 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8569 stcb->asoc.peer_auth_chunks);
8570 if (data_auth_reqd && (auth == NULL)) {
8571 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8573 /* now let's add any data within the MTU constraints */
8574 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8577 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8578 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8585 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8586 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8596 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8597 (skip_data_for_this_net == 0)) ||
8599 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8600 if (no_data_chunks) {
8601 /* let only control go out */
8605 if (net->flight_size >= net->cwnd) {
8606 /* skip this net, no room for data */
8610 if ((chk->whoTo != NULL) &&
8611 (chk->whoTo != net)) {
8612 /* Don't send the chunk on this net */
8615 if (asoc->sctp_cmt_on_off == 0) {
8616 if ((asoc->alternate) &&
8617 (asoc->alternate != net) &&
8618 (chk->whoTo == NULL)) {
8620 } else if ((net != asoc->primary_destination) &&
8621 (asoc->alternate == NULL) &&
8622 (chk->whoTo == NULL)) {
8626 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8628 * strange, we have a chunk that is
8629 * too big for its destination and
8630 * yet no fragment ok flag.
8631 * Something went wrong when the
8632 * PMTU changed...we did not mark
8633 * this chunk for some reason?? I
8634 * will fix it here by letting IP
8635 * fragment it for now and printing
8636 * a warning. This really should not
8639 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8640 chk->send_size, mtu);
8641 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8643 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8644 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8645 struct sctp_data_chunk *dchkh;
8647 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8648 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8650 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8651 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8652 /* ok we will add this one */
8655 * Add an AUTH chunk, if chunk
8656 * requires it, save the offset into
8657 * the chain for AUTH
8659 if (data_auth_reqd) {
8661 outchain = sctp_add_auth_chunk(outchain,
8667 auth_keyid = chk->auth_keyid;
8669 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8670 } else if (override_ok) {
8675 auth_keyid = chk->auth_keyid;
8677 } else if (auth_keyid != chk->auth_keyid) {
8685 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8686 chk->send_size, chk->copy_by_ref);
8687 if (outchain == NULL) {
8688 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8689 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8690 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8693 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8696 /* update our MTU size */
8697 /* Do clear IP_DF ? */
8698 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8701 /* unsigned subtraction of mtu */
8702 if (mtu > chk->send_size)
8703 mtu -= chk->send_size;
8706 /* unsigned subtraction of r_mtu */
8707 if (r_mtu > chk->send_size)
8708 r_mtu -= chk->send_size;
8712 to_out += chk->send_size;
8713 if ((to_out > mx_mtu) && no_fragmentflg) {
8715 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8717 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8721 chk->window_probe = 0;
8722 data_list[bundle_at++] = chk;
8723 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8726 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8727 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8728 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8730 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8732 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8733 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8743 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8745 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8746 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8747 data_list[0]->window_probe = 1;
8748 net->window_probe = 1;
8754 * Must be sent in order of the
8755 * TSN's (on a network)
8759 } /* for (chunk gather loop for this net) */
8760 } /* if asoc.state OPEN */
8762 /* Is there something to send for this destination? */
8764 /* We may need to start a control timer or two */
8766 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8769 * do NOT clear the asconf flag as it is
8770 * used to do appropriate source address
8775 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8778 /* must start a send timer if data is being sent */
8779 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8781 * no timer running on this destination
8784 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8786 /* Now send it, if there is anything to send :> */
8787 if ((error = sctp_lowlevel_chunk_output(inp,
8790 (struct sockaddr *)&net->ro._l_addr,
8798 inp->sctp_lport, stcb->rport,
8799 htonl(stcb->asoc.peer_vtag),
8803 /* error, we could not output */
8804 if (error == ENOBUFS) {
8805 SCTP_STAT_INCR(sctps_lowlevelerr);
8806 asoc->ifp_had_enobuf = 1;
8808 if (from_where == 0) {
8809 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8811 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8813 if (*now_filled == 0) {
8814 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8816 *now = net->last_sent_time;
8818 net->last_sent_time = *now;
8822 if (error == EHOSTUNREACH) {
8824 * Destination went unreachable
8827 sctp_move_chunks_from_net(stcb, net);
8831 * I add this line to be paranoid. As far as
8832 * I can tell the continue takes us back to
8833 * the top of the for, but just to make sure
8834 * I will reset these again here.
8836 ctl_cnt = bundle_at = 0;
8837 continue; /* This takes us back to the
8838 * for() for the nets. */
8840 asoc->ifp_had_enobuf = 0;
8845 if (bundle_at || hbflag) {
8846 /* For data/asconf and hb set time */
8847 if (*now_filled == 0) {
8848 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8850 *now = net->last_sent_time;
8852 net->last_sent_time = *now;
8856 *num_out += (ctl_cnt + bundle_at);
8859 /* setup for a RTO measurement */
8860 tsns_sent = data_list[0]->rec.data.TSN_seq;
8861 /* fill time if not already filled */
8862 if (*now_filled == 0) {
8863 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8865 *now = asoc->time_last_sent;
8867 asoc->time_last_sent = *now;
8869 if (net->rto_needed) {
8870 data_list[0]->do_rtt = 1;
8871 net->rto_needed = 0;
8873 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8874 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8880 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8881 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8884 if (old_start_at == NULL) {
8885 old_start_at = start_at;
8886 start_at = TAILQ_FIRST(&asoc->nets);
8888 goto again_one_more_time;
8891 * At the end there should be no NON timed chunks hanging on this
8894 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8895 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8897 if ((*num_out == 0) && (*reason_code == 0)) {
8902 sctp_clean_up_ctl(stcb, asoc, so_locked);
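/*
 * Illustrative, standalone sketch (not from this file): the switch on
 * sa_family above subtracts the fixed per-packet overhead from the path
 * MTU before any chunks are bundled.  The helper below redoes that
 * arithmetic with the well-known minimum header sizes (20-byte IPv4 or
 * 40-byte IPv6 header plus the 12-byte SCTP common header); the name
 * example_usable_mtu and its parameters are made up for illustration.
 */
#include <stdint.h>

static uint32_t
example_usable_mtu(uint32_t path_mtu, int is_ipv6)
{
	const uint32_t ip_overhead = is_ipv6 ? 40 : 20;	/* IP header */
	const uint32_t sctp_overhead = 12;		/* SCTP common header */

	if (path_mtu <= ip_overhead + sctp_overhead)
		return (0);			/* nothing fits in this MTU */
	return (path_mtu - ip_overhead - sctp_overhead);
}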
8907 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8910 * Prepend an OPERATION_ERROR chunk header and put it at the end of
8911 * the control chunk queue.
8913 struct sctp_chunkhdr *hdr;
8914 struct sctp_tmit_chunk *chk;
8917 SCTP_TCB_LOCK_ASSERT(stcb);
8918 sctp_alloc_a_chunk(stcb, chk);
8921 sctp_m_freem(op_err);
8924 chk->copy_by_ref = 0;
8925 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
8926 if (op_err == NULL) {
8927 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
8932 while (mat != NULL) {
8933 chk->send_size += SCTP_BUF_LEN(mat);
8934 mat = SCTP_BUF_NEXT(mat);
8936 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8937 chk->rec.chunk_id.can_take_data = 1;
8938 chk->sent = SCTP_DATAGRAM_UNSENT;
8941 chk->asoc = &stcb->asoc;
8944 hdr = mtod(op_err, struct sctp_chunkhdr *);
8945 hdr->chunk_type = SCTP_OPERATION_ERROR;
8946 hdr->chunk_flags = 0;
8947 hdr->chunk_length = htons(chk->send_size);
8948 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8951 chk->asoc->ctrl_queue_cnt++;
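/*
 * Illustrative, standalone sketch (not from this file): what the chunk
 * header prepended by sctp_queue_op_err() amounts to on the wire.  An
 * ERROR chunk is a 4-byte header (type 9 = OPERATION_ERROR per RFC 4960,
 * flags, 16-bit length in network byte order) followed by the error
 * cause TLVs, with the length covering the header plus all causes.  The
 * function name and flat-buffer interface below are invented for the
 * example; the kernel code works on mbuf chains instead.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t
example_build_error_chunk(uint8_t *buf, size_t buflen,
    const uint8_t *causes, size_t causes_len)
{
	size_t total = 4 + causes_len;		/* chunk header + causes */

	if (buflen < total || total > 0xffff)
		return (0);
	buf[0] = 9;				/* chunk type: OPERATION_ERROR */
	buf[1] = 0;				/* chunk flags */
	buf[2] = (uint8_t)(total >> 8);		/* chunk length (big endian) */
	buf[3] = (uint8_t)(total & 0xff);
	if (causes_len > 0)
		memcpy(buf + 4, causes, causes_len);
	return (total);
}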
8955 sctp_send_cookie_echo(struct mbuf *m,
8957 struct sctp_tcb *stcb,
8958 struct sctp_nets *net)
8961 * pull out the cookie and put it at the front of the control chunk
8965 struct mbuf *cookie;
8966 struct sctp_paramhdr parm, *phdr;
8967 struct sctp_chunkhdr *hdr;
8968 struct sctp_tmit_chunk *chk;
8969 uint16_t ptype, plen;
8971 /* First find the cookie in the param area */
8973 at = offset + sizeof(struct sctp_init_chunk);
8975 SCTP_TCB_LOCK_ASSERT(stcb);
8977 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8981 ptype = ntohs(phdr->param_type);
8982 plen = ntohs(phdr->param_length);
8983 if (ptype == SCTP_STATE_COOKIE) {
8986 /* found the cookie */
8987 if ((pad = (plen % 4))) {
8990 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
8991 if (cookie == NULL) {
8995 #ifdef SCTP_MBUF_LOGGING
8996 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8999 for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
9000 if (SCTP_BUF_IS_EXTENDED(mat)) {
9001 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9008 at += SCTP_SIZE32(plen);
9010 if (cookie == NULL) {
9011 /* Did not find the cookie */
9014 /* ok, we got the cookie lets change it into a cookie echo chunk */
9016 /* first the change from param to cookie */
9017 hdr = mtod(cookie, struct sctp_chunkhdr *);
9018 hdr->chunk_type = SCTP_COOKIE_ECHO;
9019 hdr->chunk_flags = 0;
9020 /* get the chunk stuff now and place it in the FRONT of the queue */
9021 sctp_alloc_a_chunk(stcb, chk);
9024 sctp_m_freem(cookie);
9027 chk->copy_by_ref = 0;
9028 chk->send_size = plen;
9029 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9030 chk->rec.chunk_id.can_take_data = 0;
9031 chk->sent = SCTP_DATAGRAM_UNSENT;
9033 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9034 chk->asoc = &stcb->asoc;
9037 atomic_add_int(&chk->whoTo->ref_count, 1);
9038 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9039 chk->asoc->ctrl_queue_cnt++;
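/*
 * Illustrative, standalone sketch (not from this file): the in-place
 * rewrite done by sctp_send_cookie_echo() works because an SCTP
 * parameter header and an SCTP chunk header are both 4 bytes with the
 * 16-bit length in the same position; only the first two bytes change
 * (16-bit parameter type 7 = STATE COOKIE becomes 8-bit chunk type
 * 10 = COOKIE ECHO plus a zero flags byte), so the copied cookie body
 * and its length field are reused unchanged.  The helper name below is
 * made up for the example.
 */
#include <stdint.h>

static void
example_param_to_cookie_echo(uint8_t *tlv)
{
	/* tlv points at the copied STATE COOKIE parameter */
	tlv[0] = 10;	/* chunk type: COOKIE ECHO */
	tlv[1] = 0;	/* chunk flags */
	/* tlv[2..3]: 16-bit length in network byte order, left as-is */
}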
9044 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9048 struct sctp_nets *net)
9051 * take a HB request and make it into a HB ack and send it.
9053 struct mbuf *outchain;
9054 struct sctp_chunkhdr *chdr;
9055 struct sctp_tmit_chunk *chk;
9059 /* must have a net pointer */
9062 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
9063 if (outchain == NULL) {
9064 /* gak out of memory */
9067 #ifdef SCTP_MBUF_LOGGING
9068 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9071 for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
9072 if (SCTP_BUF_IS_EXTENDED(mat)) {
9073 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9078 chdr = mtod(outchain, struct sctp_chunkhdr *);
9079 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9080 chdr->chunk_flags = 0;
9081 if (chk_length % 4) {
9083 uint32_t cpthis = 0;
9086 padlen = 4 - (chk_length % 4);
9087 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
9089 sctp_alloc_a_chunk(stcb, chk);
9092 sctp_m_freem(outchain);
9095 chk->copy_by_ref = 0;
9096 chk->send_size = chk_length;
9097 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9098 chk->rec.chunk_id.can_take_data = 1;
9099 chk->sent = SCTP_DATAGRAM_UNSENT;
9102 chk->asoc = &stcb->asoc;
9103 chk->data = outchain;
9105 atomic_add_int(&chk->whoTo->ref_count, 1);
9106 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9107 chk->asoc->ctrl_queue_cnt++;
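/*
 * Illustrative, standalone sketch (not from this file): the padding
 * branch above (chk_length % 4) appends zero bytes so the copied
 * HEARTBEAT-ACK ends on a 32-bit boundary, as every SCTP chunk must;
 * the padding is not counted in the chunk length.  The helper below
 * just isolates that rounding; its name is invented for the example.
 */
#include <stdint.h>

static uint32_t
example_chunk_pad_bytes(uint32_t chunk_len)
{
	if (chunk_len % 4 != 0)
		return (4 - (chunk_len % 4));	/* zero bytes to append */
	return (0);
}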
9111 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9113 /* formulate and queue a cookie-ack back to sender */
9114 struct mbuf *cookie_ack;
9115 struct sctp_chunkhdr *hdr;
9116 struct sctp_tmit_chunk *chk;
9118 SCTP_TCB_LOCK_ASSERT(stcb);
9120 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
9121 if (cookie_ack == NULL) {
9125 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9126 sctp_alloc_a_chunk(stcb, chk);
9129 sctp_m_freem(cookie_ack);
9132 chk->copy_by_ref = 0;
9133 chk->send_size = sizeof(struct sctp_chunkhdr);
9134 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9135 chk->rec.chunk_id.can_take_data = 1;
9136 chk->sent = SCTP_DATAGRAM_UNSENT;
9139 chk->asoc = &stcb->asoc;
9140 chk->data = cookie_ack;
9141 if (chk->asoc->last_control_chunk_from != NULL) {
9142 chk->whoTo = chk->asoc->last_control_chunk_from;
9143 atomic_add_int(&chk->whoTo->ref_count, 1);
9147 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9148 hdr->chunk_type = SCTP_COOKIE_ACK;
9149 hdr->chunk_flags = 0;
9150 hdr->chunk_length = htons(chk->send_size);
9151 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9152 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9153 chk->asoc->ctrl_queue_cnt++;
9159 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9161 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9162 struct mbuf *m_shutdown_ack;
9163 struct sctp_shutdown_ack_chunk *ack_cp;
9164 struct sctp_tmit_chunk *chk;
9166 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9167 if (m_shutdown_ack == NULL) {
9171 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9172 sctp_alloc_a_chunk(stcb, chk);
9175 sctp_m_freem(m_shutdown_ack);
9178 chk->copy_by_ref = 0;
9179 chk->send_size = sizeof(struct sctp_chunkhdr);
9180 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9181 chk->rec.chunk_id.can_take_data = 1;
9182 chk->sent = SCTP_DATAGRAM_UNSENT;
9185 chk->asoc = &stcb->asoc;
9186 chk->data = m_shutdown_ack;
9189 atomic_add_int(&chk->whoTo->ref_count, 1);
9191 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9192 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9193 ack_cp->ch.chunk_flags = 0;
9194 ack_cp->ch.chunk_length = htons(chk->send_size);
9195 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9196 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9197 chk->asoc->ctrl_queue_cnt++;
9202 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9204 /* formulate and queue a SHUTDOWN to the sender */
9205 struct mbuf *m_shutdown;
9206 struct sctp_shutdown_chunk *shutdown_cp;
9207 struct sctp_tmit_chunk *chk;
9209 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9210 if (m_shutdown == NULL) {
9214 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9215 sctp_alloc_a_chunk(stcb, chk);
9218 sctp_m_freem(m_shutdown);
9221 chk->copy_by_ref = 0;
9222 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9223 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9224 chk->rec.chunk_id.can_take_data = 1;
9225 chk->sent = SCTP_DATAGRAM_UNSENT;
9228 chk->asoc = &stcb->asoc;
9229 chk->data = m_shutdown;
9232 atomic_add_int(&chk->whoTo->ref_count, 1);
9234 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9235 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9236 shutdown_cp->ch.chunk_flags = 0;
9237 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9238 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9239 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9240 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9241 chk->asoc->ctrl_queue_cnt++;
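/*
 * Illustrative, standalone sketch (not from this file): the SHUTDOWN
 * chunk queued by sctp_send_shutdown() is only 8 bytes on the wire:
 * the 4-byte chunk header (type 7 per RFC 4960) followed by the 32-bit
 * cumulative TSN ack, with both the length and the TSN in network byte
 * order.  The flat-buffer builder below is invented for the example.
 */
#include <stdint.h>

static void
example_build_shutdown(uint8_t buf[8], uint32_t cum_tsn_ack)
{
	buf[0] = 7;				/* chunk type: SHUTDOWN */
	buf[1] = 0;				/* chunk flags */
	buf[2] = 0;				/* chunk length = 8, */
	buf[3] = 8;				/* network byte order */
	buf[4] = (uint8_t)(cum_tsn_ack >> 24);	/* cumulative TSN ack, */
	buf[5] = (uint8_t)(cum_tsn_ack >> 16);	/* network byte order  */
	buf[6] = (uint8_t)(cum_tsn_ack >> 8);
	buf[7] = (uint8_t)(cum_tsn_ack & 0xff);
}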
9246 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9249 * formulate and queue an ASCONF to the peer. ASCONF parameters
9250 * should be queued on the assoc queue.
9252 struct sctp_tmit_chunk *chk;
9253 struct mbuf *m_asconf;
9256 SCTP_TCB_LOCK_ASSERT(stcb);
9258 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9259 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9260 /* can't send a new one if there is one in flight already */
9263 /* compose an ASCONF chunk, maximum length is PMTU */
9264 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9265 if (m_asconf == NULL) {
9268 sctp_alloc_a_chunk(stcb, chk);
9271 sctp_m_freem(m_asconf);
9274 chk->copy_by_ref = 0;
9275 chk->data = m_asconf;
9276 chk->send_size = len;
9277 chk->rec.chunk_id.id = SCTP_ASCONF;
9278 chk->rec.chunk_id.can_take_data = 0;
9279 chk->sent = SCTP_DATAGRAM_UNSENT;
9281 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9282 chk->asoc = &stcb->asoc;
9285 atomic_add_int(&chk->whoTo->ref_count, 1);
9287 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9288 chk->asoc->ctrl_queue_cnt++;
9293 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9296 * formulate and queue an asconf-ack back to the sender. The asconf-ack
9297 * must be stored in the tcb.
9299 struct sctp_tmit_chunk *chk;
9300 struct sctp_asconf_ack *ack, *latest_ack;
9302 struct sctp_nets *net = NULL;
9304 SCTP_TCB_LOCK_ASSERT(stcb);
9305 /* Get the latest ASCONF-ACK */
9306 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9307 if (latest_ack == NULL) {
9310 if (latest_ack->last_sent_to != NULL &&
9311 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9312 /* we're doing a retransmission */
9313 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9316 if (stcb->asoc.last_control_chunk_from == NULL) {
9317 if (stcb->asoc.alternate) {
9318 net = stcb->asoc.alternate;
9320 net = stcb->asoc.primary_destination;
9323 net = stcb->asoc.last_control_chunk_from;
9328 if (stcb->asoc.last_control_chunk_from == NULL) {
9329 if (stcb->asoc.alternate) {
9330 net = stcb->asoc.alternate;
9332 net = stcb->asoc.primary_destination;
9335 net = stcb->asoc.last_control_chunk_from;
9338 latest_ack->last_sent_to = net;
9340 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9341 if (ack->data == NULL) {
9344 /* copy the asconf_ack */
9345 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
9346 if (m_ack == NULL) {
9347 /* couldn't copy it */
9350 #ifdef SCTP_MBUF_LOGGING
9351 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9354 for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) {
9355 if (SCTP_BUF_IS_EXTENDED(mat)) {
9356 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9362 sctp_alloc_a_chunk(stcb, chk);
9366 sctp_m_freem(m_ack);
9369 chk->copy_by_ref = 0;
9373 atomic_add_int(&chk->whoTo->ref_count, 1);
9378 chk->send_size = ack->len;
9379 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9380 chk->rec.chunk_id.can_take_data = 1;
9381 chk->sent = SCTP_DATAGRAM_UNSENT;
9383 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
9384 chk->asoc = &stcb->asoc;
9386 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9387 chk->asoc->ctrl_queue_cnt++;
9394 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9395 struct sctp_tcb *stcb,
9396 struct sctp_association *asoc,
9397 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9398 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9404 * send out one MTU of retransmission. If fast_retransmit is
9405 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9406 * rwnd. For a Cookie or Asconf in the control chunk queue we
9407 * retransmit them by themselves.
9409 * For data chunks we will pick out the lowest TSN's in the sent_queue
9410 * marked for resend and bundle them all together (up to an MTU of the
9411 * destination). The address to send to should have been
9412 * selected/changed where the retransmission was marked (i.e. in FR
9413 * or t3-timeout routines).
9415 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9416 struct sctp_tmit_chunk *chk, *fwd;
9417 struct mbuf *m, *endofchain;
9418 struct sctp_nets *net = NULL;
9419 uint32_t tsns_sent = 0;
9420 int no_fragmentflg, bundle_at, cnt_thru;
9422 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9423 struct sctp_auth_chunk *auth = NULL;
9424 uint32_t auth_offset = 0;
9425 uint16_t auth_keyid;
9426 int override_ok = 1;
9427 int data_auth_reqd = 0;
9430 SCTP_TCB_LOCK_ASSERT(stcb);
9431 tmr_started = ctl_cnt = bundle_at = error = 0;
9436 endofchain = m = NULL;
9437 auth_keyid = stcb->asoc.authinfo.active_keyid;
9438 #ifdef SCTP_AUDITING_ENABLED
9439 sctp_audit_log(0xC3, 1);
9441 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9442 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9443 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9444 asoc->sent_queue_retran_cnt);
9445 asoc->sent_queue_cnt = 0;
9446 asoc->sent_queue_cnt_removeable = 0;
9447 /* send back 0/0 so we enter normal transmission */
9451 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9452 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9453 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9454 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9455 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9458 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9459 if (chk != asoc->str_reset) {
9461 * not eligible for retran if it's
9468 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9472 * Add an AUTH chunk, if chunk requires it save the
9473 * offset into the chain for AUTH
9475 if ((auth == NULL) &&
9476 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9477 stcb->asoc.peer_auth_chunks))) {
9478 m = sctp_add_auth_chunk(m, &endofchain,
9479 &auth, &auth_offset,
9481 chk->rec.chunk_id.id);
9482 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9484 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9490 /* do we have control chunks to retransmit? */
9492 /* Start a timer no matter if we succeed or fail */
9493 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9494 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9495 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9496 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9497 chk->snd_count++; /* update our count */
9498 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9499 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9500 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9501 no_fragmentflg, 0, 0,
9502 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9503 chk->whoTo->port, NULL,
9506 SCTP_STAT_INCR(sctps_lowlevelerr);
9513 * We don't want to mark the net->sent time here since
9514 * we use this for HB and retransmissions cannot measure RTT
9516 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9518 chk->sent = SCTP_DATAGRAM_SENT;
9519 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9523 /* Clean up the fwd-tsn list */
9524 sctp_clean_up_ctl(stcb, asoc, so_locked);
9529 * Ok, it is just data retransmission we need to do or that and a
9530 * fwd-tsn with it all.
9532 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9533 return (SCTP_RETRAN_DONE);
9535 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9536 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9537 /* not yet open, resend the cookie and that is it */
9540 #ifdef SCTP_AUDITING_ENABLED
9541 sctp_auditing(20, inp, stcb, NULL);
9543 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9544 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9545 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9546 /* No, not sent to this net or not ready for rtx */
9549 if (chk->data == NULL) {
9550 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9551 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9554 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9555 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9556 /* Gak, we have exceeded max unlucky retran, abort! */
9557 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9559 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9560 atomic_add_int(&stcb->asoc.refcnt, 1);
9561 sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
9562 SCTP_TCB_LOCK(stcb);
9563 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9564 return (SCTP_RETRAN_EXIT);
9566 /* pick up the net */
9568 switch (net->ro._l_addr.sa.sa_family) {
9571 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9576 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9585 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9586 /* No room in peers rwnd */
9589 tsn = asoc->last_acked_seq + 1;
9590 if (tsn == chk->rec.data.TSN_seq) {
9592 * we make a special exception for this
9593 * case. The peer has no rwnd but is missing
9594 * the lowest chunk.. which is probably what
9595 * is holding up the rwnd.
9597 goto one_chunk_around;
9602 if (asoc->peers_rwnd < mtu) {
9604 if ((asoc->peers_rwnd == 0) &&
9605 (asoc->total_flight == 0)) {
9606 chk->window_probe = 1;
9607 chk->whoTo->window_probe = 1;
9610 #ifdef SCTP_AUDITING_ENABLED
9611 sctp_audit_log(0xC3, 2);
9615 net->fast_retran_ip = 0;
9616 if (chk->rec.data.doing_fast_retransmit == 0) {
9618 * if no FR in progress skip destination that have
9619 * flight_size > cwnd.
9621 if (net->flight_size >= net->cwnd) {
9626 * Mark the destination net to have FR recovery
9630 net->fast_retran_ip = 1;
9634 * if no AUTH is yet included and this chunk requires it,
9635 * make sure to account for it. We don't apply the size
9636 * until the AUTH chunk is actually added below in case
9637 * there is no room for this chunk.
9639 if (data_auth_reqd && (auth == NULL)) {
9640 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9644 if ((chk->send_size <= (mtu - dmtu)) ||
9645 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9646 /* ok we will add this one */
9647 if (data_auth_reqd) {
9649 m = sctp_add_auth_chunk(m,
9655 auth_keyid = chk->auth_keyid;
9657 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9658 } else if (override_ok) {
9659 auth_keyid = chk->auth_keyid;
9661 } else if (chk->auth_keyid != auth_keyid) {
9662 /* different keyid, so done bundling */
9666 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9668 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9671 /* Do clear IP_DF ? */
9672 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9675 /* update our MTU size */
9676 if (mtu > (chk->send_size + dmtu))
9677 mtu -= (chk->send_size + dmtu);
9680 data_list[bundle_at++] = chk;
9681 if (one_chunk && (asoc->total_flight <= 0)) {
9682 SCTP_STAT_INCR(sctps_windowprobed);
9685 if (one_chunk == 0) {
9687 * now are there any more forward from chk to pick
9690 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9691 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9692 /* Nope, not for retran */
9695 if (fwd->whoTo != net) {
9696 /* Nope, not the net in question */
9699 if (data_auth_reqd && (auth == NULL)) {
9700 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9703 if (fwd->send_size <= (mtu - dmtu)) {
9704 if (data_auth_reqd) {
9706 m = sctp_add_auth_chunk(m,
9712 auth_keyid = fwd->auth_keyid;
9714 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9715 } else if (override_ok) {
9716 auth_keyid = fwd->auth_keyid;
9718 } else if (fwd->auth_keyid != auth_keyid) {
9726 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9728 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9731 /* Do clear IP_DF ? */
9732 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9735 /* update our MTU size */
9736 if (mtu > (fwd->send_size + dmtu))
9737 mtu -= (fwd->send_size + dmtu);
9740 data_list[bundle_at++] = fwd;
9741 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9745 /* can't fit so we are done */
9750 /* Is there something to send for this destination? */
9753 * No matter if we fail or succeed we should start a
9754 * timer. A failure is like a lost IP packet :-)
9756 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9758 * no timer running on this destination
9761 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9764 /* Now lets send it, if there is anything to send :> */
9765 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9766 (struct sockaddr *)&net->ro._l_addr, m,
9767 auth_offset, auth, auth_keyid,
9768 no_fragmentflg, 0, 0,
9769 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9773 /* error, we could not output */
9774 SCTP_STAT_INCR(sctps_lowlevelerr);
9782 * We don't want to mark the net->sent time here
9783 * since we use this for HB and retransmissions cannot
9786 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9788 /* For auto-close */
9790 if (*now_filled == 0) {
9791 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9792 *now = asoc->time_last_sent;
9795 asoc->time_last_sent = *now;
9797 *cnt_out += bundle_at;
9798 #ifdef SCTP_AUDITING_ENABLED
9799 sctp_audit_log(0xC4, bundle_at);
9802 tsns_sent = data_list[0]->rec.data.TSN_seq;
9804 for (i = 0; i < bundle_at; i++) {
9805 SCTP_STAT_INCR(sctps_sendretransdata);
9806 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9808 * When we have revoked data and we
9809 * retransmit it, we clear the revoked
9810 * flag, since this flag dictates whether we
9811 * subtracted it from the flight size.
9813 if (data_list[i]->rec.data.chunk_was_revoked) {
9814 /* Deflate the cwnd */
9815 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9816 data_list[i]->rec.data.chunk_was_revoked = 0;
9818 data_list[i]->snd_count++;
9819 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9820 /* record the time */
9821 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9822 if (data_list[i]->book_size_scale) {
9824 * need to double the book size on
9827 data_list[i]->book_size_scale = 0;
9829 * Since we double the booksize, we
9830 * must also double the output queue
9831 * size, since this gets shrunk when
9832 * we free by this amount.
9834 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9835 data_list[i]->book_size *= 2;
9839 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9840 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9841 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9843 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9844 (uint32_t) (data_list[i]->send_size +
9845 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9848 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9849 data_list[i]->whoTo->flight_size,
9850 data_list[i]->book_size,
9851 (uintptr_t) data_list[i]->whoTo,
9852 data_list[i]->rec.data.TSN_seq);
9854 sctp_flight_size_increase(data_list[i]);
9855 sctp_total_flight_increase(stcb, data_list[i]);
9856 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9857 /* SWS sender side engages */
9858 asoc->peers_rwnd = 0;
9861 (data_list[i]->rec.data.doing_fast_retransmit)) {
9862 SCTP_STAT_INCR(sctps_sendfastretrans);
9863 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9864 (tmr_started == 0)) {
9866 * ok we just fast-retrans'd
9867 * the lowest TSN, i.e. the
9868 * first on the list. In
9869 * this case we want to give
9870 * some more time to get a
9871 * SACK back without a
9874 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9875 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9876 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9880 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9881 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9883 #ifdef SCTP_AUDITING_ENABLED
9884 sctp_auditing(21, inp, stcb, NULL);
9890 if (asoc->sent_queue_retran_cnt <= 0) {
9891 /* all done we have no more to retran */
9892 asoc->sent_queue_retran_cnt = 0;
9896 /* No more room in rwnd */
9899 /* stop the for loop here. we sent out a packet */
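/*
 * Illustrative, standalone sketch (not from this file): the peer-rwnd
 * bookkeeping applied to each bundled retransmission above.  The
 * advertised window is reduced by the chunk size plus a configurable
 * per-chunk overhead (clamped at zero), and if what is left drops below
 * the sender-side silly-window threshold it is treated as zero so we
 * stop chasing tiny windows.  The parameter names chunk_oh and
 * sws_threshold merely stand in for the sysctl and endpoint values the
 * real code reads.
 */
#include <stdint.h>

static uint32_t
example_decr_peer_rwnd(uint32_t peers_rwnd, uint32_t send_size,
    uint32_t chunk_oh, uint32_t sws_threshold)
{
	uint32_t used = send_size + chunk_oh;

	peers_rwnd = (peers_rwnd > used) ? (peers_rwnd - used) : 0;
	if (peers_rwnd < sws_threshold)
		peers_rwnd = 0;		/* SWS sender side engages */
	return (peers_rwnd);
}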
9906 sctp_timer_validation(struct sctp_inpcb *inp,
9907 struct sctp_tcb *stcb,
9908 struct sctp_association *asoc)
9910 struct sctp_nets *net;
9912 /* Validate that a timer is running somewhere */
9913 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9914 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9915 /* Here is a timer */
9919 SCTP_TCB_LOCK_ASSERT(stcb);
9920 /* Gak, we did not have a timer somewhere */
9921 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9922 if (asoc->alternate) {
9923 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9925 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9931 sctp_chunk_output(struct sctp_inpcb *inp,
9932 struct sctp_tcb *stcb,
9935 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9941 * Ok this is the generic chunk service queue. We must do the
9943 * - See if there are retransmits pending, if so we must
9945 * - Service the stream queue that is next, moving any
9946 * message (note I must get a complete message i.e.
9947 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9949 * - Check to see if the cwnd/rwnd allows any output, if so we
9950 * go ahead and formulate and send the low level chunks. Making sure
9951 * to combine any control in the control chunk queue also.
9953 struct sctp_association *asoc;
9954 struct sctp_nets *net;
9955 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
9956 unsigned int burst_cnt = 0;
9960 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9963 unsigned int tot_frs = 0;
9966 /* The Nagle algorithm is only applied when handling a send call. */
9967 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9968 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9976 SCTP_TCB_LOCK_ASSERT(stcb);
9978 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9980 if ((un_sent <= 0) &&
9981 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9982 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9983 (asoc->sent_queue_retran_cnt == 0)) {
9984 /* Nothing to do unless there is something left to be sent */
9988 * Do we have something to send (data or control) AND a sack timer
9989 * running? If so, piggy-back the sack.
9991 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9992 sctp_send_sack(stcb, so_locked);
9993 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
9995 while (asoc->sent_queue_retran_cnt) {
9997 * Ok, it is retransmission time only; we send out only ONE
9998 * packet with a single call off to the retran code.
10000 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10002 * Special hook for handling cookies discarded
10003 * by the peer that carried data. Send the cookie-ack only
10004 * and then the next call will get the retransmissions.
10006 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10008 &now, &now_filled, frag_point, so_locked);
10010 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10011 /* if its not from a HB then do it */
10013 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10019 * it's from any other place, we don't allow retran
10020 * output (only control)
10025 /* Can't send anymore */
10027 * now let's push out control by calling med-level
10028 * output once. This assures that we WILL send HB's
10031 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10033 &now, &now_filled, frag_point, so_locked);
10034 #ifdef SCTP_AUDITING_ENABLED
10035 sctp_auditing(8, inp, stcb, NULL);
10037 sctp_timer_validation(inp, stcb, asoc);
10042 * The count was off.. retran is not happening so do
10043 * the normal retransmission.
10045 #ifdef SCTP_AUDITING_ENABLED
10046 sctp_auditing(9, inp, stcb, NULL);
10048 if (ret == SCTP_RETRAN_EXIT) {
10053 if (from_where == SCTP_OUTPUT_FROM_T3) {
10054 /* Only one transmission allowed out of a timeout */
10055 #ifdef SCTP_AUDITING_ENABLED
10056 sctp_auditing(10, inp, stcb, NULL);
10058 /* Push out any control */
10059 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10060 &now, &now_filled, frag_point, so_locked);
10063 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10064 /* Hit FR burst limit */
10067 if ((num_out == 0) && (ret == 0)) {
10068 /* No more retrans to send */
10072 #ifdef SCTP_AUDITING_ENABLED
10073 sctp_auditing(12, inp, stcb, NULL);
10075 /* Check for bad destinations, if they exist move chunks around. */
10076 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10077 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10079 * if possible move things off of this address; we
10080 * still may send below due to the dormant state, but
10081 * we try to find an alternate address to send to
10082 * and if we have one we move all queued data on the
10083 * out wheel to this alternate address.
10085 if (net->ref_count > 1)
10086 sctp_move_chunks_from_net(stcb, net);
10089 * if ((asoc->sat_network) || (net->addr_is_local))
10090 * { burst_limit = asoc->max_burst *
10091 * SCTP_SAT_NETWORK_BURST_INCR; }
10093 if (asoc->max_burst > 0) {
10094 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10095 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10097 * JRS - Use the congestion
10098 * control given in the
10099 * congestion control module
10101 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10103 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10105 SCTP_STAT_INCR(sctps_maxburstqueued);
10107 net->fast_retran_ip = 0;
10109 if (net->flight_size == 0) {
10111 * Should be decaying the
10123 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10124 &reason_code, 0, from_where,
10125 &now, &now_filled, frag_point, so_locked);
10127 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10128 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10129 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10131 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10132 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10133 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10137 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10139 tot_out += num_out;
10141 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10142 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10143 if (num_out == 0) {
10144 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10149 * When the Nagle algorithm is used, look at how
10150 * much is unsent, then if it's smaller than an MTU
10151 * and we have data in flight we stop, except if we
10152 * are handling a fragmented user message.
10154 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10155 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
10156 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10157 (stcb->asoc.total_flight > 0) &&
10158 ((stcb->asoc.locked_on_sending == NULL) ||
10159 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
10163 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10164 TAILQ_EMPTY(&asoc->send_queue) &&
10165 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
10166 /* Nothing left to send */
10169 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10170 /* Nothing left to send */
10173 } while (num_out &&
10174 ((asoc->max_burst == 0) ||
10175 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10176 (burst_cnt < asoc->max_burst)));
10178 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10179 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10180 SCTP_STAT_INCR(sctps_maxburstqueued);
10181 asoc->burst_limit_applied = 1;
10182 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10183 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10186 asoc->burst_limit_applied = 0;
10189 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10190 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10192 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10196 * Now we need to clean up the control chunk chain if an ECNE is on
10197 * it. It must be marked as UNSENT again so the next call will continue
10198 * to send it until such time that we get a CWR, to remove it.
10200 if (stcb->asoc.ecn_echo_cnt_onq)
10201 sctp_fix_ecn_echo(asoc);
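/*
 * Illustrative, standalone sketch (not from this file): the Nagle-style
 * test used inside the send loop above.  Output is held back when less
 * than one packet's worth of data is unsent, something is already in
 * flight, and we are not in the middle of a fragmented user message
 * (or explicit EOR mode is on), so the small remainder gets bundled
 * later instead of going out alone.  All parameters are plain stand-ins
 * for the association fields the real code consults.
 */
#include <stdint.h>

static int
example_nagle_holds_output(uint32_t un_sent, uint32_t total_flight,
    uint32_t one_packet_payload, int mid_fragmented_msg, int explicit_eor)
{
	return ((un_sent < one_packet_payload) &&
	    (total_flight > 0) &&
	    (!mid_fragmented_msg || explicit_eor));
}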
10208 struct sctp_inpcb *inp,
10210 struct sockaddr *addr,
10211 struct mbuf *control,
10216 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10219 if (inp->sctp_socket == NULL) {
10220 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10223 return (sctp_sosend(inp->sctp_socket,
10225 (struct uio *)NULL,
10233 send_forward_tsn(struct sctp_tcb *stcb,
10234 struct sctp_association *asoc)
10236 struct sctp_tmit_chunk *chk;
10237 struct sctp_forward_tsn_chunk *fwdtsn;
10238 uint32_t advance_peer_ack_point;
10240 SCTP_TCB_LOCK_ASSERT(stcb);
10241 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10242 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10243 /* mark it to unsent */
10244 chk->sent = SCTP_DATAGRAM_UNSENT;
10245 chk->snd_count = 0;
10246 /* Do we correct its output location? */
10248 sctp_free_remote_addr(chk->whoTo);
10251 goto sctp_fill_in_rest;
10254 /* Ok if we reach here we must build one */
10255 sctp_alloc_a_chunk(stcb, chk);
10259 asoc->fwd_tsn_cnt++;
10260 chk->copy_by_ref = 0;
10261 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10262 chk->rec.chunk_id.can_take_data = 0;
10265 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
10266 if (chk->data == NULL) {
10267 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10270 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10271 chk->sent = SCTP_DATAGRAM_UNSENT;
10272 chk->snd_count = 0;
10273 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10274 asoc->ctrl_queue_cnt++;
10277 * Here we go through and fill out the part that deals with
10278 * stream/seq of the ones we skip.
10280 SCTP_BUF_LEN(chk->data) = 0;
10282 struct sctp_tmit_chunk *at, *tp1, *last;
10283 struct sctp_strseq *strseq;
10284 unsigned int cnt_of_space, i, ovh;
10285 unsigned int space_needed;
10286 unsigned int cnt_of_skipped = 0;
10288 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10289 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10290 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10291 /* no more to look at */
10294 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10295 /* We don't report these */
10300 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10301 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10303 cnt_of_space = M_TRAILINGSPACE(chk->data);
10305 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10306 ovh = SCTP_MIN_OVERHEAD;
10308 ovh = SCTP_MIN_V4_OVERHEAD;
10310 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10311 /* trim to a mtu size */
10312 cnt_of_space = asoc->smallest_mtu - ovh;
10314 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10315 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10316 0xff, 0, cnt_of_skipped,
10317 asoc->advanced_peer_ack_point);
10320 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10321 if (cnt_of_space < space_needed) {
10323 * ok we must trim down the chunk by lowering the
10324 * advance peer ack point.
10326 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10327 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10328 0xff, 0xff, cnt_of_space,
10331 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10332 cnt_of_skipped /= sizeof(struct sctp_strseq);
10334 * Go through and find the TSN that will be the one
10337 at = TAILQ_FIRST(&asoc->sent_queue);
10339 for (i = 0; i < cnt_of_skipped; i++) {
10340 tp1 = TAILQ_NEXT(at, sctp_next);
10347 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10348 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10349 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10350 asoc->advanced_peer_ack_point);
10354 * last now points to last one I can report, update
10358 advance_peer_ack_point = last->rec.data.TSN_seq;
10359 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10360 cnt_of_skipped * sizeof(struct sctp_strseq);
10362 chk->send_size = space_needed;
10363 /* Setup the chunk */
10364 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10365 fwdtsn->ch.chunk_length = htons(chk->send_size);
10366 fwdtsn->ch.chunk_flags = 0;
10367 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10368 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10369 SCTP_BUF_LEN(chk->data) = chk->send_size;
10372 * Move pointer to after the fwdtsn and transfer to the
10375 strseq = (struct sctp_strseq *)fwdtsn;
10377 * Now populate the strseq list. This is done blindly
10378 * without pulling out duplicate stream info. This is
10379 * inefficient but won't harm the process since the peer will
10380 * look at these in sequence and will thus release anything.
10381 * It could mean we exceed the PMTU and chop off some that
10382 * we could have included.. but this is unlikely (aka 1432/4
10383 * would mean 300+ stream seq's would have to be reported in
10384 * one FWD-TSN). With a bit of work we can later FIX this to
10385 * optimize and pull out duplicates.. but it does add more
10386 * overhead. So for now... not!
10388 at = TAILQ_FIRST(&asoc->sent_queue);
10389 for (i = 0; i < cnt_of_skipped; i++) {
10390 tp1 = TAILQ_NEXT(at, sctp_next);
10393 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10394 /* We don't report these */
10399 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10400 at->rec.data.fwd_tsn_cnt = 0;
10402 strseq->stream = ntohs(at->rec.data.stream_number);
10403 strseq->sequence = ntohs(at->rec.data.stream_seq);
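/*
 * Illustrative, standalone sketch (not from this file): the space
 * computation used when building the FORWARD-TSN above (RFC 3758).
 * The chunk has an 8-byte fixed part (4-byte chunk header plus the
 * 32-bit new cumulative TSN) followed by one 4-byte stream/sequence
 * pair per skipped entry, which is what the
 * sizeof(struct sctp_forward_tsn_chunk) + cnt * sizeof(struct sctp_strseq)
 * expression adds up.  The helper name below is invented for the example.
 */
#include <stddef.h>

static size_t
example_fwd_tsn_size(size_t skipped_entries)
{
	const size_t fixed = 4 + 4;	/* chunk header + new cumulative TSN */
	const size_t per_entry = 2 + 2;	/* 16-bit stream + 16-bit sequence */

	return (fixed + skipped_entries * per_entry);
}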
10412 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10413 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10419 * Queue up a SACK or NR-SACK in the control queue.
10420 * We must first check to see if a SACK or NR-SACK is
10421 * somehow on the control queue.
10422 * If so, we will take and remove the old one.
10424 struct sctp_association *asoc;
10425 struct sctp_tmit_chunk *chk, *a_chk;
10426 struct sctp_sack_chunk *sack;
10427 struct sctp_nr_sack_chunk *nr_sack;
10428 struct sctp_gap_ack_block *gap_descriptor;
10429 struct sack_track *selector;
10434 int limit_reached = 0;
10435 unsigned int i, siz, j;
10436 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10439 uint32_t highest_tsn;
10444 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
10445 (stcb->asoc.peer_supports_nr_sack == 1)) {
10446 type = SCTP_NR_SELECTIVE_ACK;
10448 type = SCTP_SELECTIVE_ACK;
10451 asoc = &stcb->asoc;
10452 SCTP_TCB_LOCK_ASSERT(stcb);
10453 if (asoc->last_data_chunk_from == NULL) {
10454 /* Hmm we never received anything */
10457 sctp_slide_mapping_arrays(stcb);
10458 sctp_set_rwnd(stcb, asoc);
10459 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10460 if (chk->rec.chunk_id.id == type) {
10461 /* Hmm, found a sack already on queue, remove it */
10462 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10463 asoc->ctrl_queue_cnt--;
10466 sctp_m_freem(a_chk->data);
10467 a_chk->data = NULL;
10469 if (a_chk->whoTo) {
10470 sctp_free_remote_addr(a_chk->whoTo);
10471 a_chk->whoTo = NULL;
10476 if (a_chk == NULL) {
10477 sctp_alloc_a_chunk(stcb, a_chk);
10478 if (a_chk == NULL) {
10479 /* No memory so we drop the idea, and set a timer */
10480 if (stcb->asoc.delayed_ack) {
10481 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10482 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10483 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10484 stcb->sctp_ep, stcb, NULL);
10486 stcb->asoc.send_sack = 1;
10490 a_chk->copy_by_ref = 0;
10491 a_chk->rec.chunk_id.id = type;
10492 a_chk->rec.chunk_id.can_take_data = 1;
10494 /* Clear our pkt counts */
10495 asoc->data_pkts_seen = 0;
10497 a_chk->asoc = asoc;
10498 a_chk->snd_count = 0;
10499 a_chk->send_size = 0; /* fill in later */
10500 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10501 a_chk->whoTo = NULL;
10503 if ((asoc->numduptsns) ||
10504 (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
10506 * Ok, we have some duplicates or the destination for the
10507 * sack is unreachable, let's see if we can select an
10508 * alternate to asoc->last_data_chunk_from
10510 if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
10511 (asoc->used_alt_onsack > asoc->numnets)) {
10512 /* We used an alt last time, don't this time */
10513 a_chk->whoTo = NULL;
10515 asoc->used_alt_onsack++;
10516 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10518 if (a_chk->whoTo == NULL) {
10519 /* Nope, no alternate */
10520 a_chk->whoTo = asoc->last_data_chunk_from;
10521 asoc->used_alt_onsack = 0;
10525 * No duplicates so we use the last place we received data
10528 asoc->used_alt_onsack = 0;
10529 a_chk->whoTo = asoc->last_data_chunk_from;
10531 if (a_chk->whoTo) {
10532 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10534 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10535 highest_tsn = asoc->highest_tsn_inside_map;
10537 highest_tsn = asoc->highest_tsn_inside_nr_map;
10539 if (highest_tsn == asoc->cumulative_tsn) {
10541 if (type == SCTP_SELECTIVE_ACK) {
10542 space_req = sizeof(struct sctp_sack_chunk);
10544 space_req = sizeof(struct sctp_nr_sack_chunk);
10547 /* gaps get a cluster */
10548 space_req = MCLBYTES;
10550 /* Ok now let's formulate an MBUF with our sack */
10551 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
10552 if ((a_chk->data == NULL) ||
10553 (a_chk->whoTo == NULL)) {
10554 /* rats, no mbuf memory */
10556 /* was a problem with the destination */
10557 sctp_m_freem(a_chk->data);
10558 a_chk->data = NULL;
10560 sctp_free_a_chunk(stcb, a_chk, so_locked);
10561 /* sa_ignore NO_NULL_CHK */
10562 if (stcb->asoc.delayed_ack) {
10563 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10564 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10565 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10566 stcb->sctp_ep, stcb, NULL);
10568 stcb->asoc.send_sack = 1;
10572 /* ok, let's go through and fill it in */
10573 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10574 space = M_TRAILINGSPACE(a_chk->data);
10575 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10576 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10578 limit = mtod(a_chk->data, caddr_t);
10583 if ((asoc->sctp_cmt_on_off > 0) &&
10584 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10586 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
10587 * received, then set high bit to 1, else 0. Reset
10590 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10591 asoc->cmt_dac_pkts_rcvd = 0;
10593 #ifdef SCTP_ASOCLOG_OF_TSNS
10594 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10595 stcb->asoc.cumack_log_atsnt++;
10596 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10597 stcb->asoc.cumack_log_atsnt = 0;
10600 /* reset the reader's interpretation */
10601 stcb->freed_by_sorcv_sincelast = 0;
10603 if (type == SCTP_SELECTIVE_ACK) {
10604 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10606 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10607 if (highest_tsn > asoc->mapping_array_base_tsn) {
10608 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10610 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10614 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10615 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10616 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10617 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10619 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10623 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10626 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10628 if (((type == SCTP_SELECTIVE_ACK) &&
10629 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10630 ((type == SCTP_NR_SELECTIVE_ACK) &&
10631 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10632 /* we have a gap .. maybe */
10633 for (i = 0; i < siz; i++) {
10634 tsn_map = asoc->mapping_array[i];
10635 if (type == SCTP_SELECTIVE_ACK) {
10636 tsn_map |= asoc->nr_mapping_array[i];
10640 * Clear all bits corresponding to TSNs
10641 * smaller or equal to the cumulative TSN.
10643 tsn_map &= (~0 << (1 - offset));
10645 selector = &sack_array[tsn_map];
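			/*
			 * sack_array[] maps each possible byte value of the
			 * TSN map to the gap-ack sub-blocks it contains (up
			 * to four per byte), plus flags saying whether the
			 * first/last block can be merged with a neighbouring
			 * byte's blocks.
			 */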
10646 if (mergeable && selector->right_edge) {
10648 				 * Backup, left and right edges were ok to merge.
10654 if (selector->num_entries == 0)
10657 for (j = 0; j < selector->num_entries; j++) {
10658 if (mergeable && selector->right_edge) {
10660 					 * do a merge by NOT setting the left side
10666 					 * no merge, set the left side
10670 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10672 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10675 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10681 if (selector->left_edge) {
10685 if (limit_reached) {
10686 			/* Reached the limit, stop */
10692 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10693 (limit_reached == 0)) {
10697 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10698 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10700 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10703 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10706 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10708 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10709 /* we have a gap .. maybe */
10710 for (i = 0; i < siz; i++) {
10711 tsn_map = asoc->nr_mapping_array[i];
10714 * Clear all bits corresponding to
10715 					 * TSNs smaller or equal to the cumulative TSN.
10718 tsn_map &= (~0 << (1 - offset));
10720 selector = &sack_array[tsn_map];
10721 if (mergeable && selector->right_edge) {
10723 					 * Backup, left and right edges were ok to merge.
10726 num_nr_gap_blocks--;
10729 if (selector->num_entries == 0)
10732 for (j = 0; j < selector->num_entries; j++) {
10733 if (mergeable && selector->right_edge) {
10735 						 * do a merge by NOT setting the left side
10742 						 * no merge, set the left side
10746 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10748 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10749 num_nr_gap_blocks++;
10751 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10757 if (selector->left_edge) {
10761 if (limit_reached) {
10762 				/* Reached the limit, stop */
10769 /* now we must add any dups we are going to report. */
10770 if ((limit_reached == 0) && (asoc->numduptsns)) {
10771 dup = (uint32_t *) gap_descriptor;
10772 for (i = 0; i < asoc->numduptsns; i++) {
10773 *dup = htonl(asoc->dup_tsns[i]);
10776 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10781 asoc->numduptsns = 0;
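	/*
	 * Duplicate TSNs are appended after the gap-ack blocks; the pending
	 * dup list is cleared once they have been reported.
	 */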
10784 	 * now that the chunk is prepared, queue it to the control chunk queue.
10787 if (type == SCTP_SELECTIVE_ACK) {
10788 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
10789 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10790 num_dups * sizeof(int32_t);
10791 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10792 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10793 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10794 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10795 sack->sack.num_dup_tsns = htons(num_dups);
10796 sack->ch.chunk_type = type;
10797 sack->ch.chunk_flags = flags;
10798 sack->ch.chunk_length = htons(a_chk->send_size);
10800 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
10801 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10802 num_dups * sizeof(int32_t);
10803 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10804 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10805 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10806 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10807 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10808 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10809 nr_sack->nr_sack.reserved = 0;
10810 nr_sack->ch.chunk_type = type;
10811 nr_sack->ch.chunk_flags = flags;
10812 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10814 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10815 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10816 asoc->ctrl_queue_cnt++;
10817 asoc->send_sack = 0;
10818 SCTP_STAT_INCR(sctps_sendsacks);
10823 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10824 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10829 struct mbuf *m_abort, *m, *m_last;
10830 struct mbuf *m_out, *m_end = NULL;
10831 struct sctp_abort_chunk *abort;
10832 struct sctp_auth_chunk *auth = NULL;
10833 struct sctp_nets *net;
10835 uint32_t auth_offset = 0;
10836 uint16_t cause_len, chunk_len, padding_len;
10838 SCTP_TCB_LOCK_ASSERT(stcb);
10840 	 * Add an AUTH chunk if the chunk type requires it, and save the
10841 	 * offset into the chain for AUTH
10843 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10844 stcb->asoc.peer_auth_chunks)) {
10845 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10846 stcb, SCTP_ABORT_ASSOCIATION);
10847 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10851 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
10852 if (m_abort == NULL) {
10854 sctp_m_freem(m_out);
10857 sctp_m_freem(operr);
10861 /* link in any error */
10862 SCTP_BUF_NEXT(m_abort) = operr;
10865 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10866 cause_len += (uint16_t) SCTP_BUF_LEN(m);
10867 if (SCTP_BUF_NEXT(m) == NULL) {
10871 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10872 chunk_len = (uint16_t) sizeof(struct sctp_abort_chunk) + cause_len;
10873 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
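	/*
	 * chunk_len covers the ABORT chunk header plus all chained error
	 * causes; padding_len rounds the chunk up to the 4-byte boundary
	 * required for SCTP chunks.
	 */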
10874 if (m_out == NULL) {
10875 /* NO Auth chunk prepended, so reserve space in front */
10876 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10879 /* Put AUTH chunk at the front of the chain */
10880 SCTP_BUF_NEXT(m_end) = m_abort;
10882 if (stcb->asoc.alternate) {
10883 net = stcb->asoc.alternate;
10885 net = stcb->asoc.primary_destination;
10887 /* Fill in the ABORT chunk header. */
10888 abort = mtod(m_abort, struct sctp_abort_chunk *);
10889 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10890 if (stcb->asoc.peer_vtag == 0) {
10891 /* This happens iff the assoc is in COOKIE-WAIT state. */
10892 vtag = stcb->asoc.my_vtag;
10893 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
10895 vtag = stcb->asoc.peer_vtag;
10896 abort->ch.chunk_flags = 0;
10898 abort->ch.chunk_length = htons(chunk_len);
10899 /* Add padding, if necessary. */
10900 if (padding_len > 0) {
10901 if ((m_last == NULL) || sctp_add_pad_tombuf(m_last, padding_len)) {
10902 sctp_m_freem(m_out);
10906 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10907 (struct sockaddr *)&net->ro._l_addr,
10908 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10909 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
10910 stcb->asoc.primary_destination->port, NULL,
10913 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10917 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10918 struct sctp_nets *net,
10921 /* formulate and SEND a SHUTDOWN-COMPLETE */
10922 struct mbuf *m_shutdown_comp;
10923 struct sctp_shutdown_complete_chunk *shutdown_complete;
10927 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
10928 if (m_shutdown_comp == NULL) {
10932 if (reflect_vtag) {
10933 flags = SCTP_HAD_NO_TCB;
10934 vtag = stcb->asoc.my_vtag;
10937 vtag = stcb->asoc.peer_vtag;
10939 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10940 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10941 shutdown_complete->ch.chunk_flags = flags;
10942 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10943 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
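	/*
	 * SHUTDOWN-COMPLETE is not queued on the control queue; it is handed
	 * straight to the low-level output routine below.
	 */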
10944 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10945 (struct sockaddr *)&net->ro._l_addr,
10946 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
10947 stcb->sctp_ep->sctp_lport, stcb->rport,
10951 SCTP_SO_NOT_LOCKED);
10952 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10957 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
10958 struct sctphdr *sh, uint32_t vtag,
10959 uint8_t type, struct mbuf *cause,
10960 uint8_t use_mflowid, uint32_t mflowid,
10961 uint32_t vrf_id, uint16_t port)
10963 struct mbuf *o_pak;
10965 struct sctphdr *shout;
10966 struct sctp_chunkhdr *ch;
10967 struct udphdr *udp;
10968 int len, cause_len, padding_len;
10970 #if defined(INET) || defined(INET6)
10975 struct sockaddr_in *src_sin, *dst_sin;
10980 struct sockaddr_in6 *src_sin6, *dst_sin6;
10981 struct ip6_hdr *ip6;
10985 /* Compute the length of the cause and add final padding. */
10987 if (cause != NULL) {
10988 struct mbuf *m_at, *m_last = NULL;
10990 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
10991 if (SCTP_BUF_NEXT(m_at) == NULL)
10993 cause_len += SCTP_BUF_LEN(m_at);
10995 padding_len = cause_len % 4;
10996 if (padding_len != 0) {
10997 padding_len = 4 - padding_len;
10999 if (padding_len != 0) {
11000 if (sctp_add_pad_tombuf(m_last, padding_len)) {
11001 sctp_m_freem(cause);
11008 /* Get an mbuf for the header. */
11009 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11010 switch (dst->sa_family) {
11013 len += sizeof(struct ip);
11018 len += sizeof(struct ip6_hdr);
11025 len += sizeof(struct udphdr);
11027 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
11028 if (mout == NULL) {
11030 sctp_m_freem(cause);
11034 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11035 SCTP_BUF_LEN(mout) = len;
11036 SCTP_BUF_NEXT(mout) = cause;
11037 if (use_mflowid != 0) {
11038 mout->m_pkthdr.flowid = mflowid;
11039 mout->m_flags |= M_FLOWID;
11047 switch (dst->sa_family) {
11050 src_sin = (struct sockaddr_in *)src;
11051 dst_sin = (struct sockaddr_in *)dst;
11052 ip = mtod(mout, struct ip *);
11053 ip->ip_v = IPVERSION;
11054 ip->ip_hl = (sizeof(struct ip) >> 2);
11056 ip->ip_id = ip_newid();
11058 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11060 ip->ip_p = IPPROTO_UDP;
11062 ip->ip_p = IPPROTO_SCTP;
11064 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11065 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
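		/*
		 * Note that source and destination are swapped: this reply
		 * goes back to where the offending packet came from.
		 */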
11067 len = sizeof(struct ip);
11068 shout = (struct sctphdr *)((caddr_t)ip + len);
11073 src_sin6 = (struct sockaddr_in6 *)src;
11074 dst_sin6 = (struct sockaddr_in6 *)dst;
11075 ip6 = mtod(mout, struct ip6_hdr *);
11076 ip6->ip6_flow = htonl(0x60000000);
11077 if (V_ip6_auto_flowlabel) {
11078 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11080 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11082 ip6->ip6_nxt = IPPROTO_UDP;
11084 ip6->ip6_nxt = IPPROTO_SCTP;
11086 ip6->ip6_src = dst_sin6->sin6_addr;
11087 ip6->ip6_dst = src_sin6->sin6_addr;
11088 len = sizeof(struct ip6_hdr);
11089 shout = (struct sctphdr *)((caddr_t)ip6 + len);
11094 shout = mtod(mout, struct sctphdr *);
11098 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11099 sctp_m_freem(mout);
11102 udp = (struct udphdr *)shout;
11103 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11104 udp->uh_dport = port;
11106 udp->uh_ulen = htons(sizeof(struct udphdr) +
11107 sizeof(struct sctphdr) +
11108 sizeof(struct sctp_chunkhdr) +
11109 cause_len + padding_len);
11110 len += sizeof(struct udphdr);
11111 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11115 shout->src_port = sh->dest_port;
11116 shout->dest_port = sh->src_port;
11117 shout->checksum = 0;
11119 shout->v_tag = htonl(vtag);
11121 shout->v_tag = sh->v_tag;
11123 len += sizeof(struct sctphdr);
11124 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11125 ch->chunk_type = type;
11127 ch->chunk_flags = 0;
11129 ch->chunk_flags = SCTP_HAD_NO_TCB;
11131 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
11132 len += sizeof(struct sctp_chunkhdr);
11133 len += cause_len + padding_len;
11135 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11136 sctp_m_freem(mout);
11139 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11140 switch (dst->sa_family) {
11145 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11152 #if defined(SCTP_WITH_NO_CSUM)
11153 SCTP_STAT_INCR(sctps_sendnocrc);
11155 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11156 SCTP_STAT_INCR(sctps_sendswcrc);
11159 SCTP_ENABLE_UDP_CSUM(o_pak);
11162 #if defined(SCTP_WITH_NO_CSUM)
11163 SCTP_STAT_INCR(sctps_sendnocrc);
11165 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11166 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11167 SCTP_STAT_INCR(sctps_sendhwcrc);
11170 #ifdef SCTP_PACKET_LOGGING
11171 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11172 sctp_packet_log(o_pak);
11175 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11180 ip6->ip6_plen = len - sizeof(struct ip6_hdr);
11182 #if defined(SCTP_WITH_NO_CSUM)
11183 SCTP_STAT_INCR(sctps_sendnocrc);
11185 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11186 SCTP_STAT_INCR(sctps_sendswcrc);
11188 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11189 udp->uh_sum = 0xffff;
11192 #if defined(SCTP_WITH_NO_CSUM)
11193 SCTP_STAT_INCR(sctps_sendnocrc);
11195 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11196 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11197 SCTP_STAT_INCR(sctps_sendhwcrc);
11200 #ifdef SCTP_PACKET_LOGGING
11201 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11202 sctp_packet_log(o_pak);
11205 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11209 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11211 sctp_m_freem(mout);
11212 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11215 SCTP_STAT_INCR(sctps_sendpackets);
11216 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11217 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11222 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11223 struct sctphdr *sh,
11224 uint8_t use_mflowid, uint32_t mflowid,
11225 uint32_t vrf_id, uint16_t port)
11227 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11228 use_mflowid, mflowid,
11233 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11234 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11239 struct sctp_tmit_chunk *chk;
11240 struct sctp_heartbeat_chunk *hb;
11241 struct timeval now;
11243 SCTP_TCB_LOCK_ASSERT(stcb);
11247 (void)SCTP_GETTIME_TIMEVAL(&now);
11248 switch (net->ro._l_addr.sa.sa_family) {
11260 sctp_alloc_a_chunk(stcb, chk);
11262 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11265 chk->copy_by_ref = 0;
11266 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11267 chk->rec.chunk_id.can_take_data = 1;
11268 chk->asoc = &stcb->asoc;
11269 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11271 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11272 if (chk->data == NULL) {
11273 sctp_free_a_chunk(stcb, chk, so_locked);
11276 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11277 SCTP_BUF_LEN(chk->data) = chk->send_size;
11278 chk->sent = SCTP_DATAGRAM_UNSENT;
11279 chk->snd_count = 0;
11281 atomic_add_int(&chk->whoTo->ref_count, 1);
11282 /* Now we have a mbuf that we can fill in with the details */
11283 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11284 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11285 /* fill out chunk header */
11286 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11287 hb->ch.chunk_flags = 0;
11288 hb->ch.chunk_length = htons(chk->send_size);
11289 /* Fill out hb parameter */
11290 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11291 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11292 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11293 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
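	/*
	 * The current time is carried in the heartbeat info so that, when
	 * the peer echoes it back in the HEARTBEAT-ACK, the RTT for this
	 * path can be measured.
	 */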
11294 /* Did our user request this one, put it in */
11295 hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
11296 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11297 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11299 		 * we only take from the entropy pool if the address is not confirmed.
11302 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11303 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11305 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11306 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11308 switch (net->ro._l_addr.sa.sa_family) {
11311 memcpy(hb->heartbeat.hb_info.address,
11312 &net->ro._l_addr.sin.sin_addr,
11313 sizeof(net->ro._l_addr.sin.sin_addr));
11318 memcpy(hb->heartbeat.hb_info.address,
11319 &net->ro._l_addr.sin6.sin6_addr,
11320 sizeof(net->ro._l_addr.sin6.sin6_addr));
11327 net->hb_responded = 0;
11328 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11329 stcb->asoc.ctrl_queue_cnt++;
11330 SCTP_STAT_INCR(sctps_sendheartbeat);
11335 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11338 struct sctp_association *asoc;
11339 struct sctp_ecne_chunk *ecne;
11340 struct sctp_tmit_chunk *chk;
11345 asoc = &stcb->asoc;
11346 SCTP_TCB_LOCK_ASSERT(stcb);
11347 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11348 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11349 			/* found a previous ECN_ECHO; update it if needed */
11350 uint32_t cnt, ctsn;
11352 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11353 ctsn = ntohl(ecne->tsn);
11354 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11355 ecne->tsn = htonl(high_tsn);
11356 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11358 cnt = ntohl(ecne->num_pkts_since_cwr);
11360 ecne->num_pkts_since_cwr = htonl(cnt);
11364 	/* nope, could not find one to update, so we must build one */
11365 sctp_alloc_a_chunk(stcb, chk);
11369 chk->copy_by_ref = 0;
11370 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11371 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11372 chk->rec.chunk_id.can_take_data = 0;
11373 chk->asoc = &stcb->asoc;
11374 chk->send_size = sizeof(struct sctp_ecne_chunk);
11375 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11376 if (chk->data == NULL) {
11377 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11380 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11381 SCTP_BUF_LEN(chk->data) = chk->send_size;
11382 chk->sent = SCTP_DATAGRAM_UNSENT;
11383 chk->snd_count = 0;
11385 atomic_add_int(&chk->whoTo->ref_count, 1);
11387 stcb->asoc.ecn_echo_cnt_onq++;
11388 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11389 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11390 ecne->ch.chunk_flags = 0;
11391 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11392 ecne->tsn = htonl(high_tsn);
11393 ecne->num_pkts_since_cwr = htonl(1);
11394 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
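	/*
	 * The new ECN-ECHO is inserted at the head of the control queue so
	 * it goes out ahead of other queued control chunks.
	 */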
11395 asoc->ctrl_queue_cnt++;
11399 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11400 struct mbuf *m, int len, int iphlen, int bad_crc)
11402 struct sctp_association *asoc;
11403 struct sctp_pktdrop_chunk *drp;
11404 struct sctp_tmit_chunk *chk;
11410 struct sctp_chunkhdr *ch, chunk_buf;
11411 unsigned int chk_length;
11416 asoc = &stcb->asoc;
11417 SCTP_TCB_LOCK_ASSERT(stcb);
11418 if (asoc->peer_supports_pktdrop == 0) {
11420 * peer must declare support before I send one.
11424 if (stcb->sctp_socket == NULL) {
11427 sctp_alloc_a_chunk(stcb, chk);
11431 chk->copy_by_ref = 0;
11433 chk->send_size = len;
11434 /* Validate that we do not have an ABORT in here. */
11435 offset = iphlen + sizeof(struct sctphdr);
11436 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11437 sizeof(*ch), (uint8_t *) & chunk_buf);
11438 while (ch != NULL) {
11439 chk_length = ntohs(ch->chunk_length);
11440 if (chk_length < sizeof(*ch)) {
11441 /* break to abort land */
11444 switch (ch->chunk_type) {
11445 case SCTP_PACKET_DROPPED:
11446 case SCTP_ABORT_ASSOCIATION:
11447 case SCTP_INITIATION_ACK:
11449 			 * We don't respond with a PKT-DROP to an ABORT
11450 * or PKT-DROP. We also do not respond to an
11451 * INIT-ACK, because we can't know if the initiation
11452 * tag is correct or not.
11454 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11459 offset += SCTP_SIZE32(chk_length);
11460 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11461 sizeof(*ch), (uint8_t *) & chunk_buf);
11464 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11465 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11467 * only send 1 mtu worth, trim off the excess on the end.
11470 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11473 chk->asoc = &stcb->asoc;
11474 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11475 if (chk->data == NULL) {
11477 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11480 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11481 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11483 sctp_m_freem(chk->data);
11487 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11488 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11489 chk->book_size_scale = 0;
11491 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11492 drp->trunc_len = htons(fullsz);
11494 		 * Len is already adjusted to size minus overhead above; take
11495 		 * out the pkt_drop chunk itself from it.
11497 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
11498 len = chk->send_size;
11500 /* no truncation needed */
11501 drp->ch.chunk_flags = 0;
11502 drp->trunc_len = htons(0);
11505 drp->ch.chunk_flags |= SCTP_BADCRC;
11507 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11508 SCTP_BUF_LEN(chk->data) = chk->send_size;
11509 chk->sent = SCTP_DATAGRAM_UNSENT;
11510 chk->snd_count = 0;
11512 /* we should hit here */
11514 atomic_add_int(&chk->whoTo->ref_count, 1);
11518 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11519 chk->rec.chunk_id.can_take_data = 1;
11520 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11521 drp->ch.chunk_length = htons(chk->send_size);
11522 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11526 drp->bottle_bw = htonl(spc);
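	/*
	 * bottle_bw advertises the receive socket buffer limit; current_onq
	 * below reports how much of that space is currently in use.
	 */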
11527 if (asoc->my_rwnd) {
11528 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11529 asoc->size_on_all_streams +
11530 asoc->my_rwnd_control_len +
11531 stcb->sctp_socket->so_rcv.sb_cc);
11534 * If my rwnd is 0, possibly from mbuf depletion as well as
11535 * space used, tell the peer there is NO space aka onq == bw
11537 drp->current_onq = htonl(spc);
11541 m_copydata(m, iphlen, len, (caddr_t)datap);
11542 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11543 asoc->ctrl_queue_cnt++;
11547 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11549 struct sctp_association *asoc;
11550 struct sctp_cwr_chunk *cwr;
11551 struct sctp_tmit_chunk *chk;
11553 SCTP_TCB_LOCK_ASSERT(stcb);
11557 asoc = &stcb->asoc;
11558 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11559 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11561 			 * found a previous CWR queued to the same destination;
11562 			 * update it if needed
11566 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11567 ctsn = ntohl(cwr->tsn);
11568 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11569 cwr->tsn = htonl(high_tsn);
11571 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11572 /* Make sure override is carried */
11573 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11578 sctp_alloc_a_chunk(stcb, chk);
11582 chk->copy_by_ref = 0;
11583 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11584 chk->rec.chunk_id.can_take_data = 1;
11585 chk->asoc = &stcb->asoc;
11586 chk->send_size = sizeof(struct sctp_cwr_chunk);
11587 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11588 if (chk->data == NULL) {
11589 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11592 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11593 SCTP_BUF_LEN(chk->data) = chk->send_size;
11594 chk->sent = SCTP_DATAGRAM_UNSENT;
11595 chk->snd_count = 0;
11597 atomic_add_int(&chk->whoTo->ref_count, 1);
11598 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11599 cwr->ch.chunk_type = SCTP_ECN_CWR;
11600 cwr->ch.chunk_flags = override;
11601 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11602 cwr->tsn = htonl(high_tsn);
11603 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11604 asoc->ctrl_queue_cnt++;
11608 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11609 int number_entries, uint16_t * list,
11610 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11612 uint16_t len, old_len, i;
11613 struct sctp_stream_reset_out_request *req_out;
11614 struct sctp_chunkhdr *ch;
11616 ch = mtod(chk->data, struct sctp_chunkhdr *);
11617 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11619 /* get to new offset for the param. */
11620 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11621 /* now how long will this param be? */
11622 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
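	/*
	 * The parameter length is the fixed request header plus one 16-bit
	 * stream number per listed stream; the chunk length is padded to a
	 * 4-byte boundary further down.
	 */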
11623 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11624 req_out->ph.param_length = htons(len);
11625 req_out->request_seq = htonl(seq);
11626 req_out->response_seq = htonl(resp_seq);
11627 req_out->send_reset_at_tsn = htonl(last_sent);
11628 if (number_entries) {
11629 for (i = 0; i < number_entries; i++) {
11630 req_out->list_of_streams[i] = htons(list[i]);
11633 if (SCTP_SIZE32(len) > len) {
11635 * Need to worry about the pad we may end up adding to the
11636 * end. This is easy since the struct is either aligned to 4
11637 * bytes or 2 bytes off.
11639 req_out->list_of_streams[number_entries] = 0;
11641 /* now fix the chunk length */
11642 ch->chunk_length = htons(len + old_len);
11643 chk->book_size = len + old_len;
11644 chk->book_size_scale = 0;
11645 chk->send_size = SCTP_SIZE32(chk->book_size);
11646 SCTP_BUF_LEN(chk->data) = chk->send_size;
11651 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11652 int number_entries, uint16_t * list,
11655 uint16_t len, old_len, i;
11656 struct sctp_stream_reset_in_request *req_in;
11657 struct sctp_chunkhdr *ch;
11659 ch = mtod(chk->data, struct sctp_chunkhdr *);
11660 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11662 /* get to new offset for the param. */
11663 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11664 /* now how long will this param be? */
11665 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11666 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11667 req_in->ph.param_length = htons(len);
11668 req_in->request_seq = htonl(seq);
11669 if (number_entries) {
11670 for (i = 0; i < number_entries; i++) {
11671 req_in->list_of_streams[i] = htons(list[i]);
11674 if (SCTP_SIZE32(len) > len) {
11676 * Need to worry about the pad we may end up adding to the
11677 * end. This is easy since the struct is either aligned to 4
11678 * bytes or 2 bytes off.
11680 req_in->list_of_streams[number_entries] = 0;
11682 /* now fix the chunk length */
11683 ch->chunk_length = htons(len + old_len);
11684 chk->book_size = len + old_len;
11685 chk->book_size_scale = 0;
11686 chk->send_size = SCTP_SIZE32(chk->book_size);
11687 SCTP_BUF_LEN(chk->data) = chk->send_size;
11692 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11695 uint16_t len, old_len;
11696 struct sctp_stream_reset_tsn_request *req_tsn;
11697 struct sctp_chunkhdr *ch;
11699 ch = mtod(chk->data, struct sctp_chunkhdr *);
11700 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11702 /* get to new offset for the param. */
11703 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11704 /* now how long will this param be? */
11705 len = sizeof(struct sctp_stream_reset_tsn_request);
11706 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11707 req_tsn->ph.param_length = htons(len);
11708 req_tsn->request_seq = htonl(seq);
11710 /* now fix the chunk length */
11711 ch->chunk_length = htons(len + old_len);
11712 chk->send_size = len + old_len;
11713 chk->book_size = SCTP_SIZE32(chk->send_size);
11714 chk->book_size_scale = 0;
11715 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11720 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11721 uint32_t resp_seq, uint32_t result)
11723 uint16_t len, old_len;
11724 struct sctp_stream_reset_response *resp;
11725 struct sctp_chunkhdr *ch;
11727 ch = mtod(chk->data, struct sctp_chunkhdr *);
11728 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11730 /* get to new offset for the param. */
11731 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11732 /* now how long will this param be? */
11733 len = sizeof(struct sctp_stream_reset_response);
11734 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11735 resp->ph.param_length = htons(len);
11736 resp->response_seq = htonl(resp_seq);
11737 	resp->result = htonl(result);
11739 /* now fix the chunk length */
11740 ch->chunk_length = htons(len + old_len);
11741 chk->book_size = len + old_len;
11742 chk->book_size_scale = 0;
11743 chk->send_size = SCTP_SIZE32(chk->book_size);
11744 SCTP_BUF_LEN(chk->data) = chk->send_size;
11749 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11750 uint32_t resp_seq, uint32_t result,
11751 uint32_t send_una, uint32_t recv_next)
11753 uint16_t len, old_len;
11754 struct sctp_stream_reset_response_tsn *resp;
11755 struct sctp_chunkhdr *ch;
11757 ch = mtod(chk->data, struct sctp_chunkhdr *);
11758 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11760 /* get to new offset for the param. */
11761 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11762 /* now how long will this param be? */
11763 len = sizeof(struct sctp_stream_reset_response_tsn);
11764 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11765 resp->ph.param_length = htons(len);
11766 resp->response_seq = htonl(resp_seq);
11767 resp->result = htonl(result);
11768 resp->senders_next_tsn = htonl(send_una);
11769 resp->receivers_next_tsn = htonl(recv_next);
11771 /* now fix the chunk length */
11772 ch->chunk_length = htons(len + old_len);
11773 chk->book_size = len + old_len;
11774 chk->send_size = SCTP_SIZE32(chk->book_size);
11775 chk->book_size_scale = 0;
11776 SCTP_BUF_LEN(chk->data) = chk->send_size;
11781 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11785 uint16_t len, old_len;
11786 struct sctp_chunkhdr *ch;
11787 struct sctp_stream_reset_add_strm *addstr;
11789 ch = mtod(chk->data, struct sctp_chunkhdr *);
11790 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11792 /* get to new offset for the param. */
11793 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11794 /* now how long will this param be? */
11795 len = sizeof(struct sctp_stream_reset_add_strm);
11798 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11799 addstr->ph.param_length = htons(len);
11800 addstr->request_seq = htonl(seq);
11801 addstr->number_of_streams = htons(adding);
11802 addstr->reserved = 0;
11804 /* now fix the chunk length */
11805 ch->chunk_length = htons(len + old_len);
11806 chk->send_size = len + old_len;
11807 chk->book_size = SCTP_SIZE32(chk->send_size);
11808 chk->book_size_scale = 0;
11809 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11814 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
11818 uint16_t len, old_len;
11819 struct sctp_chunkhdr *ch;
11820 struct sctp_stream_reset_add_strm *addstr;
11822 ch = mtod(chk->data, struct sctp_chunkhdr *);
11823 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11825 /* get to new offset for the param. */
11826 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11827 /* now how long will this param be? */
11828 len = sizeof(struct sctp_stream_reset_add_strm);
11830 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
11831 addstr->ph.param_length = htons(len);
11832 addstr->request_seq = htonl(seq);
11833 addstr->number_of_streams = htons(adding);
11834 addstr->reserved = 0;
11836 /* now fix the chunk length */
11837 ch->chunk_length = htons(len + old_len);
11838 chk->send_size = len + old_len;
11839 chk->book_size = SCTP_SIZE32(chk->send_size);
11840 chk->book_size_scale = 0;
11841 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11846 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11847 int number_entries, uint16_t * list,
11848 uint8_t send_out_req,
11849 uint8_t send_in_req,
11850 uint8_t send_tsn_req,
11851 uint8_t add_stream,
11853 uint16_t adding_i, uint8_t peer_asked)
11856 struct sctp_association *asoc;
11857 struct sctp_tmit_chunk *chk;
11858 struct sctp_chunkhdr *ch;
11861 asoc = &stcb->asoc;
11862 if (asoc->stream_reset_outstanding) {
11864 * Already one pending, must get ACK back to clear the flag.
11866 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11869 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11870 (add_stream == 0)) {
11871 /* nothing to do */
11872 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11875 if (send_tsn_req && (send_out_req || send_in_req)) {
11876 /* error, can't do that */
11877 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11880 sctp_alloc_a_chunk(stcb, chk);
11882 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11885 chk->copy_by_ref = 0;
11886 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11887 chk->rec.chunk_id.can_take_data = 0;
11888 chk->asoc = &stcb->asoc;
11889 chk->book_size = sizeof(struct sctp_chunkhdr);
11890 chk->send_size = SCTP_SIZE32(chk->book_size);
11891 chk->book_size_scale = 0;
11893 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11894 if (chk->data == NULL) {
11895 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11896 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11899 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11901 /* setup chunk parameters */
11902 chk->sent = SCTP_DATAGRAM_UNSENT;
11903 chk->snd_count = 0;
11904 if (stcb->asoc.alternate) {
11905 chk->whoTo = stcb->asoc.alternate;
11907 chk->whoTo = stcb->asoc.primary_destination;
11909 atomic_add_int(&chk->whoTo->ref_count, 1);
11910 ch = mtod(chk->data, struct sctp_chunkhdr *);
11911 ch->chunk_type = SCTP_STREAM_RESET;
11912 ch->chunk_flags = 0;
11913 ch->chunk_length = htons(chk->book_size);
11914 SCTP_BUF_LEN(chk->data) = chk->send_size;
11916 seq = stcb->asoc.str_reset_seq_out;
11917 if (send_out_req) {
11918 sctp_add_stream_reset_out(chk, number_entries, list,
11919 seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
11920 asoc->stream_reset_out_is_outstanding = 1;
11922 asoc->stream_reset_outstanding++;
11924 if ((add_stream & 1) &&
11925 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
11926 /* Need to allocate more */
11927 struct sctp_stream_out *oldstream;
11928 struct sctp_stream_queue_pending *sp, *nsp;
11931 oldstream = stcb->asoc.strmout;
11932 /* get some more */
11933 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
11934 ((stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out)),
11936 if (stcb->asoc.strmout == NULL) {
11939 stcb->asoc.strmout = oldstream;
11940 /* Turn off the bit */
11941 x = add_stream & 0xfe;
11946 * Ok now we proceed with copying the old out stuff and
11947 * initializing the new stuff.
11949 SCTP_TCB_SEND_LOCK(stcb);
11950 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
11951 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11952 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11953 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
11954 stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
11955 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
11956 stcb->asoc.strmout[i].stream_no = i;
11957 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
11958 /* now anything on those queues? */
11959 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
11960 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
11961 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
11963 /* Now move assoc pointers too */
11964 if (stcb->asoc.last_out_stream == &oldstream[i]) {
11965 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
11967 if (stcb->asoc.locked_on_sending == &oldstream[i]) {
11968 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
11971 /* now the new streams */
11972 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
11973 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
11974 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11975 stcb->asoc.strmout[i].chunks_on_queues = 0;
11976 stcb->asoc.strmout[i].next_sequence_send = 0x0;
11977 stcb->asoc.strmout[i].stream_no = i;
11978 stcb->asoc.strmout[i].last_msg_incomplete = 0;
11979 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
11981 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
11982 SCTP_FREE(oldstream, SCTP_M_STRMO);
11983 SCTP_TCB_SEND_UNLOCK(stcb);
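		/*
		 * The stream-out array has been replaced by a larger
		 * allocation: existing streams (including anything still
		 * queued on them) were copied over and the old array freed,
		 * all under the send lock.
		 */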
11986 if ((add_stream & 1) && (adding_o > 0)) {
11987 asoc->strm_pending_add_size = adding_o;
11988 asoc->peer_req_out = peer_asked;
11989 sctp_add_an_out_stream(chk, seq, adding_o);
11991 asoc->stream_reset_outstanding++;
11993 if ((add_stream & 2) && (adding_i > 0)) {
11994 sctp_add_an_in_stream(chk, seq, adding_i);
11996 asoc->stream_reset_outstanding++;
11999 sctp_add_stream_reset_in(chk, number_entries, list, seq);
12001 asoc->stream_reset_outstanding++;
12003 if (send_tsn_req) {
12004 sctp_add_stream_reset_tsn(chk, seq);
12005 asoc->stream_reset_outstanding++;
12007 asoc->str_reset = chk;
12008 /* insert the chunk for sending */
12009 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12012 asoc->ctrl_queue_cnt++;
12013 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12018 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
12019 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12020 uint8_t use_mflowid, uint32_t mflowid,
12021 uint32_t vrf_id, uint16_t port)
12023 /* Don't respond to an ABORT with an ABORT. */
12024 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
12026 sctp_m_freem(cause);
12029 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
12030 use_mflowid, mflowid,
12036 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
12037 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12038 uint8_t use_mflowid, uint32_t mflowid,
12039 uint32_t vrf_id, uint16_t port)
12041 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
12042 use_mflowid, mflowid,
12047 static struct mbuf *
12048 sctp_copy_resume(struct uio *uio,
12050 int user_marks_eor,
12053 struct mbuf **new_tail)
12057 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12058 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12060 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12063 *sndout = m_length(m, NULL);
12064 *new_tail = m_last(m);
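	/*
	 * m_uiotombuf() copies the next chunk of user data (up to
	 * max_send_len bytes) out of the uio into a fresh mbuf chain; the
	 * byte count and tail mbuf are handed back to the caller.
	 */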
12070 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12077 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12079 if (sp->data == NULL) {
12080 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12083 sp->tail_mbuf = m_last(sp->data);
12089 static struct sctp_stream_queue_pending *
12090 sctp_copy_it_in(struct sctp_tcb *stcb,
12091 struct sctp_association *asoc,
12092 struct sctp_sndrcvinfo *srcv,
12094 struct sctp_nets *net,
12096 int user_marks_eor,
12100 * This routine must be very careful in its work. Protocol
12101 * processing is up and running so care must be taken to spl...()
12102 	 * when you need to do something that may affect the stcb/asoc. The
12103 * sb is locked however. When data is copied the protocol processing
12104 * should be enabled since this is a slower operation...
12106 struct sctp_stream_queue_pending *sp = NULL;
12110 /* Now can we send this? */
12111 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12112 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12113 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12114 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12115 /* got data while shutting down */
12116 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12117 *error = ECONNRESET;
12120 sctp_alloc_a_strmoq(stcb, sp);
12122 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12127 sp->sender_all_done = 0;
12128 sp->sinfo_flags = srcv->sinfo_flags;
12129 sp->timetolive = srcv->sinfo_timetolive;
12130 sp->ppid = srcv->sinfo_ppid;
12131 sp->context = srcv->sinfo_context;
12132 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12134 sp->stream = srcv->sinfo_stream;
12135 sp->length = min(uio->uio_resid, max_send_len);
12136 if ((sp->length == (uint32_t) uio->uio_resid) &&
12137 ((user_marks_eor == 0) ||
12138 (srcv->sinfo_flags & SCTP_EOF) ||
12139 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12140 sp->msg_is_complete = 1;
12142 sp->msg_is_complete = 0;
12144 sp->sender_all_done = 0;
12145 sp->some_taken = 0;
12146 sp->put_last_out = 0;
12147 resv_in_first = sizeof(struct sctp_data_chunk);
12148 sp->data = sp->tail_mbuf = NULL;
12149 if (sp->length == 0) {
12153 if (srcv->sinfo_keynumber_valid) {
12154 sp->auth_keyid = srcv->sinfo_keynumber;
12156 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12158 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12159 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12160 sp->holds_key_ref = 1;
12162 *error = sctp_copy_one(sp, uio, resv_in_first);
12165 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12168 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12170 atomic_add_int(&sp->net->ref_count, 1);
12174 sctp_set_prsctp_policy(sp);
12182 sctp_sosend(struct socket *so,
12183 struct sockaddr *addr,
12186 struct mbuf *control,
12191 int error, use_sndinfo = 0;
12192 struct sctp_sndrcvinfo sndrcvninfo;
12193 struct sockaddr *addr_to_use;
12195 #if defined(INET) && defined(INET6)
12196 struct sockaddr_in sin;
12201 		/* process cmsg snd/rcv info (maybe an assoc-id) */
12202 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12203 sizeof(sndrcvninfo))) {
12208 addr_to_use = addr;
12209 #if defined(INET) && defined(INET6)
12210 if ((addr) && (addr->sa_family == AF_INET6)) {
12211 struct sockaddr_in6 *sin6;
12213 sin6 = (struct sockaddr_in6 *)addr;
12214 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12215 in6_sin6_2_sin(&sin, sin6);
12216 addr_to_use = (struct sockaddr *)&sin;
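			/*
			 * An IPv4-mapped IPv6 address is converted to a plain
			 * sockaddr_in here so the lower send path only has to
			 * deal with the native IPv4 form.
			 */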
12220 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12223 use_sndinfo ? &sndrcvninfo : NULL
12231 sctp_lower_sosend(struct socket *so,
12232 struct sockaddr *addr,
12234 struct mbuf *i_pak,
12235 struct mbuf *control,
12237 struct sctp_sndrcvinfo *srcv
12242 unsigned int sndlen = 0, max_len;
12244 struct mbuf *top = NULL;
12245 int queue_only = 0, queue_only_for_init = 0;
12246 int free_cnt_applied = 0;
12248 int now_filled = 0;
12249 unsigned int inqueue_bytes = 0;
12250 struct sctp_block_entry be;
12251 struct sctp_inpcb *inp;
12252 struct sctp_tcb *stcb = NULL;
12253 struct timeval now;
12254 struct sctp_nets *net;
12255 struct sctp_association *asoc;
12256 struct sctp_inpcb *t_inp;
12257 int user_marks_eor;
12258 int create_lock_applied = 0;
12259 int nagle_applies = 0;
12260 int some_on_control = 0;
12261 int got_all_of_the_send = 0;
12262 int hold_tcblock = 0;
12263 int non_blocking = 0;
12264 uint32_t local_add_more, local_soresv = 0;
12266 uint16_t sinfo_flags;
12267 sctp_assoc_t sinfo_assoc_id;
12274 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12276 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12279 SCTP_RELEASE_PKT(i_pak);
12283 if ((uio == NULL) && (i_pak == NULL)) {
12284 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12287 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12288 atomic_add_int(&inp->total_sends, 1);
12290 if (uio->uio_resid < 0) {
12291 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12294 sndlen = uio->uio_resid;
12296 top = SCTP_HEADER_TO_CHAIN(i_pak);
12297 sndlen = SCTP_HEADER_LEN(i_pak);
12299 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12302 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12303 (inp->sctp_socket->so_qlimit)) {
12304 /* The listener can NOT send */
12305 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12310 	 * Pre-screen address; if one is given, the sin-len
12311 	 * must be set correctly!
12314 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12316 switch (raddr->sa.sa_family) {
12319 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12320 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12324 port = raddr->sin.sin_port;
12329 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12330 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12334 port = raddr->sin6.sin6_port;
12338 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12339 error = EAFNOSUPPORT;
12346 sinfo_flags = srcv->sinfo_flags;
12347 sinfo_assoc_id = srcv->sinfo_assoc_id;
12348 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12349 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12350 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12354 if (srcv->sinfo_flags)
12355 SCTP_STAT_INCR(sctps_sends_with_flags);
12357 sinfo_flags = inp->def_send.sinfo_flags;
12358 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
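		/*
		 * No sndrcvinfo was supplied by the caller, so the endpoint's
		 * default send parameters are used instead.
		 */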
12360 if (sinfo_flags & SCTP_SENDALL) {
12361 		/* it's a sendall */
12362 error = sctp_sendall(inp, uio, top, srcv);
12366 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12367 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12371 /* now we must find the assoc */
12372 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12373 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12374 SCTP_INP_RLOCK(inp);
12375 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12377 SCTP_TCB_LOCK(stcb);
12380 SCTP_INP_RUNLOCK(inp);
12381 } else if (sinfo_assoc_id) {
12382 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
12385 * Since we did not use findep we must
12386 			 * increment it, and if we don't find a tcb, decrement it.
12389 SCTP_INP_WLOCK(inp);
12390 SCTP_INP_INCR_REF(inp);
12391 SCTP_INP_WUNLOCK(inp);
12392 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12393 if (stcb == NULL) {
12394 SCTP_INP_WLOCK(inp);
12395 SCTP_INP_DECR_REF(inp);
12396 SCTP_INP_WUNLOCK(inp);
12401 if ((stcb == NULL) && (addr)) {
12402 /* Possible implicit send? */
12403 SCTP_ASOC_CREATE_LOCK(inp);
12404 create_lock_applied = 1;
12405 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12406 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12407 			/* Should I really unlock? */
12408 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12413 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12414 (addr->sa_family == AF_INET6)) {
12415 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12419 SCTP_INP_WLOCK(inp);
12420 SCTP_INP_INCR_REF(inp);
12421 SCTP_INP_WUNLOCK(inp);
12422 /* With the lock applied look again */
12423 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12424 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12425 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12427 if (stcb == NULL) {
12428 SCTP_INP_WLOCK(inp);
12429 SCTP_INP_DECR_REF(inp);
12430 SCTP_INP_WUNLOCK(inp);
12437 if (t_inp != inp) {
12438 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12443 if (stcb == NULL) {
12444 if (addr == NULL) {
12445 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12449 /* We must go ahead and start the INIT process */
12452 if ((sinfo_flags & SCTP_ABORT) ||
12453 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12455 			 * User asks to abort a non-existent assoc,
12456 			 * or EOF a non-existent assoc with no data
12458 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12462 /* get an asoc/stcb struct */
12463 vrf_id = inp->def_vrf_id;
12465 if (create_lock_applied == 0) {
12466 panic("Error, should hold create lock and I don't?");
12469 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12472 if (stcb == NULL) {
12473 /* Error is setup for us in the call */
12476 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12477 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12479 			 * Set the connected flag so we can queue data
12482 soisconnecting(so);
12485 if (create_lock_applied) {
12486 SCTP_ASOC_CREATE_UNLOCK(inp);
12487 create_lock_applied = 0;
12489 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12492 		 * Turn on queue only flag to prevent data from being sent
12496 asoc = &stcb->asoc;
12497 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12498 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12500 /* initialize authentication params for the assoc */
12501 sctp_initialize_auth_params(inp, stcb);
12504 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12505 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
12511 /* out with the INIT */
12512 queue_only_for_init = 1;
12514 * we may want to dig in after this call and adjust the MTU
12515 * value. It defaulted to 1500 (constant) but the ro
12516 * structure may now have an update and thus we may need to
12517 * change it BEFORE we append the message.
12521 asoc = &stcb->asoc;
12523 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12524 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12526 net = sctp_findnet(stcb, addr);
12529 if ((net == NULL) ||
12530 ((port != 0) && (port != stcb->rport))) {
12531 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12536 if (stcb->asoc.alternate) {
12537 net = stcb->asoc.alternate;
12539 net = stcb->asoc.primary_destination;
12542 atomic_add_int(&stcb->total_sends, 1);
12543 /* Keep the stcb from being freed under our feet */
12544 atomic_add_int(&asoc->refcnt, 1);
12545 free_cnt_applied = 1;
12547 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12548 if (sndlen > asoc->smallest_mtu) {
12549 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12554 if (SCTP_SO_IS_NBIO(so)
12555 || (flags & MSG_NBIO)
12559 /* would we block? */
12560 if (non_blocking) {
12561 if (hold_tcblock == 0) {
12562 SCTP_TCB_LOCK(stcb);
12565 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12566 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12567 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12568 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12569 if (sndlen > SCTP_SB_LIMIT_SND(so))
12572 error = EWOULDBLOCK;
12575 stcb->asoc.sb_send_resv += sndlen;
12576 SCTP_TCB_UNLOCK(stcb);
12579 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12581 local_soresv = sndlen;
12582 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12583 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12584 error = ECONNRESET;
12587 if (create_lock_applied) {
12588 SCTP_ASOC_CREATE_UNLOCK(inp);
12589 create_lock_applied = 0;
12591 if (asoc->stream_reset_outstanding) {
12593 * Can't queue any data while stream reset is underway.
12595 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
12599 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12600 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12603 /* we are now done with all control */
12605 sctp_m_freem(control);
12608 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12609 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12610 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12611 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12612 if (srcv->sinfo_flags & SCTP_ABORT) {
12615 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12616 error = ECONNRESET;
12620 /* Ok, we will attempt a msgsnd :> */
12622 p->td_ru.ru_msgsnd++;
12624 /* Are we aborting? */
12625 if (srcv->sinfo_flags & SCTP_ABORT) {
12627 int tot_demand, tot_out = 0, max_out;
12629 SCTP_STAT_INCR(sctps_sends_with_abort);
12630 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12631 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12632 /* It has to be up before we abort */
12633 /* how big is the user initiated abort? */
12634 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12638 if (hold_tcblock) {
12639 SCTP_TCB_UNLOCK(stcb);
12643 struct mbuf *cntm = NULL;
12645 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAIT, 1, MT_DATA);
12647 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12648 tot_out += SCTP_BUF_LEN(cntm);
12652 		/* Must fit in an MTU */
12654 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12655 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12657 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12661 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
12664 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12668 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12669 max_out -= sizeof(struct sctp_abort_msg);
12670 if (tot_out > max_out) {
12674 struct sctp_paramhdr *ph;
12676 /* now move forward the data pointer */
12677 ph = mtod(mm, struct sctp_paramhdr *);
12678 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12679 ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out);
12681 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12683 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12686 				 * Here, even if we can't get the
12687 				 * user's data, we still abort; we just
12688 				 * don't get to send the user's note :-0
12695 SCTP_BUF_NEXT(mm) = top;
12699 if (hold_tcblock == 0) {
12700 SCTP_TCB_LOCK(stcb);
12702 atomic_add_int(&stcb->asoc.refcnt, -1);
12703 free_cnt_applied = 0;
12704 /* release this lock, otherwise we hang on ourselves */
12705 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
12706 /* now relock the stcb so everything is sane */
12710 		 * In this case top is already chained to mm, so avoid a double
12711 		 * free, since we free it below if top != NULL and the driver
12712 		 * would free it after sending the packet out
12719 /* Calculate the maximum we can send */
12720 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12721 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12722 if (non_blocking) {
12723 /* we already checked for non-blocking above. */
12726 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12731 if (hold_tcblock) {
12732 SCTP_TCB_UNLOCK(stcb);
12735 /* Is the stream no. valid? */
12736 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12737 /* Invalid stream number */
12738 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12742 if (asoc->strmout == NULL) {
12743 /* huh? software error */
12744 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12748 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
12749 if ((user_marks_eor == 0) &&
12750 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
12751 /* It will NEVER fit */
12752 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12756 if ((uio == NULL) && user_marks_eor) {
12758 * We do not support eeor mode for
12759 * sending with mbuf chains (like sendfile).
12761 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12765 if (user_marks_eor) {
12766 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
12769 * For non-eeor the whole message must fit in
12770 * the socket send buffer.
12772 local_add_more = sndlen;
12775 if (non_blocking) {
12776 goto skip_preblock;
12778 if (((max_len <= local_add_more) &&
12779 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
12781 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12782 /* No room right now ! */
12783 SOCKBUF_LOCK(&so->so_snd);
12784 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12785 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
12786 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12787 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
12788 (unsigned int)SCTP_SB_LIMIT_SND(so),
12791 stcb->asoc.stream_queue_cnt,
12792 stcb->asoc.chunks_on_out_queue,
12793 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
12794 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12795 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
12798 stcb->block_entry = &be;
12799 error = sbwait(&so->so_snd);
12800 stcb->block_entry = NULL;
12801 if (error || so->so_error || be.error) {
12804 error = so->so_error;
12809 SOCKBUF_UNLOCK(&so->so_snd);
12812 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12813 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
12814 asoc, stcb->asoc.total_output_queue_size);
12816 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12819 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12821 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12822 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12826 SOCKBUF_UNLOCK(&so->so_snd);
12829 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12833 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
12834 * case. NOTE: uio will be NULL when top (an mbuf chain) is passed.
12837 if (srcv->sinfo_flags & SCTP_EOF) {
12838 got_all_of_the_send = 1;
12841 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12847 struct sctp_stream_queue_pending *sp;
12848 struct sctp_stream_out *strm;
12851 SCTP_TCB_SEND_LOCK(stcb);
12852 if ((asoc->stream_locked) &&
12853 (asoc->stream_locked_on != srcv->sinfo_stream)) {
12854 SCTP_TCB_SEND_UNLOCK(stcb);
12855 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12859 SCTP_TCB_SEND_UNLOCK(stcb);
12861 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
12862 if (strm->last_msg_incomplete == 0) {
12864 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
12865 if ((sp == NULL) || (error)) {
12868 SCTP_TCB_SEND_LOCK(stcb);
12869 if (sp->msg_is_complete) {
12870 strm->last_msg_incomplete = 0;
12871 asoc->stream_locked = 0;
12874 * Just got locked to this guy in case of an interrupted (partial) send.
12877 strm->last_msg_incomplete = 1;
12878 asoc->stream_locked = 1;
12879 asoc->stream_locked_on = srcv->sinfo_stream;
12880 sp->sender_all_done = 0;
12882 sctp_snd_sb_alloc(stcb, sp->length);
12883 atomic_add_int(&asoc->stream_queue_cnt, 1);
12884 if (srcv->sinfo_flags & SCTP_UNORDERED) {
12885 SCTP_STAT_INCR(sctps_sends_with_unord);
12887 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
12888 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
12889 SCTP_TCB_SEND_UNLOCK(stcb);
12891 SCTP_TCB_SEND_LOCK(stcb);
12892 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
12893 SCTP_TCB_SEND_UNLOCK(stcb);
12895 /* Huh? The last msg is gone -- this should not happen. */
12897 panic("Warning: Last msg marked incomplete, yet nothing left?");
12899 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
12900 strm->last_msg_incomplete = 0;
12906 while (uio->uio_resid > 0) {
12907 /* How much room do we have? */
12908 struct mbuf *new_tail, *mm;
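/* Main copy loop: carve the user data into mbuf runs and append each run to the pending stream-queue entry until the uio drains or we must wait for send-buffer space. */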
12910 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
12911 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
12915 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
12916 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
12917 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
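/* Copy now when there is a worthwhile amount of room, when the send buffer is smaller than the add-more threshold anyway, or when the remainder of the message fits. */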
12920 if (hold_tcblock) {
12921 SCTP_TCB_UNLOCK(stcb);
12924 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
12925 if ((mm == NULL) || error) {
12931 /* Update the mbuf and count */
12932 SCTP_TCB_SEND_LOCK(stcb);
12933 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12935 * we need to get out. The peer probably aborted.
12939 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
12940 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12941 error = ECONNRESET;
12943 SCTP_TCB_SEND_UNLOCK(stcb);
12946 if (sp->tail_mbuf) {
12947 /* tack it to the end */
12948 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
12949 sp->tail_mbuf = new_tail;
12951 /* A stolen mbuf */
12953 sp->tail_mbuf = new_tail;
12955 sctp_snd_sb_alloc(stcb, sndout);
12956 atomic_add_int(&sp->length, sndout);
12959 /* Did we reach EOR? */
12960 if ((uio->uio_resid == 0) &&
12961 ((user_marks_eor == 0) ||
12962 (srcv->sinfo_flags & SCTP_EOF) ||
12963 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12964 sp->msg_is_complete = 1;
12966 sp->msg_is_complete = 0;
12968 SCTP_TCB_SEND_UNLOCK(stcb);
12970 if (uio->uio_resid == 0) {
12975 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
12977 * This is ugly, but we must assure the locking order.
12980 if (hold_tcblock == 0) {
12981 SCTP_TCB_LOCK(stcb);
12984 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
12985 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12986 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
12987 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12993 SCTP_TCB_UNLOCK(stcb);
12996 /* wait for space now */
12997 if (non_blocking) {
12998 /* Non-blocking I/O: no room, so bail out */
13001 /* What about the INIT, send it maybe */
13002 if (queue_only_for_init) {
13003 if (hold_tcblock == 0) {
13004 SCTP_TCB_LOCK(stcb);
13007 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13008 /* a collision took us forward? */
13011 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13012 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13016 if ((net->flight_size > net->cwnd) &&
13017 (asoc->sctp_cmt_on_off == 0)) {
13018 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13020 } else if (asoc->ifp_had_enobuf) {
13021 SCTP_STAT_INCR(sctps_ifnomemqueued);
13022 if (net->flight_size > (2 * net->mtu)) {
13025 asoc->ifp_had_enobuf = 0;
13027 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13028 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
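/* un_sent: bytes queued but not yet in flight, plus the DATA chunk header overhead of the queued stream messages; consumed by the Nagle check below. */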
13029 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13030 (stcb->asoc.total_flight > 0) &&
13031 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13032 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13035 * Ok, Nagle is set on and we have data outstanding.
13036 * Don't send anything and let SACKs drive out the
13037 * data unless we have a "full" segment to send.
13039 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13040 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13042 SCTP_STAT_INCR(sctps_naglequeued);
13045 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13046 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13047 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13049 SCTP_STAT_INCR(sctps_naglesent);
13052 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13054 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13055 nagle_applies, un_sent);
13056 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13057 stcb->asoc.total_flight,
13058 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13060 if (queue_only_for_init)
13061 queue_only_for_init = 0;
13062 if ((queue_only == 0) && (nagle_applies == 0)) {
13064 * need to start chunk output
13065 * before blocking.. note that if
13066 * a lock is already applied, then
13067 * the input via the net is happening
13068 * and I don't need to start output :-D
13070 if (hold_tcblock == 0) {
13071 if (SCTP_TCB_TRYLOCK(stcb)) {
13073 sctp_chunk_output(inp,
13075 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13078 sctp_chunk_output(inp,
13080 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13082 if (hold_tcblock == 1) {
13083 SCTP_TCB_UNLOCK(stcb);
13087 SOCKBUF_LOCK(&so->so_snd);
13089 * This is a bit strange, but I think it will
13090 * work. The total_output_queue_size is locked and
13091 * protected by the TCB_LOCK, which we just released.
13092 * There is a race that can occur between releasing it
13093 * above and acquiring the socket lock, where SACKs
13094 * come in but we have not put the SB_WAIT on the
13095 * so_snd buffer to get the wakeup. After the lock
13096 * is applied, the SACK processing will also need to
13097 * lock the so->so_snd to do the actual sowwakeup(). So
13098 * once we have the socket buffer lock, if we recheck the
13099 * size we KNOW we will get to sleep safely with the
13100 * wakeup flag in place.
13102 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
13103 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13104 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13105 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13106 asoc, uio->uio_resid);
13109 stcb->block_entry = &be;
13110 error = sbwait(&so->so_snd);
13111 stcb->block_entry = NULL;
13113 if (error || so->so_error || be.error) {
13116 error = so->so_error;
13121 SOCKBUF_UNLOCK(&so->so_snd);
13124 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13125 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13126 asoc, stcb->asoc.total_output_queue_size);
13129 SOCKBUF_UNLOCK(&so->so_snd);
13130 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13134 SCTP_TCB_SEND_LOCK(stcb);
13136 if (sp->msg_is_complete == 0) {
13137 strm->last_msg_incomplete = 1;
13138 asoc->stream_locked = 1;
13139 asoc->stream_locked_on = srcv->sinfo_stream;
13141 sp->sender_all_done = 1;
13142 strm->last_msg_incomplete = 0;
13143 asoc->stream_locked = 0;
13146 SCTP_PRINTF("Huh no sp TSNH?\n");
13147 strm->last_msg_incomplete = 0;
13148 asoc->stream_locked = 0;
13150 SCTP_TCB_SEND_UNLOCK(stcb);
13151 if (uio->uio_resid == 0) {
13152 got_all_of_the_send = 1;
13155 /* We send in a 0, since we do NOT have any locks */
13156 error = sctp_msg_append(stcb, net, top, srcv, 0);
13158 if (srcv->sinfo_flags & SCTP_EOF) {
13160 * This should only happen for Panda for the mbuf
13161 * send case, which does NOT yet support EEOR mode.
13162 * Thus, we can just set this flag to do the proper EOF handling.
13165 got_all_of_the_send = 1;
13173 if ((srcv->sinfo_flags & SCTP_EOF) &&
13174 (got_all_of_the_send == 1)) {
13177 SCTP_STAT_INCR(sctps_sends_with_eof);
13179 if (hold_tcblock == 0) {
13180 SCTP_TCB_LOCK(stcb);
13183 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
13184 if (TAILQ_EMPTY(&asoc->send_queue) &&
13185 TAILQ_EMPTY(&asoc->sent_queue) &&
13187 if (asoc->locked_on_sending) {
13190 /* there is nothing queued to send, so I'm done... */
13191 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13192 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13193 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13194 struct sctp_nets *netp;
13196 /* only send SHUTDOWN the first time through */
13197 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
13198 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13200 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
13201 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
13202 sctp_stop_timers_for_shutdown(stcb);
13203 if (stcb->asoc.alternate) {
13204 netp = stcb->asoc.alternate;
13206 netp = stcb->asoc.primary_destination;
13208 sctp_send_shutdown(stcb, netp);
13209 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13211 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13212 asoc->primary_destination);
13216 * we still have (or just got) data to send, so set SHUTDOWN_PENDING.
13220 * XXX sockets draft says that SCTP_EOF should be
13221 * sent with no data. Currently, we will allow user
13222 * data to be sent first and then move to SHUTDOWN-PENDING.
13225 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13226 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13227 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13228 if (hold_tcblock == 0) {
13229 SCTP_TCB_LOCK(stcb);
13232 if (asoc->locked_on_sending) {
13233 /* Locked to send out the data */
13234 struct sctp_stream_queue_pending *sp;
13236 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
13238 if ((sp->length == 0) && (sp->msg_is_complete == 0))
13239 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
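/* An empty, incomplete message is still locked on a stream; with nothing else queued this forces an abort below instead of a clean shutdown. */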
13242 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
13243 if (TAILQ_EMPTY(&asoc->send_queue) &&
13244 TAILQ_EMPTY(&asoc->sent_queue) &&
13245 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13247 if (free_cnt_applied) {
13248 atomic_add_int(&stcb->asoc.refcnt, -1);
13249 free_cnt_applied = 0;
13251 sctp_abort_an_association(stcb->sctp_ep, stcb,
13252 NULL, SCTP_SO_LOCKED);
13254 * now relock the stcb so everything is sane.
13261 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13262 asoc->primary_destination);
13263 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13268 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13269 some_on_control = 1;
13271 if (queue_only_for_init) {
13272 if (hold_tcblock == 0) {
13273 SCTP_TCB_LOCK(stcb);
13276 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13277 /* a collision took us forward? */
13280 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13281 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
13285 if ((net->flight_size > net->cwnd) &&
13286 (stcb->asoc.sctp_cmt_on_off == 0)) {
13287 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13289 } else if (asoc->ifp_had_enobuf) {
13290 SCTP_STAT_INCR(sctps_ifnomemqueued);
13291 if (net->flight_size > (2 * net->mtu)) {
13294 asoc->ifp_had_enobuf = 0;
13296 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13297 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
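/* Recompute un_sent for the post-EOF send attempt; same accounting as above. */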
13298 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13299 (stcb->asoc.total_flight > 0) &&
13300 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13301 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13303 * Ok, Nagle is set on and we have data outstanding.
13304 * Don't send anything and let SACKs drive out the
13305 * data unless we have a "full" segment to send.
13307 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13308 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13310 SCTP_STAT_INCR(sctps_naglequeued);
13313 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13314 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13315 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13317 SCTP_STAT_INCR(sctps_naglesent);
13320 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13321 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13322 nagle_applies, un_sent);
13323 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13324 stcb->asoc.total_flight,
13325 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13327 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13328 /* we can attempt to send too. */
13329 if (hold_tcblock == 0) {
13331 * If there is activity recv'ing SACKs, there is no need to send here.
13334 if (SCTP_TCB_TRYLOCK(stcb)) {
13335 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13339 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13341 } else if ((queue_only == 0) &&
13342 (stcb->asoc.peers_rwnd == 0) &&
13343 (stcb->asoc.total_flight == 0)) {
13344 /* We get to have a probe outstanding */
13345 if (hold_tcblock == 0) {
13347 SCTP_TCB_LOCK(stcb);
13349 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13350 } else if (some_on_control) {
13351 int num_out, reason, frag_point;
13353 /* Here we do control only */
13354 if (hold_tcblock == 0) {
13356 SCTP_TCB_LOCK(stcb);
13358 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13359 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13360 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13362 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13363 queue_only, stcb->asoc.peers_rwnd, un_sent,
13364 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13365 stcb->asoc.total_output_queue_size, error);
13370 if (local_soresv && stcb) {
13371 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13373 if (create_lock_applied) {
13374 SCTP_ASOC_CREATE_UNLOCK(inp);
13376 if ((stcb) && hold_tcblock) {
13377 SCTP_TCB_UNLOCK(stcb);
13379 if (stcb && free_cnt_applied) {
13380 atomic_add_int(&stcb->asoc.refcnt, -1);
13384 if (mtx_owned(&stcb->tcb_mtx)) {
13385 panic("Leaving with tcb mtx owned?");
13387 if (mtx_owned(&stcb->tcb_send_mtx)) {
13388 panic("Leaving with tcb send mtx owned?");
13394 sctp_validate_no_locks(inp);
13396 SCTP_PRINTF("Warning - inp is NULL so cant validate locks\n");
13403 sctp_m_freem(control);
13410 * generate an AUTHentication chunk, if required
13413 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13414 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
13415 struct sctp_tcb *stcb, uint8_t chunk)
13417 struct mbuf *m_auth;
13418 struct sctp_auth_chunk *auth;
13422 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13426 /* sysctl disabled auth? */
13427 if (SCTP_BASE_SYSCTL(sctp_auth_disable))
13430 /* peer doesn't do auth... */
13431 if (!stcb->asoc.peer_supports_auth) {
13434 /* does the requested chunk require auth? */
13435 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13438 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
13439 if (m_auth == NULL) {
13443 /* reserve some space if this will be the first mbuf */
13445 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13446 /* fill in the AUTH chunk details */
13447 auth = mtod(m_auth, struct sctp_auth_chunk *);
13448 bzero(auth, sizeof(*auth));
13449 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13450 auth->ch.chunk_flags = 0;
13451 chunk_len = sizeof(*auth) +
13452 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13453 auth->ch.chunk_length = htons(chunk_len);
13454 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13455 /* key id and hmac digest will be computed and filled in upon send */
13457 /* save the offset where the auth was inserted into the chain */
13459 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13460 *offset += SCTP_BUF_LEN(cn);
13463 /* update length and return pointer to the auth chunk */
13464 SCTP_BUF_LEN(m_auth) = chunk_len;
13465 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13466 if (auth_ret != NULL)
13474 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
13476 struct nd_prefix *pfx = NULL;
13477 struct nd_pfxrouter *pfxrtr = NULL;
13478 struct sockaddr_in6 gw6;
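/* Check that the route's next hop is one of the routers that advertised the prefix covering the source address. */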
13480 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13483 /* get prefix entry of address */
13484 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13485 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13487 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13488 &src6->sin6_addr, &pfx->ndpr_mask))
13491 /* no prefix entry in the prefix list */
13493 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13494 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13497 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13498 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13500 /* search installed gateway from prefix entry */
13501 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13502 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13503 gw6.sin6_family = AF_INET6;
13504 gw6.sin6_len = sizeof(struct sockaddr_in6);
13505 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13506 sizeof(struct in6_addr));
13507 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13508 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13509 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13510 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13511 if (sctp_cmpaddr((struct sockaddr *)&gw6,
13512 ro->ro_rt->rt_gateway)) {
13513 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13517 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
13524 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
13527 struct sockaddr_in *sin, *mask;
13528 struct ifaddr *ifa;
13529 struct in_addr srcnetaddr, gwnetaddr;
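/* Compare the source address and the route's gateway under the source interface's netmask; a match means the next hop sits on the source's subnet. */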
13531 if (ro == NULL || ro->ro_rt == NULL ||
13532 sifa->address.sa.sa_family != AF_INET) {
13535 ifa = (struct ifaddr *)sifa->ifa;
13536 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13537 sin = (struct sockaddr_in *)&sifa->address.sin;
13538 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13539 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13540 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13541 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13543 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13544 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13545 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13546 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13547 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13548 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {