/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>
#define SCTP_MAX_GAPS_INARRAY 4

struct sack_track {
    uint8_t right_edge;     /* mergeable on the right edge */
    uint8_t left_edge;      /* mergeable on the left edge */
    uint8_t num_gap_blocks; /* number of gap ack blocks in the pattern */
    uint8_t spare;          /* not used */
    struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};
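
/*
 * Precomputed SACK lookup table.  Each of the 256 entries corresponds to
 * one possible byte value of the association's TSN mapping array and
 * records whether that bit pattern can merge with the neighbouring byte
 * on its right/left edge, plus how many gap ack blocks the pattern
 * contributes, so a SACK can be built by indexing on the raw byte
 * instead of scanning individual bits.
 */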
struct sack_track sack_array[256] = {
    {0, 0, 0, 0,    /* 0x00 */
    {1, 0, 1, 0,    /* 0x01 */
    {0, 0, 1, 0,    /* 0x02 */
    {1, 0, 1, 0,    /* 0x03 */
    {0, 0, 1, 0,    /* 0x04 */
    {1, 0, 2, 0,    /* 0x05 */
    {0, 0, 1, 0,    /* 0x06 */
    {1, 0, 1, 0,    /* 0x07 */
    {0, 0, 1, 0,    /* 0x08 */
    {1, 0, 2, 0,    /* 0x09 */
    {0, 0, 2, 0,    /* 0x0a */
    {1, 0, 2, 0,    /* 0x0b */
    {0, 0, 1, 0,    /* 0x0c */
    {1, 0, 2, 0,    /* 0x0d */
    {0, 0, 1, 0,    /* 0x0e */
    {1, 0, 1, 0,    /* 0x0f */
    {0, 0, 1, 0,    /* 0x10 */
    {1, 0, 2, 0,    /* 0x11 */
    {0, 0, 2, 0,    /* 0x12 */
    {1, 0, 2, 0,    /* 0x13 */
    {0, 0, 2, 0,    /* 0x14 */
    {1, 0, 3, 0,    /* 0x15 */
    {0, 0, 2, 0,    /* 0x16 */
    {1, 0, 2, 0,    /* 0x17 */
    {0, 0, 1, 0,    /* 0x18 */
    {1, 0, 2, 0,    /* 0x19 */
    {0, 0, 2, 0,    /* 0x1a */
    {1, 0, 2, 0,    /* 0x1b */
    {0, 0, 1, 0,    /* 0x1c */
    {1, 0, 2, 0,    /* 0x1d */
    {0, 0, 1, 0,    /* 0x1e */
    {1, 0, 1, 0,    /* 0x1f */
    {0, 0, 1, 0,    /* 0x20 */
    {1, 0, 2, 0,    /* 0x21 */
    {0, 0, 2, 0,    /* 0x22 */
    {1, 0, 2, 0,    /* 0x23 */
    {0, 0, 2, 0,    /* 0x24 */
    {1, 0, 3, 0,    /* 0x25 */
    {0, 0, 2, 0,    /* 0x26 */
    {1, 0, 2, 0,    /* 0x27 */
    {0, 0, 2, 0,    /* 0x28 */
    {1, 0, 3, 0,    /* 0x29 */
    {0, 0, 3, 0,    /* 0x2a */
    {1, 0, 3, 0,    /* 0x2b */
    {0, 0, 2, 0,    /* 0x2c */
    {1, 0, 3, 0,    /* 0x2d */
    {0, 0, 2, 0,    /* 0x2e */
    {1, 0, 2, 0,    /* 0x2f */
    {0, 0, 1, 0,    /* 0x30 */
    {1, 0, 2, 0,    /* 0x31 */
    {0, 0, 2, 0,    /* 0x32 */
    {1, 0, 2, 0,    /* 0x33 */
    {0, 0, 2, 0,    /* 0x34 */
    {1, 0, 3, 0,    /* 0x35 */
    {0, 0, 2, 0,    /* 0x36 */
    {1, 0, 2, 0,    /* 0x37 */
    {0, 0, 1, 0,    /* 0x38 */
    {1, 0, 2, 0,    /* 0x39 */
    {0, 0, 2, 0,    /* 0x3a */
    {1, 0, 2, 0,    /* 0x3b */
    {0, 0, 1, 0,    /* 0x3c */
    {1, 0, 2, 0,    /* 0x3d */
    {0, 0, 1, 0,    /* 0x3e */
    {1, 0, 1, 0,    /* 0x3f */
    {0, 0, 1, 0,    /* 0x40 */
    {1, 0, 2, 0,    /* 0x41 */
    {0, 0, 2, 0,    /* 0x42 */
    {1, 0, 2, 0,    /* 0x43 */
    {0, 0, 2, 0,    /* 0x44 */
    {1, 0, 3, 0,    /* 0x45 */
    {0, 0, 2, 0,    /* 0x46 */
    {1, 0, 2, 0,    /* 0x47 */
    {0, 0, 2, 0,    /* 0x48 */
    {1, 0, 3, 0,    /* 0x49 */
    {0, 0, 3, 0,    /* 0x4a */
    {1, 0, 3, 0,    /* 0x4b */
    {0, 0, 2, 0,    /* 0x4c */
    {1, 0, 3, 0,    /* 0x4d */
    {0, 0, 2, 0,    /* 0x4e */
    {1, 0, 2, 0,    /* 0x4f */
    {0, 0, 2, 0,    /* 0x50 */
    {1, 0, 3, 0,    /* 0x51 */
    {0, 0, 3, 0,    /* 0x52 */
    {1, 0, 3, 0,    /* 0x53 */
    {0, 0, 3, 0,    /* 0x54 */
    {1, 0, 4, 0,    /* 0x55 */
    {0, 0, 3, 0,    /* 0x56 */
    {1, 0, 3, 0,    /* 0x57 */
    {0, 0, 2, 0,    /* 0x58 */
    {1, 0, 3, 0,    /* 0x59 */
    {0, 0, 3, 0,    /* 0x5a */
    {1, 0, 3, 0,    /* 0x5b */
    {0, 0, 2, 0,    /* 0x5c */
    {1, 0, 3, 0,    /* 0x5d */
    {0, 0, 2, 0,    /* 0x5e */
    {1, 0, 2, 0,    /* 0x5f */
    {0, 0, 1, 0,    /* 0x60 */
    {1, 0, 2, 0,    /* 0x61 */
    {0, 0, 2, 0,    /* 0x62 */
    {1, 0, 2, 0,    /* 0x63 */
    {0, 0, 2, 0,    /* 0x64 */
    {1, 0, 3, 0,    /* 0x65 */
    {0, 0, 2, 0,    /* 0x66 */
    {1, 0, 2, 0,    /* 0x67 */
    {0, 0, 2, 0,    /* 0x68 */
    {1, 0, 3, 0,    /* 0x69 */
    {0, 0, 3, 0,    /* 0x6a */
    {1, 0, 3, 0,    /* 0x6b */
    {0, 0, 2, 0,    /* 0x6c */
    {1, 0, 3, 0,    /* 0x6d */
    {0, 0, 2, 0,    /* 0x6e */
    {1, 0, 2, 0,    /* 0x6f */
    {0, 0, 1, 0,    /* 0x70 */
    {1, 0, 2, 0,    /* 0x71 */
    {0, 0, 2, 0,    /* 0x72 */
    {1, 0, 2, 0,    /* 0x73 */
    {0, 0, 2, 0,    /* 0x74 */
    {1, 0, 3, 0,    /* 0x75 */
    {0, 0, 2, 0,    /* 0x76 */
    {1, 0, 2, 0,    /* 0x77 */
    {0, 0, 1, 0,    /* 0x78 */
    {1, 0, 2, 0,    /* 0x79 */
    {0, 0, 2, 0,    /* 0x7a */
    {1, 0, 2, 0,    /* 0x7b */
    {0, 0, 1, 0,    /* 0x7c */
    {1, 0, 2, 0,    /* 0x7d */
    {0, 0, 1, 0,    /* 0x7e */
    {1, 0, 1, 0,    /* 0x7f */
    {0, 1, 1, 0,    /* 0x80 */
    {1, 1, 2, 0,    /* 0x81 */
    {0, 1, 2, 0,    /* 0x82 */
    {1, 1, 2, 0,    /* 0x83 */
    {0, 1, 2, 0,    /* 0x84 */
    {1, 1, 3, 0,    /* 0x85 */
    {0, 1, 2, 0,    /* 0x86 */
    {1, 1, 2, 0,    /* 0x87 */
    {0, 1, 2, 0,    /* 0x88 */
    {1, 1, 3, 0,    /* 0x89 */
    {0, 1, 3, 0,    /* 0x8a */
    {1, 1, 3, 0,    /* 0x8b */
    {0, 1, 2, 0,    /* 0x8c */
    {1, 1, 3, 0,    /* 0x8d */
    {0, 1, 2, 0,    /* 0x8e */
    {1, 1, 2, 0,    /* 0x8f */
    {0, 1, 2, 0,    /* 0x90 */
    {1, 1, 3, 0,    /* 0x91 */
    {0, 1, 3, 0,    /* 0x92 */
    {1, 1, 3, 0,    /* 0x93 */
    {0, 1, 3, 0,    /* 0x94 */
    {1, 1, 4, 0,    /* 0x95 */
    {0, 1, 3, 0,    /* 0x96 */
    {1, 1, 3, 0,    /* 0x97 */
    {0, 1, 2, 0,    /* 0x98 */
    {1, 1, 3, 0,    /* 0x99 */
    {0, 1, 3, 0,    /* 0x9a */
    {1, 1, 3, 0,    /* 0x9b */
    {0, 1, 2, 0,    /* 0x9c */
    {1, 1, 3, 0,    /* 0x9d */
    {0, 1, 2, 0,    /* 0x9e */
    {1, 1, 2, 0,    /* 0x9f */
    {0, 1, 2, 0,    /* 0xa0 */
    {1, 1, 3, 0,    /* 0xa1 */
    {0, 1, 3, 0,    /* 0xa2 */
    {1, 1, 3, 0,    /* 0xa3 */
    {0, 1, 3, 0,    /* 0xa4 */
    {1, 1, 4, 0,    /* 0xa5 */
    {0, 1, 3, 0,    /* 0xa6 */
    {1, 1, 3, 0,    /* 0xa7 */
    {0, 1, 3, 0,    /* 0xa8 */
    {1, 1, 4, 0,    /* 0xa9 */
    {0, 1, 4, 0,    /* 0xaa */
    {1, 1, 4, 0,    /* 0xab */
    {0, 1, 3, 0,    /* 0xac */
    {1, 1, 4, 0,    /* 0xad */
    {0, 1, 3, 0,    /* 0xae */
    {1, 1, 3, 0,    /* 0xaf */
    {0, 1, 2, 0,    /* 0xb0 */
    {1, 1, 3, 0,    /* 0xb1 */
    {0, 1, 3, 0,    /* 0xb2 */
    {1, 1, 3, 0,    /* 0xb3 */
    {0, 1, 3, 0,    /* 0xb4 */
    {1, 1, 4, 0,    /* 0xb5 */
    {0, 1, 3, 0,    /* 0xb6 */
    {1, 1, 3, 0,    /* 0xb7 */
    {0, 1, 2, 0,    /* 0xb8 */
    {1, 1, 3, 0,    /* 0xb9 */
    {0, 1, 3, 0,    /* 0xba */
    {1, 1, 3, 0,    /* 0xbb */
    {0, 1, 2, 0,    /* 0xbc */
    {1, 1, 3, 0,    /* 0xbd */
    {0, 1, 2, 0,    /* 0xbe */
    {1, 1, 2, 0,    /* 0xbf */
    {0, 1, 1, 0,    /* 0xc0 */
    {1, 1, 2, 0,    /* 0xc1 */
    {0, 1, 2, 0,    /* 0xc2 */
    {1, 1, 2, 0,    /* 0xc3 */
    {0, 1, 2, 0,    /* 0xc4 */
    {1, 1, 3, 0,    /* 0xc5 */
    {0, 1, 2, 0,    /* 0xc6 */
    {1, 1, 2, 0,    /* 0xc7 */
    {0, 1, 2, 0,    /* 0xc8 */
    {1, 1, 3, 0,    /* 0xc9 */
    {0, 1, 3, 0,    /* 0xca */
    {1, 1, 3, 0,    /* 0xcb */
    {0, 1, 2, 0,    /* 0xcc */
    {1, 1, 3, 0,    /* 0xcd */
    {0, 1, 2, 0,    /* 0xce */
    {1, 1, 2, 0,    /* 0xcf */
    {0, 1, 2, 0,    /* 0xd0 */
    {1, 1, 3, 0,    /* 0xd1 */
    {0, 1, 3, 0,    /* 0xd2 */
    {1, 1, 3, 0,    /* 0xd3 */
    {0, 1, 3, 0,    /* 0xd4 */
    {1, 1, 4, 0,    /* 0xd5 */
    {0, 1, 3, 0,    /* 0xd6 */
    {1, 1, 3, 0,    /* 0xd7 */
    {0, 1, 2, 0,    /* 0xd8 */
    {1, 1, 3, 0,    /* 0xd9 */
    {0, 1, 3, 0,    /* 0xda */
    {1, 1, 3, 0,    /* 0xdb */
    {0, 1, 2, 0,    /* 0xdc */
    {1, 1, 3, 0,    /* 0xdd */
    {0, 1, 2, 0,    /* 0xde */
    {1, 1, 2, 0,    /* 0xdf */
    {0, 1, 1, 0,    /* 0xe0 */
    {1, 1, 2, 0,    /* 0xe1 */
    {0, 1, 2, 0,    /* 0xe2 */
    {1, 1, 2, 0,    /* 0xe3 */
    {0, 1, 2, 0,    /* 0xe4 */
    {1, 1, 3, 0,    /* 0xe5 */
    {0, 1, 2, 0,    /* 0xe6 */
    {1, 1, 2, 0,    /* 0xe7 */
    {0, 1, 2, 0,    /* 0xe8 */
    {1, 1, 3, 0,    /* 0xe9 */
    {0, 1, 3, 0,    /* 0xea */
    {1, 1, 3, 0,    /* 0xeb */
    {0, 1, 2, 0,    /* 0xec */
    {1, 1, 3, 0,    /* 0xed */
    {0, 1, 2, 0,    /* 0xee */
    {1, 1, 2, 0,    /* 0xef */
    {0, 1, 1, 0,    /* 0xf0 */
    {1, 1, 2, 0,    /* 0xf1 */
    {0, 1, 2, 0,    /* 0xf2 */
    {1, 1, 2, 0,    /* 0xf3 */
    {0, 1, 2, 0,    /* 0xf4 */
    {1, 1, 3, 0,    /* 0xf5 */
    {0, 1, 2, 0,    /* 0xf6 */
    {1, 1, 2, 0,    /* 0xf7 */
    {0, 1, 1, 0,    /* 0xf8 */
    {1, 1, 2, 0,    /* 0xf9 */
    {0, 1, 2, 0,    /* 0xfa */
    {1, 1, 2, 0,    /* 0xfb */
    {0, 1, 1, 0,    /* 0xfc */
    {1, 1, 2, 0,    /* 0xfd */
    {0, 1, 1, 0,    /* 0xfe */
    {1, 1, 1, 0,    /* 0xff */
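
/*
 * Decide whether the address ifa may be used under the scoping rules in
 * scope: loopback addresses only when loopback_scope is set, private
 * IPv4 addresses only when ipv4_local_scope is set, and IPv6 link-local
 * and site-local addresses only within their respective scopes.
 * Returns 1 when the address is usable, 0 when it is not.
 */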
static int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
    struct sctp_scoping *scope,
    int do_update)
{
    if ((scope->loopback_scope == 0) &&
        (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
        /* skip loopback if not in scope */
        return (0);
    }
    switch (ifa->address.sa.sa_family) {
    case AF_INET:
        if (scope->ipv4_addr_legal) {
            struct sockaddr_in *sin;

            sin = (struct sockaddr_in *)&ifa->address.sin;
            if (sin->sin_addr.s_addr == 0) {
                /* not in scope, unspecified */
                return (0);
            }
            if ((scope->ipv4_local_scope == 0) &&
                (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
                /* private address not in scope */
                return (0);
            }
        }
        break;
    case AF_INET6:
        if (scope->ipv6_addr_legal) {
            struct sockaddr_in6 *sin6;

            /*
             * Must update the flags, bummer, which means any
             * IFA locks must now be applied HERE <->
             */
            if (do_update) {
                sctp_gather_internal_ifa_flags(ifa);
            }
            if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
                return (0);
            }
            /* ok to use deprecated addresses? */
            sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
            if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
                /* skip unspecified addresses */
                return (0);
            }
            if ( /* (local_scope == 0) && */
                (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
                return (0);
            }
            if ((scope->site_scope == 0) &&
                (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
                return (0);
            }
        }
        break;
    default:
        return (0);
    }
    return (1);
}
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
{
#if defined(INET) || defined(INET6)
    struct sctp_paramhdr *parmh;
    struct mbuf *mret;
    uint16_t plen;
#endif

    switch (ifa->address.sa.sa_family) {
#ifdef INET
    case AF_INET:
        plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
        break;
#endif
#ifdef INET6
    case AF_INET6:
        plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
        break;
#endif
    default:
        return (m);
    }
#if defined(INET) || defined(INET6)
    if (M_TRAILINGSPACE(m) >= plen) {
        /* easy side, we just drop it on the end */
        parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
        mret = m;
    } else {
        /* Need more space */
        mret = m;
        while (SCTP_BUF_NEXT(mret) != NULL) {
            mret = SCTP_BUF_NEXT(mret);
        }
        SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
        if (SCTP_BUF_NEXT(mret) == NULL) {
            /* We are hosed, can't add more addresses */
            return (m);
        }
        mret = SCTP_BUF_NEXT(mret);
        parmh = mtod(mret, struct sctp_paramhdr *);
    }
    /* now add the parameter */
    switch (ifa->address.sa.sa_family) {
#ifdef INET
    case AF_INET:
        {
            struct sctp_ipv4addr_param *ipv4p;
            struct sockaddr_in *sin;

            sin = (struct sockaddr_in *)&ifa->address.sin;
            ipv4p = (struct sctp_ipv4addr_param *)parmh;
            parmh->param_type = htons(SCTP_IPV4_ADDRESS);
            parmh->param_length = htons(plen);
            ipv4p->addr = sin->sin_addr.s_addr;
            SCTP_BUF_LEN(mret) += plen;
            break;
        }
#endif
#ifdef INET6
    case AF_INET6:
        {
            struct sctp_ipv6addr_param *ipv6p;
            struct sockaddr_in6 *sin6;

            sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
            ipv6p = (struct sctp_ipv6addr_param *)parmh;
            parmh->param_type = htons(SCTP_IPV6_ADDRESS);
            parmh->param_length = htons(plen);
            memcpy(ipv6p->addr, &sin6->sin6_addr,
                sizeof(ipv6p->addr));
            /* clear embedded scope in the address */
            in6_clearscope((struct in6_addr *)ipv6p->addr);
            SCTP_BUF_LEN(mret) += plen;
            break;
        }
#endif
    default:
        return (m);
    }
    if (len != NULL) {
        *len += plen;
    }
    return (mret);
#endif
}
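
/*
 * Append the endpoint's usable addresses as address parameters to the
 * INIT/INIT-ACK chunk in m_at.  For a bound-all endpoint every address
 * in the VRF that passes the scope and restriction checks is listed (up
 * to SCTP_ADDRESS_LIMIT); for a bound-specific endpoint only the bound
 * addresses are listed, and only if more than one is bound, so that a
 * single-homed bind lets the peer derive our address from the packet's
 * source.
 */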
static struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to,
    uint16_t *padding_len, uint16_t *chunk_len)
{
    struct sctp_vrf *vrf = NULL;
    int cnt, limit_out = 0, total_count;
    uint32_t vrf_id;

    vrf_id = inp->def_vrf_id;
    SCTP_IPI_ADDR_RLOCK();
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL) {
        SCTP_IPI_ADDR_RUNLOCK();
        return (m_at);
    }
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        struct sctp_ifa *sctp_ifap;
        struct sctp_ifn *sctp_ifnp;

        cnt = cnt_inits_to;
        if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
            limit_out = 1;
            cnt = SCTP_ADDRESS_LIMIT;
            goto skip_count;
        }
        LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
            if ((scope->loopback_scope == 0) &&
                SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
                /*
                 * Skip loopback devices if loopback_scope
                 * not set
                 */
                continue;
            }
            LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
                if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
                    continue;
                }
                if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
                    continue;
                }
                cnt++;
                if (cnt > SCTP_ADDRESS_LIMIT) {
                    break;
                }
            }
            if (cnt > SCTP_ADDRESS_LIMIT) {
                break;
            }
        }
skip_count:
        if (cnt > 1) {
            total_count = 0;
            LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
                cnt = 0;
                if ((scope->loopback_scope == 0) &&
                    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
                    /*
                     * Skip loopback devices if
                     * loopback_scope not set
                     */
                    continue;
                }
                LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
                    if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
                        continue;
                    }
                    if (sctp_is_address_in_scope(sctp_ifap,
                        scope, 0) == 0) {
                        continue;
                    }
                    if ((chunk_len != NULL) &&
                        (padding_len != NULL) &&
                        (*padding_len > 0)) {
                        memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
                        SCTP_BUF_LEN(m_at) += *padding_len;
                        *chunk_len += *padding_len;
                        *padding_len = 0;
                    }
                    m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
                    if (limit_out) {
                        cnt++;
                        total_count++;
                        if (cnt >= 2) {
                            /* two from each address */
                            break;
                        }
                        if (total_count > SCTP_ADDRESS_LIMIT) {
                            /* No more addresses */
                            break;
                        }
                    }
                }
            }
        }
    } else {
        struct sctp_laddr *laddr;

        cnt = cnt_inits_to;
        /* First, how many ? */
        LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
            if (laddr->ifa == NULL) {
                continue;
            }
            if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
                /*
                 * Address being deleted by the system, don't
                 * list.
                 */
                continue;
            if (laddr->action == SCTP_DEL_IP_ADDRESS) {
                /*
                 * Address being deleted on this ep, don't
                 * list.
                 */
                continue;
            }
            cnt++;
        }
        /*
         * To get through a NAT we only list addresses if we have
         * more than one. That way if you just bind a single address
         * we let the source of the init dictate our address.
         */
        if (cnt > 1) {
            cnt = cnt_inits_to;
            LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
                if (laddr->ifa == NULL) {
                    continue;
                }
                if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
                    continue;
                }
                if (sctp_is_address_in_scope(laddr->ifa,
                    scope, 0) == 0) {
                    continue;
                }
                if ((chunk_len != NULL) &&
                    (padding_len != NULL) &&
                    (*padding_len > 0)) {
                    memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
                    SCTP_BUF_LEN(m_at) += *padding_len;
                    *chunk_len += *padding_len;
                    *padding_len = 0;
                }
                m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
                cnt++;
                if (cnt >= SCTP_ADDRESS_LIMIT) {
                    break;
                }
            }
        }
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (m_at);
}
static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
    uint8_t dest_is_global = 0;

    /* dest_is_priv is true if destination is a private address */
    /* dest_is_loop is true if destination is a loopback address */

    /*
     * Here we determine if it's a preferred address. A preferred address
     * means it is the same scope or higher scope than the destination.
     * L = loopback, P = private, G = global
     * -----------------------------------------
     *    src    |  dest | result
     * -----------------------------------------
     *     L     |   L   |    yes
     * -----------------------------------------
     *     P     |   L   |    yes-v4 no-v6
     * -----------------------------------------
     *     G     |   L   |    yes-v4 no-v6
     * -----------------------------------------
     *     L     |   P   |    no
     * -----------------------------------------
     *     P     |   P   |    yes
     * -----------------------------------------
     *     G     |   P   |    no
     * -----------------------------------------
     *     L     |   G   |    no
     * -----------------------------------------
     *     P     |   G   |    no
     * -----------------------------------------
     *     G     |   G   |    yes
     * -----------------------------------------
     */
    if (ifa->address.sa.sa_family != fam) {
        /* forget mis-matched family */
        return (NULL);
    }
    if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
        dest_is_global = 1;
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
    /* Ok the address may be ok */
#ifdef INET6
    if (fam == AF_INET6) {
        /* ok to use deprecated addresses? no lets not! */
        if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
            SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
            return (NULL);
        }
        if (ifa->src_is_priv && !ifa->src_is_loop) {
            if (dest_is_loop) {
                SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
                return (NULL);
            }
        }
        if (ifa->src_is_glob) {
            if (dest_is_loop) {
                SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
                return (NULL);
            }
        }
    }
#endif
    /*
     * Now that we know what is what, implement our table. This could in
     * theory be done slicker (it used to be), but this is
     * straightforward and easier to validate :-)
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
        ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
        dest_is_loop, dest_is_priv, dest_is_global);
    if ((ifa->src_is_loop) && (dest_is_priv)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
        return (NULL);
    }
    if ((ifa->src_is_glob) && (dest_is_priv)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
        return (NULL);
    }
    if ((ifa->src_is_loop) && (dest_is_global)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
        return (NULL);
    }
    if ((ifa->src_is_priv) && (dest_is_global)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
        return (NULL);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
    /* it's a preferred address */
    return (ifa);
}
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
    uint8_t dest_is_global = 0;

    /*
     * Here we determine if it's an acceptable address. An acceptable
     * address means it is the same scope or higher scope but we can
     * allow for NAT, which means it's ok to have a global dest and a
     * private src.
     *
     * L = loopback, P = private, G = global
     * -----------------------------------------
     *    src    |  dest | result
     * -----------------------------------------
     *     L     |   L   |    yes
     * -----------------------------------------
     *     P     |   L   |    yes-v4 no-v6
     * -----------------------------------------
     *     G     |   L   |    yes
     * -----------------------------------------
     *     L     |   P   |    no
     * -----------------------------------------
     *     P     |   P   |    yes
     * -----------------------------------------
     *     G     |   P   |    yes - May not work
     * -----------------------------------------
     *     L     |   G   |    no
     * -----------------------------------------
     *     P     |   G   |    yes - May not work
     * -----------------------------------------
     *     G     |   G   |    yes
     * -----------------------------------------
     */
    if (ifa->address.sa.sa_family != fam) {
        /* forget non-matching family */
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
            ifa->address.sa.sa_family, fam);
        return (NULL);
    }
    /* Ok the address may be ok */
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
        dest_is_loop, dest_is_priv);
    if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
        dest_is_global = 1;
    }
#ifdef INET6
    if (fam == AF_INET6) {
        /* ok to use deprecated addresses? */
        if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
            return (NULL);
        }
        if (ifa->src_is_priv) {
            /* Special case, linklocal to loop */
            if (dest_is_loop)
                return (NULL);
        }
    }
#endif
    /*
     * Now that we know what is what, implement our table. This could in
     * theory be done slicker (it used to be), but this is
     * straightforward and easier to validate :-)
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
        ifa->src_is_loop, dest_is_priv);
    if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
        return (NULL);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
        ifa->src_is_loop, dest_is_global);
    if ((ifa->src_is_loop == 1) && (dest_is_global)) {
        return (NULL);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
    /* it's an acceptable address */
    return (ifa);
}
int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
    struct sctp_laddr *laddr;

    if (stcb == NULL) {
        /* There are no restrictions, no TCB :-) */
        return (0);
    }
    LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
        if (laddr->ifa == NULL) {
            SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
                __FUNCTION__);
            continue;
        }
        if (laddr->ifa == ifa) {
            /* Yes it is on the list */
            return (1);
        }
    }
    return (0);
}
int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
    struct sctp_laddr *laddr;

    if (ifa == NULL)
        return (0);
    LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
        if (laddr->ifa == NULL) {
            SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
                __FUNCTION__);
            continue;
        }
        if ((laddr->ifa == ifa) && laddr->action == 0)
            /* same pointer */
            return (1);
    }
    return (0);
}
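
/*
 * Source address selection for a bound-specific endpoint with no
 * association: first try a bound address on the interface we will emit
 * on, then rotate (via inp->next_addr_touse) through the bound list for
 * a preferred address, and finally for a merely acceptable one.
 */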
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
    sctp_route_t *ro,
    uint32_t vrf_id,
    int non_asoc_addr_ok,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    sa_family_t fam)
{
    struct sctp_laddr *laddr, *starting_point;
    void *ifn;
    int resettotop = 0;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    struct sctp_vrf *vrf;
    uint32_t ifn_index;

    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    sctp_ifn = sctp_find_ifn(ifn, ifn_index);
    /*
     * first question, is the ifn we will emit on in our list, if so, we
     * want such an address. Note that we first look for a preferred
     * address.
     */
    if (sctp_ifn) {
        /* is a preferred one on the interface we route out? */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
                (non_asoc_addr_ok == 0))
                continue;
            sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
                dest_is_loop,
                dest_is_priv, fam);
            if (sifa == NULL)
                continue;
            if (sctp_is_addr_in_ep(inp, sifa)) {
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
    }
    /*
     * ok, now we need to find one on the list of the addresses. We
     * can't get one on the emitting interface so let's find first a
     * preferred one. If not that, an acceptable one; otherwise we
     * return NULL.
     */
    starting_point = inp->next_addr_touse;
once_again:
    if (inp->next_addr_touse == NULL) {
        inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
        resettotop = 1;
    }
    for (laddr = inp->next_addr_touse; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (resettotop == 0) {
        inp->next_addr_touse = NULL;
        goto once_again;
    }
    inp->next_addr_touse = starting_point;
    resettotop = 0;
once_again_too:
    if (inp->next_addr_touse == NULL) {
        inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
        resettotop = 1;
    }
    /* ok, what about an acceptable address in the inp */
    for (laddr = inp->next_addr_touse; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (resettotop == 0) {
        inp->next_addr_touse = NULL;
        goto once_again_too;
    }
    /*
     * no address bound can be a source for the destination; we are in
     * trouble
     */
    return (NULL);
}
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t *ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
    struct sctp_laddr *laddr, *starting_point;
    void *ifn;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    uint8_t start_at_beginning = 0;
    struct sctp_vrf *vrf;
    uint32_t ifn_index;

    /*
     * first question, is the ifn we will emit on in our list, if so, we
     * want such an address.
     */
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    sctp_ifn = sctp_find_ifn(ifn, ifn_index);

    /*
     * first question, is the ifn we will emit on in our list? If so,
     * we want that one. First we look for a preferred. Second, we go
     * for an acceptable.
     */
    if (sctp_ifn) {
        /* first try for a preferred address on the ep */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
                continue;
            if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
                sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
                if (sifa == NULL)
                    continue;
                if (((non_asoc_addr_ok == 0) &&
                    (sctp_is_addr_restricted(stcb, sifa))) ||
                    (non_asoc_addr_ok &&
                    (sctp_is_addr_restricted(stcb, sifa)) &&
                    (!sctp_is_addr_pending(stcb, sifa)))) {
                    /* on the no-no list */
                    continue;
                }
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
        /* next try for an acceptable address on the ep */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
                continue;
            if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
                sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
                if (sifa == NULL)
                    continue;
                if (((non_asoc_addr_ok == 0) &&
                    (sctp_is_addr_restricted(stcb, sifa))) ||
                    (non_asoc_addr_ok &&
                    (sctp_is_addr_restricted(stcb, sifa)) &&
                    (!sctp_is_addr_pending(stcb, sifa)))) {
                    /* on the no-no list */
                    continue;
                }
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
    }
    /*
     * if we can't find one like that then we must look at all addresses
     * bound to pick one at first preferable, then secondly acceptable.
     */
    starting_point = stcb->asoc.last_used_address;
sctp_from_the_top:
    if (stcb->asoc.last_used_address == NULL) {
        start_at_beginning = 1;
        stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
    }
    /* search beginning with the last used address */
    for (laddr = stcb->asoc.last_used_address; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        if (((non_asoc_addr_ok == 0) &&
            (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
            (sctp_is_addr_restricted(stcb, sifa)) &&
            (!sctp_is_addr_pending(stcb, sifa)))) {
            /* on the no-no list */
            continue;
        }
        stcb->asoc.last_used_address = laddr;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (start_at_beginning == 0) {
        stcb->asoc.last_used_address = NULL;
        goto sctp_from_the_top;
    }
    /* now try for any higher scope than the destination */
    stcb->asoc.last_used_address = starting_point;
    start_at_beginning = 0;
sctp_from_the_top2:
    if (stcb->asoc.last_used_address == NULL) {
        start_at_beginning = 1;
        stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
    }
    /* search beginning with the last used address */
    for (laddr = stcb->asoc.last_used_address; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        if (((non_asoc_addr_ok == 0) &&
            (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
            (sctp_is_addr_restricted(stcb, sifa)) &&
            (!sctp_is_addr_pending(stcb, sifa)))) {
            /* on the no-no list */
            continue;
        }
        stcb->asoc.last_used_address = laddr;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (start_at_beginning == 0) {
        stcb->asoc.last_used_address = NULL;
        goto sctp_from_the_top2;
    }
    return (NULL);
}
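
/*
 * Walk the interface's address list and return the addr_wanted-th
 * preferred source address for the given destination scope, skipping
 * deferred, restricted, wrong-scope and (for IPv6) mismatched
 * link-local-scope or non-next-hop addresses.
 */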
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    int addr_wanted,
    sa_family_t fam,
    sctp_route_t *ro)
{
    struct sctp_ifa *ifa, *sifa;
    int num_eligible_addr = 0;
#ifdef INET6
    struct sockaddr_in6 sin6, lsa6;

    if (fam == AF_INET6) {
        memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
        (void)sa6_recoverscope(&sin6);
    }
#endif
    LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
        if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
            (non_asoc_addr_ok == 0))
            continue;
        sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
#ifdef INET6
        if (fam == AF_INET6 &&
            dest_is_loop &&
            sifa->src_is_loop && sifa->src_is_priv) {
            /*
             * don't allow fe80::1 to be a src on loop ::1, we
             * don't list it to the peer so we will get an
             * abort.
             */
            continue;
        }
        if (fam == AF_INET6 &&
            IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
            IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
            /*
             * link-local <-> link-local must belong to the same
             * scope.
             */
            memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
            (void)sa6_recoverscope(&lsa6);
            if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
                continue;
            }
        }
#endif                          /* INET6 */
        /*
         * Check if the IPv6 address matches to next-hop. In the
         * mobile case, old IPv6 address may be not deleted from the
         * interface. Then, the interface has previous and new
         * addresses. We should use one corresponding to the
         * next-hop. (by micchie)
         */
#ifdef INET6
        if (stcb && fam == AF_INET6 &&
            sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
            if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
                == 0) {
                continue;
            }
        }
#endif
#ifdef INET
        /* Avoid topologically incorrect IPv4 address */
        if (stcb && fam == AF_INET &&
            sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
            if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
                continue;
            }
        }
#endif
        if (stcb) {
            if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
                continue;
            }
            if (((non_asoc_addr_ok == 0) &&
                (sctp_is_addr_restricted(stcb, sifa))) ||
                (non_asoc_addr_ok &&
                (sctp_is_addr_restricted(stcb, sifa)) &&
                (!sctp_is_addr_pending(stcb, sifa)))) {
                /*
                 * It is restricted for some reason..
                 * probably not yet added.
                 */
                continue;
            }
        }
        if (num_eligible_addr >= addr_wanted) {
            return (sifa);
        }
        num_eligible_addr++;
    }
    return (NULL);
}
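
/*
 * Count how many addresses on the interface would qualify as preferred
 * sources for this destination; the caller rotates cur_addr_num over
 * this eligible set.
 */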
static int
sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
    struct sctp_ifa *ifa, *sifa;
    int num_eligible_addr = 0;

    LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
        if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
            (non_asoc_addr_ok == 0)) {
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL) {
            continue;
        }
        if (stcb) {
            if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
                continue;
            }
            if (((non_asoc_addr_ok == 0) &&
                (sctp_is_addr_restricted(stcb, sifa))) ||
                (non_asoc_addr_ok &&
                (sctp_is_addr_restricted(stcb, sifa)) &&
                (!sctp_is_addr_pending(stcb, sifa)))) {
                /*
                 * It is restricted for some reason..
                 * probably not yet added.
                 */
                continue;
            }
        }
        num_eligible_addr++;
    }
    return (num_eligible_addr);
}
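
/*
 * Bound-all source address selection, tried roughly in order:
 * first a preferred address on the interface the route goes out,
 * then (plan_b) a preferred address on any other interface,
 * then (plan_c) an acceptable address on the emit interface,
 * then (plan_d) an acceptable address on any interface.
 * As a last resort the IPv4 private scope is opened up and the
 * search is retried with private addresses allowed.
 */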
static struct sctp_ifa *
sctp_choose_boundall(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t *ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
    int cur_addr_num = 0, num_preferred = 0;
    void *ifn;
    struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    uint32_t ifn_index;
    struct sctp_vrf *vrf;
#ifdef INET
    int retried = 0;
#endif

    /*-
     * For boundall we can use any address in the association.
     * If non_asoc_addr_ok is set we can use any address (at least in
     * theory). So we look for preferred addresses first. If we find one,
     * we use it. Otherwise we next try to get an address on the
     * interface, which we should be able to do (unless non_asoc_addr_ok
     * is false and we are routed out that way). In these cases where we
     * can't use the address of the interface we go through all the
     * ifn's looking for an address we can use and fill that in. Punting
     * means we send back address 0, which will probably cause problems
     * actually since then IP will fill in the address of the route ifn,
     * which means we probably already rejected it.. i.e. here comes an
     * abort :-<.
     */
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
    emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
    if (sctp_ifn == NULL) {
        /* ?? We don't have this guy ?? */
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
        goto bound_all_plan_b;
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
        ifn_index, sctp_ifn->ifn_name);

    if (net) {
        cur_addr_num = net->indx_of_eligible_next_to_use;
    }
    num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
        stcb,
        non_asoc_addr_ok,
        dest_is_loop,
        dest_is_priv, fam);
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
        num_preferred, sctp_ifn->ifn_name);
    if (num_preferred == 0) {
        /*
         * no eligible addresses, we must use some other interface
         * address if we can find one.
         */
        goto bound_all_plan_b;
    }
    /*
     * Ok we have num_eligible_addr set with how many we can use, this
     * may vary from call to call due to addresses being deprecated
     * etc..
     */
    if (cur_addr_num >= num_preferred) {
        cur_addr_num = 0;
    }
    /*
     * select the nth address from the list (where cur_addr_num is the
     * nth) and 0 is the first one, 1 is the second one etc...
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);

    sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
        dest_is_priv, cur_addr_num, fam, ro);

    /* if sctp_ifa is NULL something changed??, fall to plan b. */
    if (sctp_ifa) {
        atomic_add_int(&sctp_ifa->refcount, 1);
        if (net) {
            /* save off where the next one we will want */
            net->indx_of_eligible_next_to_use = cur_addr_num + 1;
        }
        return (sctp_ifa);
    }
    /*
     * plan_b: Look at all interfaces and find a preferred address. If
     * no preferred fall through to plan_c.
     */
bound_all_plan_b:
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
    LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
            sctp_ifn->ifn_name);
        if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
            /* wrong base scope */
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
            continue;
        }
        if ((sctp_ifn == looked_at) && looked_at) {
            /* already looked at this guy */
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
            continue;
        }
        num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
            dest_is_loop, dest_is_priv, fam);
        SCTPDBG(SCTP_DEBUG_OUTPUT2,
            "Found ifn:%p %d preferred source addresses\n",
            ifn, num_preferred);
        if (num_preferred == 0) {
            /* None on this interface. */
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
            continue;
        }
        SCTPDBG(SCTP_DEBUG_OUTPUT2,
            "num preferred:%d on interface:%p cur_addr_num:%d\n",
            num_preferred, (void *)sctp_ifn, cur_addr_num);

        /*
         * Ok we have num_eligible_addr set with how many we can
         * use, this may vary from call to call due to addresses
         * being deprecated etc..
         */
        if (cur_addr_num >= num_preferred) {
            cur_addr_num = 0;
        }
        sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
            dest_is_priv, cur_addr_num, fam, ro);
        if (sifa == NULL)
            continue;
        if (net) {
            net->indx_of_eligible_next_to_use = cur_addr_num + 1;
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
                cur_addr_num);
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
            SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
            SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
        }
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
again_with_private_addresses_allowed:
    /* plan_c: do we have an acceptable address on the emit interface */
    sifa = NULL;
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
    if (emit_ifn == NULL) {
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
        goto plan_d;
    }
    LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
        if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
            (non_asoc_addr_ok == 0)) {
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
            continue;
        }
        sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL) {
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
            continue;
        }
        if (stcb) {
            if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
                SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
                sifa = NULL;
                continue;
            }
            if (((non_asoc_addr_ok == 0) &&
                (sctp_is_addr_restricted(stcb, sifa))) ||
                (non_asoc_addr_ok &&
                (sctp_is_addr_restricted(stcb, sifa)) &&
                (!sctp_is_addr_pending(stcb, sifa)))) {
                /*
                 * It is restricted for some reason..
                 * probably not yet added.
                 */
                SCTPDBG(SCTP_DEBUG_OUTPUT2, "It's restricted\n");
                sifa = NULL;
                continue;
            }
        } else {
            SCTP_PRINTF("Stcb is null - no print\n");
        }
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
plan_d:
    /*
     * plan_d: We are in trouble. No preferred address on the emit
     * interface. And not even a preferred address on all interfaces. Go
     * out and see if we can find an acceptable address somewhere
     * amongst all interfaces.
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
    LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
        if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
            /* wrong base scope */
            continue;
        }
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
                (non_asoc_addr_ok == 0))
                continue;
            sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
                dest_is_loop,
                dest_is_priv, fam);
            if (sifa == NULL)
                continue;
            if (stcb) {
                if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
                    sifa = NULL;
                    continue;
                }
                if (((non_asoc_addr_ok == 0) &&
                    (sctp_is_addr_restricted(stcb, sifa))) ||
                    (non_asoc_addr_ok &&
                    (sctp_is_addr_restricted(stcb, sifa)) &&
                    (!sctp_is_addr_pending(stcb, sifa)))) {
                    /*
                     * It is restricted for some
                     * reason.. probably not yet added.
                     */
                    sifa = NULL;
                    continue;
                }
            }
            goto out;
        }
    }
#ifdef INET
    if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
        stcb->asoc.scope.ipv4_local_scope = 1;
        retried = 1;
        goto again_with_private_addresses_allowed;
    } else if (retried == 1) {
        stcb->asoc.scope.ipv4_local_scope = 0;
    }
#endif
out:
    if (sifa) {
#ifdef INET
        if (retried == 1) {
            LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
                if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
                    /* wrong base scope */
                    continue;
                }
                LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
                    struct sctp_ifa *tmp_sifa;

                    if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
                        (non_asoc_addr_ok == 0))
                        continue;
                    tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
                        dest_is_loop,
                        dest_is_priv, fam);
                    if (tmp_sifa == NULL) {
                        continue;
                    }
                    if (tmp_sifa == sifa) {
                        continue;
                    }
                    if (stcb) {
                        if (sctp_is_address_in_scope(tmp_sifa,
                            &stcb->asoc.scope, 0) == 0) {
                            continue;
                        }
                        if (((non_asoc_addr_ok == 0) &&
                            (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
                            (non_asoc_addr_ok &&
                            (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
                            (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
                            continue;
                        }
                    }
                    if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
                        (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
                        sctp_add_local_addr_restricted(stcb, tmp_sifa);
                    }
                }
            }
        }
#endif
        atomic_add_int(&sifa->refcount, 1);
    }
    return (sifa);
}
/* tcb may be NULL */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t *ro,
    struct sctp_nets *net,
    int non_asoc_addr_ok, uint32_t vrf_id)
{
    struct sctp_ifa *answer;
    uint8_t dest_is_priv, dest_is_loop;
    sa_family_t fam;
#ifdef INET
    struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
#endif
#ifdef INET6
    struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
#endif

    /*-
     * Rules:
     * - Find the route if needed, cache if I can.
     * - Look at interface address in route. Is it in the bound list? If so we
     *   have the best source.
     * - If not we must rotate amongst the addresses.
     *
     * Caveats and issues..
     *
     * Do we need to pay attention to scope. We can have a private address
     * or a global address we are sourcing or sending to. So if we draw
     * it out
     * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
     * For V4
     * ------------------------------------------
     *      source     *      dest  *  result
     * -----------------------------------------
     * <a>  Private    *    Global  *  NAT
     * -----------------------------------------
     * <b>  Private    *    Private *  No problem
     * -----------------------------------------
     * <c>  Global     *    Private *  Huh, How will this work?
     * -----------------------------------------
     * <d>  Global     *    Global  *  No Problem
     *------------------------------------------
     * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
     * For V6
     *------------------------------------------
     *      source     *      dest  *  result
     * -----------------------------------------
     * <a>  Linklocal  *    Global  *
     * -----------------------------------------
     * <b>  Linklocal  * Linklocal  *  No problem
     * -----------------------------------------
     * <c>  Global     * Linklocal  *  Huh, How will this work?
     * -----------------------------------------
     * <d>  Global     *    Global  *  No Problem
     *------------------------------------------
     * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
     *
     * And then we add to that what happens if there are multiple addresses
     * assigned to an interface. Remember the ifa on an ifn is a linked
     * list of addresses. So one interface can have more than one IP
     * address. What happens if we have both a private and a global
     * address? Do we then use context of destination to sort out which
     * one is best? And what about NAT's sending P->G may get you a NAT
     * translation, or should you select the G that's on the interface in
     * preference.
     *
     * Decisions:
     *
     * - count the number of addresses on the interface.
     * - if it is one, no problem except case <c>.
     *   For <a> we will assume a NAT out there.
     * - if there are more than one, then we need to worry about scope P
     *   or G. We should prefer G -> G and P -> P if possible.
     *   Then as a secondary fall back to mixed types G->P being a last
     *   ditch one.
     * - The above all works for bound all, but bound specific we need to
     *   use the same concept but instead only consider the bound
     *   addresses. If the bound set is NOT assigned to the interface then
     *   we must use rotation amongst the bound addresses..
     */
    if (ro->ro_rt == NULL) {
        /*
         * Need a route to cache.
         */
        SCTP_RTALLOC(ro, vrf_id);
    }
    if (ro->ro_rt == NULL) {
        return (NULL);
    }
    fam = ro->ro_dst.sa_family;
    dest_is_priv = dest_is_loop = 0;
    /* Setup our scopes for the destination */
    switch (fam) {
#ifdef INET
    case AF_INET:
        /* Scope based on outbound address */
        if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
            dest_is_loop = 1;
            if (net != NULL) {
                /* mark it as local */
                net->addr_is_local = 1;
            }
        } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
            dest_is_priv = 1;
        }
        break;
#endif
#ifdef INET6
    case AF_INET6:
        /* Scope based on outbound address */
        if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
            SCTP_ROUTE_IS_REAL_LOOP(ro)) {
            /*
             * If the address is a loopback address, which
             * consists of "::1" OR "fe80::1%lo0", we are
             * loopback scope. But we don't use dest_is_priv
             * (link local addresses).
             */
            dest_is_loop = 1;
            if (net != NULL) {
                /* mark it as local */
                net->addr_is_local = 1;
            }
        } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
            dest_is_priv = 1;
        }
        break;
#endif
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
    SCTP_IPI_ADDR_RLOCK();
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        /*
         * Bound all case
         */
        answer = sctp_choose_boundall(stcb, net, ro, vrf_id,
            dest_is_priv, dest_is_loop,
            non_asoc_addr_ok, fam);
        SCTP_IPI_ADDR_RUNLOCK();
        return (answer);
    }
    /*
     * Subset bound case
     */
    if (stcb) {
        answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
            vrf_id, dest_is_priv,
            dest_is_loop,
            non_asoc_addr_ok, fam);
    } else {
        answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
            non_asoc_addr_ok,
            dest_is_priv,
            dest_is_loop, fam);
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (answer);
}
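
/*
 * Scan the control mbuf chain for a cmsg of type c_type at level
 * IPPROTO_SCTP and copy its payload into data.  For the compatibility
 * type SCTP_SNDRCV the newer SCTP_SNDINFO/SCTP_PRINFO/SCTP_AUTHINFO
 * cmsgs are accepted instead and folded into a struct sctp_sndrcvinfo.
 */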
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
{
    struct cmsghdr cmh;
    int tlen, at, found;
    struct sctp_sndinfo sndinfo;
    struct sctp_prinfo prinfo;
    struct sctp_authinfo authinfo;

    tlen = SCTP_BUF_LEN(control);
    at = 0;
    found = 0;
    /*
     * Independent of how many mbufs, find the c_type inside the control
     * structure and copy out the data.
     */
    while (at < tlen) {
        if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
            /* There is not enough room for one more. */
            return (found);
        }
        m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
        if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
            /* We don't have a complete CMSG header. */
            return (found);
        }
        if (((int)cmh.cmsg_len + at) > tlen) {
            /* We don't have the complete CMSG. */
            return (found);
        }
        if ((cmh.cmsg_level == IPPROTO_SCTP) &&
            ((c_type == cmh.cmsg_type) ||
            ((c_type == SCTP_SNDRCV) &&
            ((cmh.cmsg_type == SCTP_SNDINFO) ||
            (cmh.cmsg_type == SCTP_PRINFO) ||
            (cmh.cmsg_type == SCTP_AUTHINFO))))) {
            if (c_type == cmh.cmsg_type) {
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
                    return (found);
                }
                /* It is exactly what we want. Copy it out. */
                m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data);
                return (1);
            } else {
                struct sctp_sndrcvinfo *sndrcvinfo;

                sndrcvinfo = (struct sctp_sndrcvinfo *)data;
                if (found == 0) {
                    if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
                        return (found);
                    }
                    memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
                }
                switch (cmh.cmsg_type) {
                case SCTP_SNDINFO:
                    if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
                        return (found);
                    }
                    m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
                    sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
                    sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
                    sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
                    sndrcvinfo->sinfo_context = sndinfo.snd_context;
                    sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
                    break;
                case SCTP_PRINFO:
                    if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
                        return (found);
                    }
                    m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
                    if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
                        sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
                    } else {
                        sndrcvinfo->sinfo_timetolive = 0;
                    }
                    sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
                    break;
                case SCTP_AUTHINFO:
                    if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
                        return (found);
                    }
                    m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
                    sndrcvinfo->sinfo_keynumber_valid = 1;
                    sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
                    break;
                default:
                    return (found);
                }
                found = 1;
            }
        }
        at += CMSG_ALIGN(cmh.cmsg_len);
    }
    return (found);
}
static int
sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
{
    struct cmsghdr cmh;
    int tlen, at;
    struct sctp_initmsg initmsg;
#ifdef INET
    struct sockaddr_in sin;
#endif
#ifdef INET6
    struct sockaddr_in6 sin6;
#endif

    tlen = SCTP_BUF_LEN(control);
    at = 0;
    while (at < tlen) {
        if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
            /* There is not enough room for one more. */
            *error = EINVAL;
            return (1);
        }
        m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
        if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
            /* We don't have a complete CMSG header. */
            *error = EINVAL;
            return (1);
        }
        if (((int)cmh.cmsg_len + at) > tlen) {
            /* We don't have the complete CMSG. */
            *error = EINVAL;
            return (1);
        }
        if (cmh.cmsg_level == IPPROTO_SCTP) {
            switch (cmh.cmsg_type) {
            case SCTP_INIT:
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
                    *error = EINVAL;
                    return (1);
                }
                m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
                if (initmsg.sinit_max_attempts)
                    stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
                if (initmsg.sinit_num_ostreams)
                    stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
                if (initmsg.sinit_max_instreams)
                    stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
                if (initmsg.sinit_max_init_timeo)
                    stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
                if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
                    struct sctp_stream_out *tmp_str;
                    unsigned int i;

                    /* Default is NOT correct */
                    SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
                        stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
                    SCTP_TCB_UNLOCK(stcb);
                    SCTP_MALLOC(tmp_str,
                        struct sctp_stream_out *,
                        (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
                        SCTP_M_STRMO);
                    SCTP_TCB_LOCK(stcb);
                    if (tmp_str != NULL) {
                        SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
                        stcb->asoc.strmout = tmp_str;
                        stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
                    } else {
                        stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
                    }
                    for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
                        TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
                        stcb->asoc.strmout[i].chunks_on_queues = 0;
                        stcb->asoc.strmout[i].next_sequence_send = 0;
                        stcb->asoc.strmout[i].stream_no = i;
                        stcb->asoc.strmout[i].last_msg_incomplete = 0;
                        stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
                    }
                }
                break;
#ifdef INET
            case SCTP_DSTADDRV4:
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
                    *error = EINVAL;
                    return (1);
                }
                memset(&sin, 0, sizeof(struct sockaddr_in));
                sin.sin_family = AF_INET;
                sin.sin_len = sizeof(struct sockaddr_in);
                sin.sin_port = stcb->rport;
                m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
                if ((sin.sin_addr.s_addr == INADDR_ANY) ||
                    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
                    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
                    *error = EINVAL;
                    return (1);
                }
                if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
                    SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
                    *error = ENOBUFS;
                    return (1);
                }
                break;
#endif
#ifdef INET6
            case SCTP_DSTADDRV6:
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
                    *error = EINVAL;
                    return (1);
                }
                memset(&sin6, 0, sizeof(struct sockaddr_in6));
                sin6.sin6_family = AF_INET6;
                sin6.sin6_len = sizeof(struct sockaddr_in6);
                sin6.sin6_port = stcb->rport;
                m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
                if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
                    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
                    *error = EINVAL;
                    return (1);
                }
#ifdef INET
                if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
                    in6_sin6_2_sin(&sin, &sin6);
                    if ((sin.sin_addr.s_addr == INADDR_ANY) ||
                        (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
                        IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
                        *error = EINVAL;
                        return (1);
                    }
                    if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
                        SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
                        *error = ENOBUFS;
                        return (1);
                    }
                } else
#endif
                    if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
                        SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
                        *error = ENOBUFS;
                        return (1);
                    }
                break;
#endif
            default:
                break;
            }
        }
        at += CMSG_ALIGN(cmh.cmsg_len);
    }
    return (0);
}
static struct sctp_tcb *
sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
    uint16_t port,
    struct mbuf *control,
    struct sctp_nets **net_p,
    int *error)
{
    struct cmsghdr cmh;
    int tlen, at;
    struct sctp_tcb *stcb;
    struct sockaddr *addr;
#ifdef INET
    struct sockaddr_in sin;
#endif
#ifdef INET6
    struct sockaddr_in6 sin6;
#endif

    tlen = SCTP_BUF_LEN(control);
    at = 0;
    while (at < tlen) {
        if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
            /* There is not enough room for one more. */
            *error = EINVAL;
            return (NULL);
        }
        m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
        if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
            /* We don't have a complete CMSG header. */
            *error = EINVAL;
            return (NULL);
        }
        if (((int)cmh.cmsg_len + at) > tlen) {
            /* We don't have the complete CMSG. */
            *error = EINVAL;
            return (NULL);
        }
        if (cmh.cmsg_level == IPPROTO_SCTP) {
            switch (cmh.cmsg_type) {
#ifdef INET
            case SCTP_DSTADDRV4:
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
                    *error = EINVAL;
                    return (NULL);
                }
                memset(&sin, 0, sizeof(struct sockaddr_in));
                sin.sin_family = AF_INET;
                sin.sin_len = sizeof(struct sockaddr_in);
                sin.sin_port = port;
                m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
                addr = (struct sockaddr *)&sin;
                break;
#endif
#ifdef INET6
            case SCTP_DSTADDRV6:
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
                    *error = EINVAL;
                    return (NULL);
                }
                memset(&sin6, 0, sizeof(struct sockaddr_in6));
                sin6.sin6_family = AF_INET6;
                sin6.sin6_len = sizeof(struct sockaddr_in6);
                sin6.sin6_port = port;
                m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
#ifdef INET
                if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
                    in6_sin6_2_sin(&sin, &sin6);
                    addr = (struct sockaddr *)&sin;
                } else
#endif
                    addr = (struct sockaddr *)&sin6;
                break;
#endif
            default:
                addr = NULL;
                break;
            }
            if (addr) {
                stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
                if (stcb != NULL) {
                    return (stcb);
                }
            }
        }
        at += CMSG_ALIGN(cmh.cmsg_len);
    }
    return (NULL);
}
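
/*
 * Build the STATE-COOKIE parameter for an INIT-ACK: the parameter header
 * and state cookie come first, then copies of the received INIT and our
 * INIT-ACK are chained on, and finally room for the HMAC signature,
 * which is zeroed here and filled in by the caller via *signature.
 */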
static struct mbuf *
sctp_add_cookie(struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
{
    struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
    struct sctp_state_cookie *stc;
    struct sctp_paramhdr *ph;
    uint8_t *foo;
    int sig_offset;
    uint16_t cookie_sz;

    mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
        sizeof(struct sctp_paramhdr)), 0,
        M_NOWAIT, 1, MT_DATA);
    if (mret == NULL) {
        return (NULL);
    }
    copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
    if (copy_init == NULL) {
        sctp_m_freem(mret);
        return (NULL);
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
        struct mbuf *mat;

        for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
            if (SCTP_BUF_IS_EXTENDED(mat)) {
                sctp_log_mb(mat, SCTP_MBUF_ICOPY);
            }
        }
    }
#endif
    copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
        M_NOWAIT);
    if (copy_initack == NULL) {
        sctp_m_freem(mret);
        sctp_m_freem(copy_init);
        return (NULL);
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
        struct mbuf *mat;

        for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
            if (SCTP_BUF_IS_EXTENDED(mat)) {
                sctp_log_mb(mat, SCTP_MBUF_ICOPY);
            }
        }
    }
#endif
    /* easy side, we just drop it on the end */
    ph = mtod(mret, struct sctp_paramhdr *);
    SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
        sizeof(struct sctp_paramhdr);
    stc = (struct sctp_state_cookie *)((caddr_t)ph +
        sizeof(struct sctp_paramhdr));
    ph->param_type = htons(SCTP_STATE_COOKIE);
    ph->param_length = 0;       /* fill in at the end */
    /* Fill in the stc cookie data */
    memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));

    /* tack the INIT and then the INIT-ACK onto the chain */
    cookie_sz = 0;
    for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
        cookie_sz += SCTP_BUF_LEN(m_at);
        if (SCTP_BUF_NEXT(m_at) == NULL) {
            SCTP_BUF_NEXT(m_at) = copy_init;
            break;
        }
    }
    for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
        cookie_sz += SCTP_BUF_LEN(m_at);
        if (SCTP_BUF_NEXT(m_at) == NULL) {
            SCTP_BUF_NEXT(m_at) = copy_initack;
            break;
        }
    }
    for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
        cookie_sz += SCTP_BUF_LEN(m_at);
        if (SCTP_BUF_NEXT(m_at) == NULL) {
            break;
        }
    }
    sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
    if (sig == NULL) {
        /* no space, so free the entire chain */
        sctp_m_freem(mret);
        return (NULL);
    }
    SCTP_BUF_LEN(sig) = 0;
    SCTP_BUF_NEXT(m_at) = sig;
    sig_offset = 0;
    foo = (uint8_t *)(mtod(sig, caddr_t) + sig_offset);
    memset(foo, 0, SCTP_SIGNATURE_SIZE);
    *signature = foo;
    SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
    cookie_sz += SCTP_SIGNATURE_SIZE;
    ph->param_length = htons(cookie_sz);
    return (mret);
}
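
/*
 * ECN: return the ECT(0) codepoint for the IP TOS / traffic class field
 * when the association has ECN enabled, otherwise 0.
 */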
static uint8_t
sctp_get_ect(struct sctp_tcb *stcb)
{
    if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
        return (SCTP_ECT0_BIT);
    } else {
        return (0);
    }
}
#if defined(INET) || defined(INET6)
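/*
 * No usable source address (or no route) was found for net: notify the
 * ULP that the interface is down, clear the reachable/PF state, and if
 * net was the primary destination switch the association to an
 * alternate path and invalidate the cached source address.
 */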
static void
sctp_handle_no_route(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int so_locked)
{
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");

    if (net) {
        SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
        if (net->dest_state & SCTP_ADDR_CONFIRMED) {
            if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
                SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
                sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
                    stcb, 0,
                    (void *)net,
                    so_locked);
                net->dest_state &= ~SCTP_ADDR_REACHABLE;
                net->dest_state &= ~SCTP_ADDR_PF;
            }
        }
        if (stcb) {
            if (net == stcb->asoc.primary_destination) {
                /* need a new primary */
                struct sctp_nets *alt;

                alt = sctp_find_alternate_net(stcb, net, 0);
                if (alt != net) {
                    if (stcb->asoc.alternate) {
                        sctp_free_remote_addr(stcb->asoc.alternate);
                    }
                    stcb->asoc.alternate = alt;
                    atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
                    if (net->ro._s_addr) {
                        sctp_free_ifa(net->ro._s_addr);
                        net->ro._s_addr = NULL;
                    }
                    net->src_addr_selected = 0;
                }
            }
        }
    }
}
3824 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3825 struct sctp_tcb *stcb, /* may be NULL */
3826 struct sctp_nets *net,
3827 struct sockaddr *to,
3829 uint32_t auth_offset,
3830 struct sctp_auth_chunk *auth,
3831 uint16_t auth_keyid,
3832 int nofragment_flag,
3839 union sctp_sockstore *over_addr,
3840 uint8_t use_mflowid, uint32_t mflowid,
3841 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3842 int so_locked SCTP_UNUSED
3847 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3850 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3851 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3852 * - fill in the HMAC digest of any AUTH chunk in the packet.
3853 * - calculate and fill in the SCTP checksum.
3854 * - prepend an IP address header.
3855 * - if boundall use INADDR_ANY.
3856 * - if boundspecific do source address selection.
3857 * - set fragmentation option for IPv4.
3858 * - On return from IP output, check/adjust MTU size of output
3859 * interface and smallest_mtu size as well.
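/*
 * Illustrative sketch (assumed from the construction below, not a
 * definitive layout): with UDP encapsulation (port != 0) the IPv4
 * header mbuf prepended here is laid out as
 *
 *   [struct ip][struct udphdr][struct sctphdr][chunk data ...]
 *
 * and the header pointers are derived accordingly:
 *
 *   ip      = mtod(m, struct ip *);
 *   udp     = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
 *   sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
 *
 * Without encapsulation the UDP header is absent and the SCTP common
 * header follows the IP header directly.
 */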
3861 /* Will need ifdefs around this */
3863 struct sctphdr *sctphdr;
3867 #if defined(INET) || defined(INET6)
3871 #if defined(INET) || defined(INET6)
3873 sctp_route_t *ro = NULL;
3874 struct udphdr *udp = NULL;
3879 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3880 struct socket *so = NULL;
3884 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3885 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
3889 #if defined(INET) || defined(INET6)
3891 vrf_id = stcb->asoc.vrf_id;
3893 vrf_id = inp->def_vrf_id;
3896 /* fill in the HMAC digest for any AUTH chunk in the packet */
3897 if ((auth != NULL) && (stcb != NULL)) {
3898 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
3901 tos_value = net->dscp;
3903 tos_value = stcb->asoc.default_dscp;
3905 tos_value = inp->sctp_ep.default_dscp;
3908 switch (to->sa_family) {
3912 struct ip *ip = NULL;
3913 sctp_route_t iproute;
3916 len = sizeof(struct ip) + sizeof(struct sctphdr);
3918 len += sizeof(struct udphdr);
3920 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
3923 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3926 SCTP_ALIGN_TO_END(newm, len);
3927 SCTP_BUF_LEN(newm) = len;
3928 SCTP_BUF_NEXT(newm) = m;
3932 if (net->flowidset == 0) {
3933 panic("Flow ID not set");
3936 m->m_pkthdr.flowid = net->flowid;
3937 m->m_flags |= M_FLOWID;
3939 if (use_mflowid != 0) {
3940 m->m_pkthdr.flowid = mflowid;
3941 m->m_flags |= M_FLOWID;
3944 packet_length = sctp_calculate_len(m);
3945 ip = mtod(m, struct ip *);
3946 ip->ip_v = IPVERSION;
3947 ip->ip_hl = (sizeof(struct ip) >> 2);
3948 if (tos_value == 0) {
3950 * This means especially that it is not set
3951 * at the SCTP layer. So use the value from the IP layer.
3954 tos_value = inp->ip_inp.inp.inp_ip_tos;
3958 tos_value |= sctp_get_ect(stcb);
3960 if ((nofragment_flag) && (port == 0)) {
3961 ip->ip_off = htons(IP_DF);
3963 ip->ip_off = htons(0);
3965 /* FreeBSD has a function for ip_id's */
3966 ip->ip_id = ip_newid();
3968 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
3969 ip->ip_len = htons(packet_length);
3970 ip->ip_tos = tos_value;
3972 ip->ip_p = IPPROTO_UDP;
3974 ip->ip_p = IPPROTO_SCTP;
3979 memset(&iproute, 0, sizeof(iproute));
3980 memcpy(&ro->ro_dst, to, to->sa_len);
3982 ro = (sctp_route_t *) & net->ro;
3984 /* Now the address selection part */
3985 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
3987 /* call the routine to select the src address */
3988 if (net && out_of_asoc_ok == 0) {
3989 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
3990 sctp_free_ifa(net->ro._s_addr);
3991 net->ro._s_addr = NULL;
3992 net->src_addr_selected = 0;
3998 if (net->src_addr_selected == 0) {
3999 /* Cache the source address */
4000 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4003 net->src_addr_selected = 1;
4005 if (net->ro._s_addr == NULL) {
4006 /* No route to host */
4007 net->src_addr_selected = 0;
4008 sctp_handle_no_route(stcb, net, so_locked);
4009 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4011 return (EHOSTUNREACH);
4013 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4015 if (over_addr == NULL) {
4016 struct sctp_ifa *_lsrc;
4018 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4022 if (_lsrc == NULL) {
4023 sctp_handle_no_route(stcb, net, so_locked);
4024 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4026 return (EHOSTUNREACH);
4028 ip->ip_src = _lsrc->address.sin.sin_addr;
4029 sctp_free_ifa(_lsrc);
4031 ip->ip_src = over_addr->sin.sin_addr;
4032 SCTP_RTALLOC(ro, vrf_id);
4036 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4037 sctp_handle_no_route(stcb, net, so_locked);
4038 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4040 return (EHOSTUNREACH);
4042 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4043 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4044 udp->uh_dport = port;
4045 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
4047 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
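/*
 * Sketch of the delayed-checksum convention assumed here: uh_sum is
 * seeded with only the pseudo-header sum (addresses, UDP length, and
 * IPPROTO_UDP); the sum over the UDP payload is folded in when the
 * checksum is finalized for this packet (SCTP_ENABLE_UDP_CSUM() below),
 * roughly:
 *
 *   udp->uh_sum = pseudo_header_sum;       seeded here via in_pseudo()
 *   ...build and attach the chain...
 *   finalize uh_sum over the payload;      done at send time
 */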
4051 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4053 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4056 sctphdr->src_port = src_port;
4057 sctphdr->dest_port = dest_port;
4058 sctphdr->v_tag = v_tag;
4059 sctphdr->checksum = 0;
4062 * If source address selection fails and we find no
4063 * route then the ip_output should fail as well with
4064 * a NO_ROUTE_TO_HOST type error. We probably should
4065 * catch that somewhere and abort the association
4066 * right away (assuming this is an INIT being sent).
4068 if (ro->ro_rt == NULL) {
4070 * src addr selection failed to find a route
4071 * (or valid source addr), so we can't get
4072 * there from here (yet)!
4074 sctp_handle_no_route(stcb, net, so_locked);
4075 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4077 return (EHOSTUNREACH);
4079 if (ro != &iproute) {
4080 memcpy(&iproute, ro, sizeof(*ro));
4082 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4083 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4084 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4085 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
4086 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4089 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4090 /* failed to prepend data, give up */
4091 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4095 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4097 #if defined(SCTP_WITH_NO_CSUM)
4098 SCTP_STAT_INCR(sctps_sendnocrc);
4100 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4101 SCTP_STAT_INCR(sctps_sendswcrc);
4104 SCTP_ENABLE_UDP_CSUM(o_pak);
4107 #if defined(SCTP_WITH_NO_CSUM)
4108 SCTP_STAT_INCR(sctps_sendnocrc);
4110 m->m_pkthdr.csum_flags = CSUM_SCTP;
4111 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4112 SCTP_STAT_INCR(sctps_sendhwcrc);
4115 #ifdef SCTP_PACKET_LOGGING
4116 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4117 sctp_packet_log(o_pak);
4119 /* send it out. table id is taken from stcb */
4120 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4121 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4122 so = SCTP_INP_SO(inp);
4123 SCTP_SOCKET_UNLOCK(so, 0);
4126 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4127 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4128 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4129 atomic_add_int(&stcb->asoc.refcnt, 1);
4130 SCTP_TCB_UNLOCK(stcb);
4131 SCTP_SOCKET_LOCK(so, 0);
4132 SCTP_TCB_LOCK(stcb);
4133 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4136 SCTP_STAT_INCR(sctps_sendpackets);
4137 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4139 SCTP_STAT_INCR(sctps_senderrors);
4141 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4143 /* free temporary routes */
4147 * PMTU check versus smallest asoc MTU goes here.
4150 if ((ro->ro_rt != NULL) &&
4151 (net->ro._s_addr)) {
4154 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4156 mtu -= sizeof(struct udphdr);
4158 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4159 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4162 } else if (ro->ro_rt == NULL) {
4163 /* route was freed */
4164 if (net->ro._s_addr &&
4165 net->src_addr_selected) {
4166 sctp_free_ifa(net->ro._s_addr);
4167 net->ro._s_addr = NULL;
4169 net->src_addr_selected = 0;
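/*
 * Editor's sketch of the IPv4 post-send PMTU logic above, assuming a
 * valid route and source address (the UDP-tunneling guard sits in
 * elided code and is an assumption):
 *
 *   mtu = SCTP_GATHER_MTU_FROM_ROUTE(src, dst, ro->ro_rt);
 *   if (UDP-encapsulated)
 *           mtu -= sizeof(struct udphdr);
 *   if (mtu && stcb->asoc.smallest_mtu > mtu)
 *           sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
 *
 * i.e. the association's smallest_mtu only shrinks, tracking the
 * smallest usable path MTU seen on any of its nets.
 */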
4178 uint32_t flowlabel, flowinfo;
4179 struct ip6_hdr *ip6h;
4180 struct route_in6 ip6route;
4182 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4184 struct sockaddr_in6 lsa6_storage;
4186 u_short prev_port = 0;
4190 flowlabel = net->flowlabel;
4192 flowlabel = stcb->asoc.default_flowlabel;
4194 flowlabel = inp->sctp_ep.default_flowlabel;
4196 if (flowlabel == 0) {
4198 * This means especially that it is not set
4199 * at the SCTP layer. So use the value from the IP layer.
4202 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4204 flowlabel &= 0x000fffff;
4205 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4207 len += sizeof(struct udphdr);
4209 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4212 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4215 SCTP_ALIGN_TO_END(newm, len);
4216 SCTP_BUF_LEN(newm) = len;
4217 SCTP_BUF_NEXT(newm) = m;
4221 if (net->flowidset == 0) {
4222 panic("Flow ID not set");
4225 m->m_pkthdr.flowid = net->flowid;
4226 m->m_flags |= M_FLOWID;
4228 if (use_mflowid != 0) {
4229 m->m_pkthdr.flowid = mflowid;
4230 m->m_flags |= M_FLOWID;
4233 packet_length = sctp_calculate_len(m);
4235 ip6h = mtod(m, struct ip6_hdr *);
4236 /* protect *sin6 from overwrite */
4237 sin6 = (struct sockaddr_in6 *)to;
4241 /* KAME hack: embed scopeid */
4242 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4243 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4247 memset(&ip6route, 0, sizeof(ip6route));
4248 ro = (sctp_route_t *) & ip6route;
4249 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4251 ro = (sctp_route_t *) & net->ro;
4254 * We assume here that inp_flow is in host byte
4255 * order within the TCB!
4257 if (tos_value == 0) {
4259 * This means especially that it is not set
4260 * at the SCTP layer. So use the value from the IP layer.
4263 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4267 tos_value |= sctp_get_ect(stcb);
4271 flowinfo |= tos_value;
4273 flowinfo |= flowlabel;
4274 ip6h->ip6_flow = htonl(flowinfo);
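/*
 * Sketch (an equivalent formulation, not the literal code) of the
 * 32-bit ip6_flow word built above: version, traffic class, and flow
 * label share one word,
 *
 *   flowinfo = (0x6 << 28) | (tos_value << 20) | (flowlabel & 0x000fffff);
 *   ip6h->ip6_flow = htonl(flowinfo);
 *
 * which is why flowlabel was masked to its low 20 bits earlier.
 */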
4276 ip6h->ip6_nxt = IPPROTO_UDP;
4278 ip6h->ip6_nxt = IPPROTO_SCTP;
4280 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4281 ip6h->ip6_dst = sin6->sin6_addr;
4284 * Add SRC address selection here: we can only reuse
4285 * to a limited degree the kame src-addr-sel, since
4286 * we can try their selection but it may not be bound.
4289 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4290 lsa6_tmp.sin6_family = AF_INET6;
4291 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4293 if (net && out_of_asoc_ok == 0) {
4294 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4295 sctp_free_ifa(net->ro._s_addr);
4296 net->ro._s_addr = NULL;
4297 net->src_addr_selected = 0;
4303 if (net->src_addr_selected == 0) {
4304 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4305 /* KAME hack: embed scopeid */
4306 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4307 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4310 /* Cache the source address */
4311 net->ro._s_addr = sctp_source_address_selection(inp,
4317 (void)sa6_recoverscope(sin6);
4318 net->src_addr_selected = 1;
4320 if (net->ro._s_addr == NULL) {
4321 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4322 net->src_addr_selected = 0;
4323 sctp_handle_no_route(stcb, net, so_locked);
4324 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4326 return (EHOSTUNREACH);
4328 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4330 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4331 /* KAME hack: embed scopeid */
4332 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4333 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4336 if (over_addr == NULL) {
4337 struct sctp_ifa *_lsrc;
4339 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4343 if (_lsrc == NULL) {
4344 sctp_handle_no_route(stcb, net, so_locked);
4345 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4347 return (EHOSTUNREACH);
4349 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4350 sctp_free_ifa(_lsrc);
4352 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4353 SCTP_RTALLOC(ro, vrf_id);
4355 (void)sa6_recoverscope(sin6);
4357 lsa6->sin6_port = inp->sctp_lport;
4359 if (ro->ro_rt == NULL) {
4361 * src addr selection failed to find a route
4362 * (or valid source addr), so we can't get there from here (yet)!
4365 sctp_handle_no_route(stcb, net, so_locked);
4366 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4368 return (EHOSTUNREACH);
4371 * XXX: sa6 may not have a valid sin6_scope_id in
4372 * the non-SCOPEDROUTING case.
4374 bzero(&lsa6_storage, sizeof(lsa6_storage));
4375 lsa6_storage.sin6_family = AF_INET6;
4376 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4377 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4378 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4379 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4384 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4385 lsa6_storage.sin6_port = inp->sctp_lport;
4386 lsa6 = &lsa6_storage;
4387 ip6h->ip6_src = lsa6->sin6_addr;
4390 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4391 sctp_handle_no_route(stcb, net, so_locked);
4392 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4394 return (EHOSTUNREACH);
4396 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4397 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4398 udp->uh_dport = port;
4399 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4401 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4403 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4406 sctphdr->src_port = src_port;
4407 sctphdr->dest_port = dest_port;
4408 sctphdr->v_tag = v_tag;
4409 sctphdr->checksum = 0;
4412 * We set the hop limit now since there is a good
4413 * chance that our ro pointer is now filled
4415 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4416 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4419 /* Copy to be sure something bad is not happening */
4420 sin6->sin6_addr = ip6h->ip6_dst;
4421 lsa6->sin6_addr = ip6h->ip6_src;
4424 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4425 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4426 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4427 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4428 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4430 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4432 * preserve the port and scope for link local send
4435 prev_scope = sin6->sin6_scope_id;
4436 prev_port = sin6->sin6_port;
4438 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4439 /* failed to prepend data, give up */
4441 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4444 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4446 #if defined(SCTP_WITH_NO_CSUM)
4447 SCTP_STAT_INCR(sctps_sendnocrc);
4449 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4450 SCTP_STAT_INCR(sctps_sendswcrc);
4452 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4453 udp->uh_sum = 0xffff;
4456 #if defined(SCTP_WITH_NO_CSUM)
4457 SCTP_STAT_INCR(sctps_sendnocrc);
4459 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4460 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4461 SCTP_STAT_INCR(sctps_sendhwcrc);
4464 /* send it out. table id is taken from stcb */
4465 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4466 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4467 so = SCTP_INP_SO(inp);
4468 SCTP_SOCKET_UNLOCK(so, 0);
4471 #ifdef SCTP_PACKET_LOGGING
4472 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4473 sctp_packet_log(o_pak);
4475 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4476 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4477 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4478 atomic_add_int(&stcb->asoc.refcnt, 1);
4479 SCTP_TCB_UNLOCK(stcb);
4480 SCTP_SOCKET_LOCK(so, 0);
4481 SCTP_TCB_LOCK(stcb);
4482 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4486 /* for link local this must be done */
4487 sin6->sin6_scope_id = prev_scope;
4488 sin6->sin6_port = prev_port;
4490 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4491 SCTP_STAT_INCR(sctps_sendpackets);
4492 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4494 SCTP_STAT_INCR(sctps_senderrors);
4497 /* Now if we had a temp route, free it */
4501 * PMTU check versus smallest asoc MTU goes here.
4504 if (ro->ro_rt == NULL) {
4505 /* Route was freed */
4506 if (net->ro._s_addr &&
4507 net->src_addr_selected) {
4508 sctp_free_ifa(net->ro._s_addr);
4509 net->ro._s_addr = NULL;
4511 net->src_addr_selected = 0;
4513 if ((ro->ro_rt != NULL) &&
4514 (net->ro._s_addr)) {
4517 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4519 (stcb->asoc.smallest_mtu > mtu)) {
4520 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4523 net->mtu -= sizeof(struct udphdr);
4527 if (ND_IFINFO(ifp)->linkmtu &&
4528 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4529 sctp_mtu_size_reset(inp,
4531 ND_IFINFO(ifp)->linkmtu);
4539 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4540 ((struct sockaddr *)to)->sa_family);
4542 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4549 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4550 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4556 struct sctp_nets *net;
4557 struct sctp_init_chunk *init;
4558 struct sctp_supported_addr_param *sup_addr;
4559 struct sctp_adaptation_layer_indication *ali;
4560 struct sctp_supported_chunk_types_param *pr_supported;
4561 struct sctp_paramhdr *ph;
4562 int cnt_inits_to = 0;
4564 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4566 /* INITs always go to the primary (and usually ONLY) address */
4567 net = stcb->asoc.primary_destination;
4569 net = TAILQ_FIRST(&stcb->asoc.nets);
4574 /* we confirm any address we send an INIT to */
4575 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4576 (void)sctp_set_primary_addr(stcb, NULL, net);
4578 /* we confirm any address we send an INIT to */
4579 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4581 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4583 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4585 * special hook, if we are sending to link local it will not
4586 * show up in our private address count.
4588 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4592 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4593 /* This case should not happen */
4594 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4597 /* start the INIT timer */
4598 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4600 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
4602 /* No memory, INIT timer will re-attempt. */
4603 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4606 chunk_len = (uint16_t) sizeof(struct sctp_init_chunk);
4609 * assume peer supports asconf in order to be able to queue local
4610 * address changes while an INIT is in flight and before the assoc is established.
4613 stcb->asoc.peer_supports_asconf = 1;
4614 /* Now let's put the chunk header in place */
4615 init = mtod(m, struct sctp_init_chunk *);
4616 /* now the chunk header */
4617 init->ch.chunk_type = SCTP_INITIATION;
4618 init->ch.chunk_flags = 0;
4619 /* fill in later from mbuf we build */
4620 init->ch.chunk_length = 0;
4621 /* place in my tag */
4622 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4623 /* set up some of the credits. */
4624 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4625 SCTP_MINIMAL_RWND));
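/*
 * Sketch of the credit advertised above: the initial a_rwnd is the
 * socket's receive buffer limit, floored at the protocol minimum,
 *
 *   a_rwnd = max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND);
 *
 * with 0 standing in for the buffer limit when no socket is attached.
 */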
4626 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4627 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4628 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4630 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4633 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4634 if (stcb->asoc.scope.ipv4_addr_legal) {
4635 parameter_len += (uint16_t) sizeof(uint16_t);
4637 if (stcb->asoc.scope.ipv6_addr_legal) {
4638 parameter_len += (uint16_t) sizeof(uint16_t);
4640 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4641 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4642 sup_addr->ph.param_length = htons(parameter_len);
4644 if (stcb->asoc.scope.ipv4_addr_legal) {
4645 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4647 if (stcb->asoc.scope.ipv6_addr_legal) {
4648 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4650 padding_len = 4 - 2 * i;
4651 chunk_len += parameter_len;
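/*
 * Editor's note on the padding arithmetic used throughout this
 * builder: every parameter is padded to a multiple of 4 bytes,
 *
 *   padded      = (parameter_len + 3) & ~3;   what SCTP_SIZE32() yields
 *   padding_len = padded - parameter_len;
 *
 * The supported-address-types parameter above is 4 + 2 * i bytes, so
 * "padding_len = 4 - 2 * i" is this same rule for i in {1, 2}.
 */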
4653 /* Adaptation layer indication parameter */
4654 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4655 if (padding_len > 0) {
4656 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4657 chunk_len += padding_len;
4660 parameter_len = (uint16_t) sizeof(struct sctp_adaptation_layer_indication);
4661 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4662 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4663 ali->ph.param_length = htons(parameter_len);
4664 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4665 chunk_len += parameter_len;
4667 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4668 /* Add NAT friendly parameter. */
4669 if (padding_len > 0) {
4670 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4671 chunk_len += padding_len;
4674 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4675 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4676 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4677 ph->param_length = htons(parameter_len);
4678 chunk_len += parameter_len;
4680 /* now any cookie time extensions */
4681 if (stcb->asoc.cookie_preserve_req) {
4682 struct sctp_cookie_perserve_param *cookie_preserve;
4684 if (padding_len > 0) {
4685 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4686 chunk_len += padding_len;
4689 parameter_len = (uint16_t) sizeof(struct sctp_cookie_perserve_param);
4690 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4691 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4692 cookie_preserve->ph.param_length = htons(parameter_len);
4693 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4694 stcb->asoc.cookie_preserve_req = 0;
4695 chunk_len += parameter_len;
4698 if (stcb->asoc.ecn_allowed == 1) {
4699 if (padding_len > 0) {
4700 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4701 chunk_len += padding_len;
4704 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4705 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4706 ph->param_type = htons(SCTP_ECN_CAPABLE);
4707 ph->param_length = htons(parameter_len);
4708 chunk_len += parameter_len;
4710 /* And now tell the peer we do support PR-SCTP. */
4711 if (padding_len > 0) {
4712 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4713 chunk_len += padding_len;
4716 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4717 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4718 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4719 ph->param_length = htons(parameter_len);
4720 chunk_len += parameter_len;
4722 /* And now tell the peer we do all the extensions */
4723 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4724 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4726 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4727 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4728 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4729 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4730 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4731 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4732 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4734 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
4735 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4737 parameter_len = (uint16_t) sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4738 pr_supported->ph.param_length = htons(parameter_len);
4739 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4740 chunk_len += parameter_len;
4742 /* add authentication parameters */
4743 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4744 /* attach RANDOM parameter, if available */
4745 if (stcb->asoc.authinfo.random != NULL) {
4746 struct sctp_auth_random *randp;
4748 if (padding_len > 0) {
4749 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4750 chunk_len += padding_len;
4753 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4754 parameter_len = (uint16_t) sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4755 /* random key already contains the header */
4756 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4757 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4758 chunk_len += parameter_len;
4760 /* add HMAC_ALGO parameter */
4761 if ((stcb->asoc.local_hmacs != NULL) &&
4762 (stcb->asoc.local_hmacs->num_algo > 0)) {
4763 struct sctp_auth_hmac_algo *hmacs;
4765 if (padding_len > 0) {
4766 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4767 chunk_len += padding_len;
4770 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4771 parameter_len = (uint16_t) (sizeof(struct sctp_auth_hmac_algo) +
4772 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4773 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4774 hmacs->ph.param_length = htons(parameter_len);
4775 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *) hmacs->hmac_ids);
4776 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4777 chunk_len += parameter_len;
4779 /* add CHUNKS parameter */
4780 if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) {
4781 struct sctp_auth_chunk_list *chunks;
4783 if (padding_len > 0) {
4784 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4785 chunk_len += padding_len;
4788 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4789 parameter_len = (uint16_t) (sizeof(struct sctp_auth_chunk_list) +
4790 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
4791 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4792 chunks->ph.param_length = htons(parameter_len);
4793 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
4794 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4795 chunk_len += parameter_len;
4798 SCTP_BUF_LEN(m) = chunk_len;
4800 /* now the addresses */
4802 * To optimize this we could put the scoping stuff into a structure
4803 * and remove the individual uint8's from the assoc structure. Then
4804 * we could just sifa in the address within the stcb. But for now
4805 * this is a quick hack to get the address stuff teased apart.
4807 sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, m, cnt_inits_to, &padding_len, &chunk_len);
4809 init->ch.chunk_length = htons(chunk_len);
4810 if (padding_len > 0) {
4811 struct mbuf *m_at, *mp_last;
4814 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4815 if (SCTP_BUF_NEXT(m_at) == NULL)
4818 if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) {
4823 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4824 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4825 (struct sockaddr *)&net->ro._l_addr,
4826 m, 0, NULL, 0, 0, 0, 0,
4827 inp->sctp_lport, stcb->rport, htonl(0),
4831 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4832 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4833 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4837 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4838 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4841 * Given an mbuf containing an INIT or INIT-ACK with the param_offset
4842 * being equal to the beginning of the params (i.e. iphlen +
4843 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4844 * end of the mbuf verifying that all parameters are known.
4846 * For unknown parameters build and return an mbuf with
4847 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4848 * processing this chunk, stop, and set *abort_processing to 1.
4850 * By having param_offset be pre-set to where parameters begin, it is
4851 * hoped that this routine may be reused in the future by new chunks.
4854 struct sctp_paramhdr *phdr, params;
4856 struct mbuf *mat, *op_err;
4857 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4858 int at, limit, pad_needed;
4859 uint16_t ptype, plen, padded_size;
4862 *abort_processing = 0;
4865 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4868 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4869 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
4870 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4871 ptype = ntohs(phdr->param_type);
4872 plen = ntohs(phdr->param_length);
4873 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4874 /* malformed parameter */
4875 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4878 limit -= SCTP_SIZE32(plen);
4880 * All parameters for all chunks that we know/understand are
4881 * listed here. We process them in other places and take
4882 * appropriate stop actions per the upper bits. However this
4883 * is the generic routine processors can call to get back
4884 * an operr to either incorporate (init-ack) or send.
4886 padded_size = SCTP_SIZE32(plen);
4888 /* Params with variable size */
4889 case SCTP_HEARTBEAT_INFO:
4890 case SCTP_STATE_COOKIE:
4891 case SCTP_UNRECOG_PARAM:
4892 case SCTP_ERROR_CAUSE_IND:
4896 /* Params with variable size within a range */
4897 case SCTP_CHUNK_LIST:
4898 case SCTP_SUPPORTED_CHUNK_EXT:
4899 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4900 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4905 case SCTP_SUPPORTED_ADDRTYPE:
4906 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4907 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4913 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4914 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4919 case SCTP_SET_PRIM_ADDR:
4920 case SCTP_DEL_IP_ADDRESS:
4921 case SCTP_ADD_IP_ADDRESS:
4922 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4923 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4924 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4929 /* Params with a fixed size */
4930 case SCTP_IPV4_ADDRESS:
4931 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4932 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4937 case SCTP_IPV6_ADDRESS:
4938 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4939 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
4944 case SCTP_COOKIE_PRESERVE:
4945 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
4946 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
4951 case SCTP_HAS_NAT_SUPPORT:
4954 case SCTP_PRSCTP_SUPPORTED:
4956 if (padded_size != sizeof(struct sctp_paramhdr)) {
4957 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
4962 case SCTP_ECN_CAPABLE:
4963 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
4964 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
4969 case SCTP_ULP_ADAPTATION:
4970 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
4971 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
4976 case SCTP_SUCCESS_REPORT:
4977 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
4978 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
4983 case SCTP_HOSTNAME_ADDRESS:
4985 /* We can NOT handle HOST NAME addresses!! */
4988 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses; abort processing\n");
4989 *abort_processing = 1;
4990 if (op_err == NULL) {
4991 /* Ok, need to try to get an mbuf */
4993 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4995 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4998 l_len += sizeof(struct sctp_paramhdr);
4999 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5001 SCTP_BUF_LEN(op_err) = 0;
5003 * pre-reserve space for ip
5004 * and sctp header and chunk header
5008 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5010 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5012 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5013 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5017 /* If we have space */
5018 struct sctp_paramhdr s;
5021 uint32_t cpthis = 0;
5023 pad_needed = 4 - (err_at % 4);
5024 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5025 err_at += pad_needed;
5027 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5028 s.param_length = htons(sizeof(s) + plen);
5029 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5030 err_at += sizeof(s);
5031 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5033 sctp_m_freem(op_err);
5035 * we are out of memory but
5036 * we still need to have a
5037 * look at what to do (the
5038 * system is in trouble though).
5043 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5050 * we do not recognize the parameter; figure out what we do.
5053 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5054 if ((ptype & 0x4000) == 0x4000) {
5055 /* Report bit is set?? */
5056 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5057 if (op_err == NULL) {
5060 /* Ok need to try to get an mbuf */
5062 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5064 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5067 l_len += sizeof(struct sctp_paramhdr);
5068 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5070 SCTP_BUF_LEN(op_err) = 0;
5072 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5074 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5076 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5077 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5081 /* If we have space */
5082 struct sctp_paramhdr s;
5085 uint32_t cpthis = 0;
5087 pad_needed = 4 - (err_at % 4);
5088 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5089 err_at += pad_needed;
5091 s.param_type = htons(SCTP_UNRECOG_PARAM);
5092 s.param_length = htons(sizeof(s) + plen);
5093 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5094 err_at += sizeof(s);
5095 if (plen > sizeof(tempbuf)) {
5096 plen = sizeof(tempbuf);
5098 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5100 sctp_m_freem(op_err);
5102 * we are out of memory but
5103 * we still need to have a
5104 * look at what to do (the
5105 * system is in trouble though).
5109 goto more_processing;
5111 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5116 if ((ptype & 0x8000) == 0x0000) {
5117 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5120 /* skip this parameter and continue processing */
5121 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5122 at += SCTP_SIZE32(plen);
5127 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5131 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5132 *abort_processing = 1;
5133 if ((op_err == NULL) && phdr) {
5137 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5139 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5141 l_len += (2 * sizeof(struct sctp_paramhdr));
5142 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5144 SCTP_BUF_LEN(op_err) = 0;
5146 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5148 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5150 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5151 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5154 if ((op_err) && phdr) {
5155 struct sctp_paramhdr s;
5158 uint32_t cpthis = 0;
5160 pad_needed = 4 - (err_at % 4);
5161 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5162 err_at += pad_needed;
5164 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5165 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5166 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5167 err_at += sizeof(s);
5168 /* Only copy back the p-hdr that caused the issue */
5169 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
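/*
 * Sketch of the RFC 4960 upper-bit convention the tests above rely on
 * (hypothetical helper for illustration only, not part of this file):
 *
 *   static int
 *   param_type_actions(uint16_t ptype)
 *   {
 *           int skip   = (ptype & 0x8000) != 0;   else: stop processing
 *           int report = (ptype & 0x4000) != 0;   report via op_err
 *
 *           return ((skip << 1) | report);
 *   }
 *
 * This matches the "(ptype & 0x4000) == 0x4000" report test and the
 * "(ptype & 0x8000) == 0x0000" stop test in the default case above.
 */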
5175 sctp_are_there_new_addresses(struct sctp_association *asoc,
5176 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5179 * Given an INIT packet, look through the packet to verify that there
5180 * are NO new addresses. As we go through the parameters add reports
5181 * of any un-understood parameters that require an error. Also we
5182 * must return (1) to drop the packet if we see an un-understood
5183 * parameter that tells us to drop the chunk.
5185 struct sockaddr *sa_touse;
5186 struct sockaddr *sa;
5187 struct sctp_paramhdr *phdr, params;
5188 uint16_t ptype, plen;
5190 struct sctp_nets *net;
5193 struct sockaddr_in sin4, *sa4;
5197 struct sockaddr_in6 sin6, *sa6;
5202 memset(&sin4, 0, sizeof(sin4));
5203 sin4.sin_family = AF_INET;
5204 sin4.sin_len = sizeof(sin4);
5207 memset(&sin6, 0, sizeof(sin6));
5208 sin6.sin6_family = AF_INET6;
5209 sin6.sin6_len = sizeof(sin6);
5211 /* First, what about the src address of the pkt? */
5213 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5214 sa = (struct sockaddr *)&net->ro._l_addr;
5215 if (sa->sa_family == src->sa_family) {
5217 if (sa->sa_family == AF_INET) {
5218 struct sockaddr_in *src4;
5220 sa4 = (struct sockaddr_in *)sa;
5221 src4 = (struct sockaddr_in *)src;
5222 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5229 if (sa->sa_family == AF_INET6) {
5230 struct sockaddr_in6 *src6;
5232 sa6 = (struct sockaddr_in6 *)sa;
5233 src6 = (struct sockaddr_in6 *)src;
5234 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5243 /* New address added! No need to look further. */
5246 /* Ok so far, let's munge through the rest of the packet */
5247 offset += sizeof(struct sctp_init_chunk);
5248 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5251 ptype = ntohs(phdr->param_type);
5252 plen = ntohs(phdr->param_length);
5255 case SCTP_IPV4_ADDRESS:
5257 struct sctp_ipv4addr_param *p4, p4_buf;
5259 phdr = sctp_get_next_param(in_initpkt, offset,
5260 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5261 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5265 p4 = (struct sctp_ipv4addr_param *)phdr;
5266 sin4.sin_addr.s_addr = p4->addr;
5267 sa_touse = (struct sockaddr *)&sin4;
5272 case SCTP_IPV6_ADDRESS:
5274 struct sctp_ipv6addr_param *p6, p6_buf;
5276 phdr = sctp_get_next_param(in_initpkt, offset,
5277 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5278 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5282 p6 = (struct sctp_ipv6addr_param *)phdr;
5283 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5285 sa_touse = (struct sockaddr *)&sin6;
5294 /* ok, sa_touse points to one to check */
5296 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5297 sa = (struct sockaddr *)&net->ro._l_addr;
5298 if (sa->sa_family != sa_touse->sa_family) {
5302 if (sa->sa_family == AF_INET) {
5303 sa4 = (struct sockaddr_in *)sa;
5304 if (sa4->sin_addr.s_addr ==
5305 sin4.sin_addr.s_addr) {
5312 if (sa->sa_family == AF_INET6) {
5313 sa6 = (struct sockaddr_in6 *)sa;
5314 if (SCTP6_ARE_ADDR_EQUAL(
5323 /* New addr added! No need to look further */
5327 offset += SCTP_SIZE32(plen);
5328 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5334 * Given an MBUF chain that was sent into us containing an INIT. Build an
5335 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done
5336 * a pullup to include the IPv6/4 header, SCTP header and initial part of the
5337 * INIT message (i.e. the struct sctp_init_msg).
5340 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5341 struct mbuf *init_pkt, int iphlen, int offset,
5342 struct sockaddr *src, struct sockaddr *dst,
5343 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5344 uint8_t use_mflowid, uint32_t mflowid,
5345 uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5347 struct sctp_association *asoc;
5348 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
5349 struct sctp_init_ack_chunk *initack;
5350 struct sctp_adaptation_layer_indication *ali;
5351 struct sctp_ecn_supported_param *ecn;
5352 struct sctp_prsctp_supported_param *prsctp;
5353 struct sctp_supported_chunk_types_param *pr_supported;
5354 union sctp_sockstore *over_addr;
5357 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5358 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5359 struct sockaddr_in *sin;
5363 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5364 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5365 struct sockaddr_in6 *sin6;
5368 struct sockaddr *to;
5369 struct sctp_state_cookie stc;
5370 struct sctp_nets *net = NULL;
5371 uint8_t *signature = NULL;
5372 int cnt_inits_to = 0;
5373 uint16_t his_limit, i_want;
5374 int abort_flag, padval;
5377 int nat_friendly = 0;
5386 if ((asoc != NULL) &&
5387 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5388 (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) {
5389 /* new addresses, out of here in non-cookie-wait states */
5391 * Send an ABORT; we don't add the new address error clause,
5392 * though we even set the T bit and copy in the 0 tag. This
5393 * looks no different than if no listener was present.
5395 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5397 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5398 use_mflowid, mflowid,
5403 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5404 (offset + sizeof(struct sctp_init_chunk)),
5405 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5408 if (op_err == NULL) {
5409 char msg[SCTP_DIAG_INFO_LEN];
5411 snprintf(msg, sizeof(msg), "%s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__);
5412 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5415 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5416 init_chk->init.initiate_tag, op_err,
5417 use_mflowid, mflowid,
5421 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5423 /* No memory, INIT timer will re-attempt. */
5425 sctp_m_freem(op_err);
5428 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5431 * We might not overwrite the identification[] completely and on
5432 * some platforms time_entered will contain some padding. Therefore
5433 * zero out the cookie to avoid putting uninitialized memory on the wire.
5436 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5438 /* the time I built the cookie */
5439 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5441 /* populate any tie tags */
5443 /* unlock before tag selections */
5444 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5445 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5446 stc.cookie_life = asoc->cookie_life;
5447 net = asoc->primary_destination;
5449 stc.tie_tag_my_vtag = 0;
5450 stc.tie_tag_peer_vtag = 0;
5451 /* life I will award this cookie */
5452 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5455 /* copy in the ports for later check */
5456 stc.myport = sh->dest_port;
5457 stc.peerport = sh->src_port;
5460 * If we wanted to honor cookie life extensions, we would add to
5461 * stc.cookie_life. For now we should NOT honor any extension.
5463 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5464 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5465 stc.ipv6_addr_legal = 1;
5466 if (SCTP_IPV6_V6ONLY(inp)) {
5467 stc.ipv4_addr_legal = 0;
5469 stc.ipv4_addr_legal = 1;
5472 stc.ipv6_addr_legal = 0;
5473 stc.ipv4_addr_legal = 1;
5475 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5482 switch (dst->sa_family) {
5486 /* lookup address */
5487 stc.address[0] = src4->sin_addr.s_addr;
5491 stc.addr_type = SCTP_IPV4_ADDRESS;
5492 /* local from address */
5493 stc.laddress[0] = dst4->sin_addr.s_addr;
5494 stc.laddress[1] = 0;
5495 stc.laddress[2] = 0;
5496 stc.laddress[3] = 0;
5497 stc.laddr_type = SCTP_IPV4_ADDRESS;
5498 /* scope_id is only for v6 */
5500 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5501 if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) {
5506 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5507 /* Must use the address in this case */
5508 if (sctp_is_address_on_local_host(src, vrf_id)) {
5509 stc.loopback_scope = 1;
5512 stc.local_scope = 0;
5520 stc.addr_type = SCTP_IPV6_ADDRESS;
5521 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5522 stc.scope_id = in6_getscope(&src6->sin6_addr);
5523 if (sctp_is_address_on_local_host(src, vrf_id)) {
5524 stc.loopback_scope = 1;
5525 stc.local_scope = 0;
5528 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) {
5530 * If the new destination is a
5531 * LINK_LOCAL we must have common
5532 * site and local scope. Don't
5533 * set local scope though, since we
5534 * must depend on the source to be
5535 * added implicitly. We cannot
5536 * assume, just because we share one
5537 * link, that all links are common.
5539 stc.local_scope = 0;
5543 * we start counting for the private
5544 * address stuff at 1, since the
5545 * link local we source from won't
5546 * show up in our scoped count.
5550 * pull out the scope_id from the incoming packet
5553 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) {
5555 * If the new destination is
5556 * SITE_LOCAL then we must have site scope in common.
5561 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5562 stc.laddr_type = SCTP_IPV6_ADDRESS;
5572 /* set the scope per the existing tcb */
5575 struct sctp_nets *lnet;
5579 stc.loopback_scope = asoc->scope.loopback_scope;
5580 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5581 stc.site_scope = asoc->scope.site_scope;
5582 stc.local_scope = asoc->scope.local_scope;
5584 /* Why do we not consider IPv4 LL addresses? */
5585 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5586 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5587 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5589 * if we have a LL address, start counting at 1.
5597 /* use the net pointer */
5598 to = (struct sockaddr *)&net->ro._l_addr;
5599 switch (to->sa_family) {
5602 sin = (struct sockaddr_in *)to;
5603 stc.address[0] = sin->sin_addr.s_addr;
5607 stc.addr_type = SCTP_IPV4_ADDRESS;
5608 if (net->src_addr_selected == 0) {
5610 * strange case here, the INIT should have
5611 * done the selection.
5613 net->ro._s_addr = sctp_source_address_selection(inp,
5614 stcb, (sctp_route_t *) & net->ro,
5616 if (net->ro._s_addr == NULL)
5619 net->src_addr_selected = 1;
5622 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5623 stc.laddress[1] = 0;
5624 stc.laddress[2] = 0;
5625 stc.laddress[3] = 0;
5626 stc.laddr_type = SCTP_IPV4_ADDRESS;
5627 /* scope_id is only for v6 */
5633 sin6 = (struct sockaddr_in6 *)to;
5634 memcpy(&stc.address, &sin6->sin6_addr,
5635 sizeof(struct in6_addr));
5636 stc.addr_type = SCTP_IPV6_ADDRESS;
5637 stc.scope_id = sin6->sin6_scope_id;
5638 if (net->src_addr_selected == 0) {
5640 * strange case here, the INIT should have
5641 * done the selection.
5643 net->ro._s_addr = sctp_source_address_selection(inp,
5644 stcb, (sctp_route_t *) & net->ro,
5646 if (net->ro._s_addr == NULL)
5649 net->src_addr_selected = 1;
5651 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5652 sizeof(struct in6_addr));
5653 stc.laddr_type = SCTP_IPV6_ADDRESS;
5658 /* Now let's put the SCTP header in place */
5659 initack = mtod(m, struct sctp_init_ack_chunk *);
5660 /* Save it off for quick ref */
5661 stc.peers_vtag = init_chk->init.initiate_tag;
5663 memcpy(stc.identification, SCTP_VERSION_STRING,
5664 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5665 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5666 /* now the chunk header */
5667 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5668 initack->ch.chunk_flags = 0;
5669 /* fill in later from mbuf we build */
5670 initack->ch.chunk_length = 0;
5671 /* place in my tag */
5672 if ((asoc != NULL) &&
5673 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5674 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5675 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5676 /* re-use the v-tags and init-seq here */
5677 initack->init.initiate_tag = htonl(asoc->my_vtag);
5678 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5680 uint32_t vtag, itsn;
5682 if (hold_inp_lock) {
5683 SCTP_INP_INCR_REF(inp);
5684 SCTP_INP_RUNLOCK(inp);
5687 atomic_add_int(&asoc->refcnt, 1);
5688 SCTP_TCB_UNLOCK(stcb);
5690 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5691 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5693 * Got a duplicate vtag on some guy behind a
5694 * NAT; make sure we don't use it.
5698 initack->init.initiate_tag = htonl(vtag);
5699 /* get a TSN to use too */
5700 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5701 initack->init.initial_tsn = htonl(itsn);
5702 SCTP_TCB_LOCK(stcb);
5703 atomic_add_int(&asoc->refcnt, -1);
5705 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5706 initack->init.initiate_tag = htonl(vtag);
5707 /* get a TSN to use too */
5708 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5710 if (hold_inp_lock) {
5711 SCTP_INP_RLOCK(inp);
5712 SCTP_INP_DECR_REF(inp);
5715 /* save away my tag too */
5716 stc.my_vtag = initack->init.initiate_tag;
5718 /* set up some of the credits. */
5719 so = inp->sctp_socket;
5721 /* memory problem */
5725 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5727 /* set what I want */
5728 his_limit = ntohs(init_chk->init.num_inbound_streams);
5729 /* choose what I want */
5731 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5732 i_want = asoc->streamoutcnt;
5734 i_want = inp->sctp_ep.pre_open_stream_count;
5737 i_want = inp->sctp_ep.pre_open_stream_count;
5739 if (his_limit < i_want) {
5740 /* I Want more :< */
5741 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5743 /* I can have what I want :> */
5744 initack->init.num_outbound_streams = htons(i_want);
5746 /* tell him his limit. */
5747 initack->init.num_inbound_streams =
5748 htons(inp->sctp_ep.max_open_streams_intome);
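/*
 * Sketch of the stream-count negotiation just performed: the peer's
 * advertised inbound limit caps what we may open,
 *
 *   outbound = min(i_want, his_limit);
 *   inbound  = inp->sctp_ep.max_open_streams_intome;
 *
 * where i_want is our configured pre-open count, or the current
 * streamoutcnt of an existing association when that is larger.
 */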
5750 /* adaptation layer indication parameter */
5751 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5752 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5753 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5754 ali->ph.param_length = htons(sizeof(*ali));
5755 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5756 SCTP_BUF_LEN(m) += sizeof(*ali);
5757 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5759 ecn = (struct sctp_ecn_supported_param *)((caddr_t)initack + sizeof(*initack));
5763 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
5764 (inp->sctp_ecn_enable == 1)) {
5765 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5766 ecn->ph.param_length = htons(sizeof(*ecn));
5767 SCTP_BUF_LEN(m) += sizeof(*ecn);
5769 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5772 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5774 /* And now tell the peer we do pr-sctp */
5775 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5776 prsctp->ph.param_length = htons(sizeof(*prsctp));
5777 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5779 /* Add NAT friendly parameter */
5780 struct sctp_paramhdr *ph;
5782 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5783 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5784 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5785 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5787 /* And now tell the peer we do all the extensions */
5788 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5789 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5791 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5792 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5793 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5794 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5795 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5796 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5797 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5798 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5799 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5800 p_len = sizeof(*pr_supported) + num_ext;
5801 pr_supported->ph.param_length = htons(p_len);
5802 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5803 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5805 /* add authentication parameters */
5806 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5807 struct sctp_auth_random *randp;
5808 struct sctp_auth_hmac_algo *hmacs;
5809 struct sctp_auth_chunk_list *chunks;
5810 uint16_t random_len;
5812 /* generate and add RANDOM parameter */
5813 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5814 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5815 randp->ph.param_type = htons(SCTP_RANDOM);
5816 p_len = sizeof(*randp) + random_len;
5817 randp->ph.param_length = htons(p_len);
5818 SCTP_READ_RANDOM(randp->random_data, random_len);
5819 /* zero out any padding required */
5820 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5821 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5823 /* add HMAC_ALGO parameter */
5824 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5825 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5826 (uint8_t *) hmacs->hmac_ids);
5828 p_len += sizeof(*hmacs);
5829 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5830 hmacs->ph.param_length = htons(p_len);
5831 /* zero out any padding required */
5832 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5833 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5835 /* add CHUNKS parameter */
5836 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5837 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5838 chunks->chunk_types);
5840 p_len += sizeof(*chunks);
5841 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5842 chunks->ph.param_length = htons(p_len);
5843 /* zero out any padding required */
5844 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5845 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5849 /* now the addresses */
5851 struct sctp_scoping scp;
5854 * To optimize this we could put the scoping stuff into a
5855 * structure and remove the individual uint8's from the stc
5856 * structure. Then we could just sifa in the address within
5857 * the stc. But for now this is a quick hack to get the
5858 * address stuff teased apart.
5860 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5861 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5862 scp.loopback_scope = stc.loopback_scope;
5863 scp.ipv4_local_scope = stc.ipv4_scope;
5864 scp.local_scope = stc.local_scope;
5865 scp.site_scope = stc.site_scope;
5866 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL);
5869 /* tack on the operational error if present */
5878 llen += SCTP_BUF_LEN(ol);
5879 ol = SCTP_BUF_NEXT(ol);
5882 /* must add a pad to the param */
5883 uint32_t cpthis = 0;
5886 padlen = 4 - (llen % 4);
5887 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
5889 while (SCTP_BUF_NEXT(m_at) != NULL) {
5890 m_at = SCTP_BUF_NEXT(m_at);
5892 SCTP_BUF_NEXT(m_at) = op_err;
5893 while (SCTP_BUF_NEXT(m_at) != NULL) {
5894 m_at = SCTP_BUF_NEXT(m_at);
5897 /* pre-calculate the size and update pkt header and chunk header */
5899 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5900 p_len += SCTP_BUF_LEN(m_tmp);
5901 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5902 /* m_tmp should now point to last one */
5907 /* Now we must build a cookie */
5908 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
5909 if (m_cookie == NULL) {
5910 /* memory problem */
5914 /* Now append the cookie to the end and update the space/size */
5915 SCTP_BUF_NEXT(m_tmp) = m_cookie;
5917 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5918 p_len += SCTP_BUF_LEN(m_tmp);
5919 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5920 /* m_tmp should now point to last one */
5926 * Place in the size, but we don't include the last pad (if any) in the chunk size.
5929 initack->ch.chunk_length = htons(p_len);
5932 * Time to sign the cookie. We don't sign over the cookie signature
5933 * itself, thus we set the trailer.
5935 (void)sctp_hmac_m(SCTP_HMAC,
5936 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
5937 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
5938 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
5940 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
5941 * here since the timer will drive a retransmission.
5944 if ((padval) && (mp_last)) {
5945 /* see my previous comments on mp_last */
5946 if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
5947 /* Houston we have a problem, no space */
5952 if (stc.loopback_scope) {
5953 over_addr = (union sctp_sockstore *)dst;
5958 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
5960 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
5962 use_mflowid, mflowid,
5963 SCTP_SO_NOT_LOCKED);
5964 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
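/*
 * A minimal sketch of the cookie-signing layout above: the HMAC covers
 * the cookie body only, the trailing signature bytes are left out of
 * the input, and the result is written into that trailer. The mac()
 * helper is hypothetical; sctp_hmac_m() plays that role in this file.
 */
#if 0	/* illustrative sketch only */
#include <stddef.h>
#include <stdint.h>

extern void mac(const uint8_t *key, size_t keylen,
    const uint8_t *msg, size_t msglen, uint8_t *out);

static void
sign_cookie(uint8_t *cookie, size_t len, size_t sig_len,
    const uint8_t *key, size_t keylen)
{
	/* assumes len >= sig_len; sign everything but the trailer */
	mac(key, keylen, cookie, len - sig_len, cookie + (len - sig_len));
}
#endif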
5969 sctp_prune_prsctp(struct sctp_tcb *stcb,
5970 struct sctp_association *asoc,
5971 struct sctp_sndrcvinfo *srcv,
5975 struct sctp_tmit_chunk *chk, *nchk;
5977 SCTP_TCB_LOCK_ASSERT(stcb);
5978 if ((asoc->peer_supports_prsctp) &&
5979 (asoc->sent_queue_cnt_removeable > 0)) {
5980 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
5982 * Look for chunks marked with the PR_SCTP flag AND
5983 * the buffer space flag. If the one being sent is
5984 * of equal or greater priority, then purge the old
5985 * one and free some space.
5987 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
5989 * This one is PR-SCTP AND buffer space
5992 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5994 * Lower numbers equate to higher
5995 * priority, so if the one we are
5996 * looking at has a larger or equal
5997 * number we want to drop the data
5998 * and NOT retransmit it.
6002 * We release the book_size
6003 * if the mbuf is here
6008 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6012 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6015 freed_spc += ret_spc;
6016 if (freed_spc >= dataout) {
6019 } /* if chunk was present */
6020 } /* if of sufficient priority */
6021 } /* if chunk has enabled */
6022 } /* tailqforeach */
6024 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6025 /* Here we must move to the sent queue and mark */
6026 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6027 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6030 * We release the book_size
6031 * if the mbuf is here
6035 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6038 freed_spc += ret_spc;
6039 if (freed_spc >= dataout) {
6042 } /* end if chk->data */
6043 } /* end if right class */
6044 } /* end if chk pr-sctp */
6045 } /* tailqforeachsafe (chk) */
6046 } /* if enabled in asoc */
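/*
 * A minimal sketch of the pruning policy above: in PR-SCTP buffer
 * mode, tv_sec stores a priority where a lower number means higher
 * priority, so an old chunk is droppable when its stored value is
 * greater than or equal to the new message's lifetime value, and the
 * scan stops once enough space has been freed. All names here are
 * hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

static int
prune_for_space(const long prio[], int nchunks, long new_prio,
    const uint32_t size[], uint32_t needed)
{
	uint32_t freed = 0;
	int i;

	for (i = 0; i < nchunks; i++) {
		if (prio[i] >= new_prio) {	/* old data loses the tie */
			freed += size[i];
			if (freed >= needed)
				return (1);	/* enough space freed */
		}
	}
	return (0);
}
#endif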
6050 sctp_get_frag_point(struct sctp_tcb *stcb,
6051 struct sctp_association *asoc)
6056 * For endpoints that have both v6 and v4 addresses we must reserve
6057 * room for the IPv6 header; for those that are only dealing with IPv4
6058 * we can use a larger frag point.
6060 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6061 ovh = SCTP_MED_OVERHEAD;
6063 ovh = SCTP_MED_V4_OVERHEAD;
6066 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6067 siz = asoc->smallest_mtu - ovh;
6069 siz = (stcb->asoc.sctp_frag_point - ovh);
6071 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6073 /* A data chunk MUST fit in a cluster */
6074 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6077 /* adjust for an AUTH chunk if DATA requires auth */
6078 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6079 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6082 /* make it an even word boundary please */
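/*
 * A minimal sketch of the computation above with hypothetical numbers:
 * with smallest_mtu = 1500, an overhead of 84 and no AUTH, siz becomes
 * 1416, which is already word aligned; the mask only drops the low two
 * bits when needed.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

static uint32_t
frag_point_sketch(uint32_t smallest_mtu, uint32_t overhead,
    uint32_t auth_len)
{
	uint32_t siz;

	siz = smallest_mtu - overhead;	/* room left for DATA */
	siz -= auth_len;		/* AUTH chunk, if DATA requires auth */
	siz &= ~(uint32_t)3;		/* make it an even word boundary */
	return (siz);
}
#endif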
6089 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6092 * We assume that the user wants PR_SCTP_TTL if the user provides a
6093 * positive lifetime but does not specify any PR_SCTP policy.
6095 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6096 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6097 } else if (sp->timetolive > 0) {
6098 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6099 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6103 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6104 case CHUNK_FLAGS_PR_SCTP_BUF:
6106 * Time to live is a priority stored in tv_sec when doing
6107 * the buffer drop thing.
6109 sp->ts.tv_sec = sp->timetolive;
6112 case CHUNK_FLAGS_PR_SCTP_TTL:
6116 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6117 tv.tv_sec = sp->timetolive / 1000;
6118 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6120 * TODO sctp_constants.h needs alternative time
6121 * macros when _KERNEL is undefined.
6123 timevaladd(&sp->ts, &tv);
6126 case CHUNK_FLAGS_PR_SCTP_RTX:
6128 * Time to live is the number of retransmissions stored in tv_sec.
6131 sp->ts.tv_sec = sp->timetolive;
6135 SCTPDBG(SCTP_DEBUG_USRREQ1,
6136 "Unknown PR_SCTP policy %u.\n",
6137 PR_SCTP_POLICY(sp->sinfo_flags));
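/*
 * A minimal sketch of the TTL conversion in the CHUNK_FLAGS_PR_SCTP_TTL
 * case above: timetolive is in milliseconds and is split into whole
 * seconds plus the microsecond remainder, matching
 * (ms / 1000, (ms * 1000) % 1000000). Names here are hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

struct tv_sketch {
	long tv_sec;
	long tv_usec;
};

static struct tv_sketch
ms_to_timeval(uint32_t ms)
{
	struct tv_sketch tv;

	tv.tv_sec = ms / 1000;
	tv.tv_usec = (ms % 1000) * 1000;	/* equals (ms * 1000) % 1000000 */
	return (tv);
}
#endif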
6143 sctp_msg_append(struct sctp_tcb *stcb,
6144 struct sctp_nets *net,
6146 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6150 struct sctp_stream_queue_pending *sp = NULL;
6151 struct sctp_stream_out *strm;
6154 * Given an mbuf chain, put it into the association send queue and
6155 * place it on the wheel
6157 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6158 /* Invalid stream number */
6159 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6163 if ((stcb->asoc.stream_locked) &&
6164 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6165 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6169 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6170 /* Now can we send this? */
6171 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6172 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6173 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6174 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6175 /* got data while shutting down */
6176 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6180 sctp_alloc_a_strmoq(stcb, sp);
6182 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6186 sp->sinfo_flags = srcv->sinfo_flags;
6187 sp->timetolive = srcv->sinfo_timetolive;
6188 sp->ppid = srcv->sinfo_ppid;
6189 sp->context = srcv->sinfo_context;
6190 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6192 atomic_add_int(&sp->net->ref_count, 1);
6196 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6197 sp->stream = srcv->sinfo_stream;
6198 sp->msg_is_complete = 1;
6199 sp->sender_all_done = 1;
6202 sp->tail_mbuf = NULL;
6203 sctp_set_prsctp_policy(sp);
6205 * We could in theory (for sendall) pass the length in, but we would
6206 * still have to hunt through the chain since we need to set up the tail_mbuf.
6210 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6211 if (SCTP_BUF_NEXT(at) == NULL)
6213 sp->length += SCTP_BUF_LEN(at);
6215 if (srcv->sinfo_keynumber_valid) {
6216 sp->auth_keyid = srcv->sinfo_keynumber;
6218 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6220 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6221 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6222 sp->holds_key_ref = 1;
6224 if (hold_stcb_lock == 0) {
6225 SCTP_TCB_SEND_LOCK(stcb);
6227 sctp_snd_sb_alloc(stcb, sp->length);
6228 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6229 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6230 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6232 if (hold_stcb_lock == 0) {
6233 SCTP_TCB_SEND_UNLOCK(stcb);
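/*
 * A minimal sketch of the single-pass chain walk used above to set
 * sp->length and find the tail mbuf. The mchain type is hypothetical;
 * it only mimics an mbuf's next pointer and length.
 */
#if 0	/* illustrative sketch only */
struct mchain {
	struct mchain *next;
	int len;
};

static int
chain_len_and_tail(struct mchain *m, struct mchain **tail)
{
	int total = 0;

	*tail = NULL;
	for (; m != NULL; m = m->next) {
		total += m->len;
		if (m->next == NULL)
			*tail = m;	/* remember the last mbuf */
	}
	return (total);
}
#endif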
6243 static struct mbuf *
6244 sctp_copy_mbufchain(struct mbuf *clonechain,
6245 struct mbuf *outchain,
6246 struct mbuf **endofchain,
6249 uint8_t copy_by_ref)
6252 struct mbuf *appendchain;
6256 if (endofchain == NULL) {
6260 sctp_m_freem(outchain);
6263 if (can_take_mbuf) {
6264 appendchain = clonechain;
6267 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6269 /* It's not in a cluster */
6270 if (*endofchain == NULL) {
6271 /* let's get an mbuf cluster */
6272 if (outchain == NULL) {
6273 /* This is the general case */
6275 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6276 if (outchain == NULL) {
6279 SCTP_BUF_LEN(outchain) = 0;
6280 *endofchain = outchain;
6281 /* get the prepend space */
6282 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6285 * We really should not get a NULL
6291 if (SCTP_BUF_NEXT(m) == NULL) {
6295 m = SCTP_BUF_NEXT(m);
6298 if (*endofchain == NULL) {
6300 * huh, TSNH XXX maybe we
6303 sctp_m_freem(outchain);
6307 /* get the new end of length */
6308 len = M_TRAILINGSPACE(*endofchain);
6310 /* how much is left at the end? */
6311 len = M_TRAILINGSPACE(*endofchain);
6313 /* Find the end of the data, for appending */
6314 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6316 /* Now let's copy it out */
6317 if (len >= sizeofcpy) {
6318 /* It all fits, copy it in */
6319 m_copydata(clonechain, 0, sizeofcpy, cp);
6320 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6322 /* fill up the end of the chain */
6324 m_copydata(clonechain, 0, len, cp);
6325 SCTP_BUF_LEN((*endofchain)) += len;
6326 /* now we need another one */
6329 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6334 SCTP_BUF_NEXT((*endofchain)) = m;
6336 cp = mtod((*endofchain), caddr_t);
6337 m_copydata(clonechain, len, sizeofcpy, cp);
6338 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6342 /* copy the old-fashioned way */
6343 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6344 #ifdef SCTP_MBUF_LOGGING
6345 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6348 for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
6349 if (SCTP_BUF_IS_EXTENDED(mat)) {
6350 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6357 if (appendchain == NULL) {
6360 sctp_m_freem(outchain);
6364 /* tack on to the end */
6365 if (*endofchain != NULL) {
6366 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6370 if (SCTP_BUF_NEXT(m) == NULL) {
6371 SCTP_BUF_NEXT(m) = appendchain;
6374 m = SCTP_BUF_NEXT(m);
6378 * save off the end and update the end-chain position
6382 if (SCTP_BUF_NEXT(m) == NULL) {
6386 m = SCTP_BUF_NEXT(m);
6390 /* save off the end and update the end-chain position */
6393 if (SCTP_BUF_NEXT(m) == NULL) {
6397 m = SCTP_BUF_NEXT(m);
6399 return (appendchain);
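/*
 * A minimal sketch of the append strategy in sctp_copy_mbufchain():
 * fill whatever trailing space the current tail buffer has, then spill
 * the remainder into a freshly allocated buffer. All names are
 * hypothetical and the spill buffer is assumed large enough.
 */
#if 0	/* illustrative sketch only */
#include <stddef.h>
#include <string.h>

static size_t
append_with_spill(char *tail, size_t tail_used, size_t tail_cap,
    char *spill, const char *src, size_t len)
{
	size_t room = tail_cap - tail_used;

	if (len <= room) {
		memcpy(tail + tail_used, src, len);	/* it all fits */
		return (0);
	}
	memcpy(tail + tail_used, src, room);	/* fill up the tail */
	memcpy(spill, src + room, len - room);	/* rest goes to new buffer */
	return (len - room);
}
#endif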
6404 sctp_med_chunk_output(struct sctp_inpcb *inp,
6405 struct sctp_tcb *stcb,
6406 struct sctp_association *asoc,
6409 int control_only, int from_where,
6410 struct timeval *now, int *now_filled, int frag_point, int so_locked
6411 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6417 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6418 uint32_t val SCTP_UNUSED)
6420 struct sctp_copy_all *ca;
6423 int added_control = 0;
6424 int un_sent, do_chunk_output = 1;
6425 struct sctp_association *asoc;
6426 struct sctp_nets *net;
6428 ca = (struct sctp_copy_all *)ptr;
6429 if (ca->m == NULL) {
6432 if (ca->inp != inp) {
6436 if (ca->sndlen > 0) {
6437 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6439 /* can't copy so we are done */
6443 #ifdef SCTP_MBUF_LOGGING
6444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6447 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6448 if (SCTP_BUF_IS_EXTENDED(mat)) {
6449 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6457 SCTP_TCB_LOCK_ASSERT(stcb);
6458 if (stcb->asoc.alternate) {
6459 net = stcb->asoc.alternate;
6461 net = stcb->asoc.primary_destination;
6463 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6464 /* Abort this assoc with m as the user defined reason */
6466 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6468 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6469 0, M_NOWAIT, 1, MT_DATA);
6470 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6473 struct sctp_paramhdr *ph;
6475 ph = mtod(m, struct sctp_paramhdr *);
6476 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6477 ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen);
6480 * We add one here to keep the assoc from disappearing on us.
6483 atomic_add_int(&stcb->asoc.refcnt, 1);
6484 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6486 * sctp_abort_an_association calls sctp_free_asoc(), but
6487 * free_asoc will NOT free the association since we incremented
6488 * the refcnt. We do this to prevent it being freed and things
6489 * getting tricky, since we could end up (from free_asoc)
6490 * calling inpcb_free, which would make a recursive lock call
6491 * to the iterator lock. As a consequence the stcb comes back
6492 * to us unlocked; since free_asoc returns with either no TCB
6493 * or the TCB unlocked, we must relock so it can be unlocked
6494 * in the iterator timer.
6496 SCTP_TCB_LOCK(stcb);
6497 atomic_add_int(&stcb->asoc.refcnt, -1);
6498 goto no_chunk_output;
6501 ret = sctp_msg_append(stcb, net, m,
6505 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6506 /* shutdown this assoc */
6509 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
6511 if (TAILQ_EMPTY(&asoc->send_queue) &&
6512 TAILQ_EMPTY(&asoc->sent_queue) &&
6514 if (asoc->locked_on_sending) {
6518 * there is nothing queued to send, so I'm done.
6521 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6522 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6523 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6525 * only send SHUTDOWN the first time through.
6528 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6529 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6531 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6532 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6533 sctp_stop_timers_for_shutdown(stcb);
6534 sctp_send_shutdown(stcb, net);
6535 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6537 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6538 asoc->primary_destination);
6540 do_chunk_output = 0;
6544 * we still got (or just got) data to send,
6545 * so set SHUTDOWN_PENDING
6548 * XXX sockets draft says that SCTP_EOF
6549 * should be sent with no data. Currently,
6550 * we will allow user data to be sent first
6551 * and move to SHUTDOWN-PENDING.
6553 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6554 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6555 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6556 if (asoc->locked_on_sending) {
6558 * Locked to send out the rest of a message.
6561 struct sctp_stream_queue_pending *sp;
6563 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6565 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6566 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6569 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6570 if (TAILQ_EMPTY(&asoc->send_queue) &&
6571 TAILQ_EMPTY(&asoc->sent_queue) &&
6572 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6574 atomic_add_int(&stcb->asoc.refcnt, 1);
6575 sctp_abort_an_association(stcb->sctp_ep, stcb,
6576 NULL, SCTP_SO_NOT_LOCKED);
6577 atomic_add_int(&stcb->asoc.refcnt, -1);
6578 goto no_chunk_output;
6580 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6581 asoc->primary_destination);
6587 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6588 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6590 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6591 (stcb->asoc.total_flight > 0) &&
6592 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6593 do_chunk_output = 0;
6595 if (do_chunk_output)
6596 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6597 else if (added_control) {
6598 int num_out = 0, reason = 0, now_filled = 0;
6602 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6603 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6604 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
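/*
 * A minimal sketch of the Nagle-style check above: when NODELAY is off,
 * something is already in flight, and the unsent backlog is smaller
 * than one full packet, output is deferred so small writes coalesce.
 * Parameter names are hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

static int
defer_output(int nodelay, uint32_t total_flight, uint32_t un_sent,
    uint32_t smallest_mtu, uint32_t min_overhead)
{
	return ((nodelay == 0) &&
	    (total_flight > 0) &&
	    (un_sent < smallest_mtu - min_overhead));
}
#endif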
6615 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6617 struct sctp_copy_all *ca;
6619 ca = (struct sctp_copy_all *)ptr;
6621 * Do a notify here? Kacheong suggests that the notify be done at
6622 * the send time... so you would push up a notification if any send
6623 * failed. Don't know if this is feasible since the only failures we
6624 * have are "memory" related, and if you cannot get an mbuf to send
6625 * the data you surely can't get an mbuf to send up to notify the
6626 * user you can't send the data :->
6629 /* now free everything */
6630 sctp_m_freem(ca->m);
6631 SCTP_FREE(ca, SCTP_M_COPYAL);
6635 #define MC_ALIGN(m, len) do { \
6636 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
6641 static struct mbuf *
6642 sctp_copy_out_all(struct uio *uio, int len)
6644 struct mbuf *ret, *at;
6645 int left, willcpy, cancpy, error;
6647 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
6653 SCTP_BUF_LEN(ret) = 0;
6654 /* save space for the data chunk header */
6655 cancpy = M_TRAILINGSPACE(ret);
6656 willcpy = min(cancpy, left);
6659 /* Align data to the end */
6660 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6666 SCTP_BUF_LEN(at) = willcpy;
6667 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6670 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
6671 if (SCTP_BUF_NEXT(at) == NULL) {
6674 at = SCTP_BUF_NEXT(at);
6675 SCTP_BUF_LEN(at) = 0;
6676 cancpy = M_TRAILINGSPACE(at);
6677 willcpy = min(cancpy, left);
6684 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6685 struct sctp_sndrcvinfo *srcv)
6688 struct sctp_copy_all *ca;
6690 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6694 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6697 memset(ca, 0, sizeof(struct sctp_copy_all));
6701 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6704 * take off the sendall flag; it would be bad if we failed to do this.
6707 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6708 /* get length and mbuf chain */
6710 ca->sndlen = uio->uio_resid;
6711 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6712 if (ca->m == NULL) {
6713 SCTP_FREE(ca, SCTP_M_COPYAL);
6714 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6718 /* Gather the length of the send */
6722 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6723 ca->sndlen += SCTP_BUF_LEN(mat);
6726 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6727 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6728 SCTP_ASOC_ANY_STATE,
6730 sctp_sendall_completes, inp, 1);
6732 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6733 SCTP_FREE(ca, SCTP_M_COPYAL);
6734 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6742 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6744 struct sctp_tmit_chunk *chk, *nchk;
6746 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6747 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6748 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6750 sctp_m_freem(chk->data);
6753 asoc->ctrl_queue_cnt--;
6754 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6760 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6762 struct sctp_association *asoc;
6763 struct sctp_tmit_chunk *chk, *nchk;
6764 struct sctp_asconf_chunk *acp;
6767 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6768 /* find SCTP_ASCONF chunk in queue */
6769 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6771 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6772 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6777 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6779 sctp_m_freem(chk->data);
6782 asoc->ctrl_queue_cnt--;
6783 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6790 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6791 struct sctp_association *asoc,
6792 struct sctp_tmit_chunk **data_list,
6794 struct sctp_nets *net)
6797 struct sctp_tmit_chunk *tp1;
6799 for (i = 0; i < bundle_at; i++) {
6800 /* off of the send queue */
6801 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6802 asoc->send_queue_cnt--;
6805 * For any chunk that is NOT chunk 0 we zap the time; chunk 0 gets
6806 * zapped or set based on whether an RTO measurement is needed.
6809 data_list[i]->do_rtt = 0;
6812 data_list[i]->sent_rcv_time = net->last_sent_time;
6813 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6814 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6815 if (data_list[i]->whoTo == NULL) {
6816 data_list[i]->whoTo = net;
6817 atomic_add_int(&net->ref_count, 1);
6819 /* on to the sent queue */
6820 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6821 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6822 struct sctp_tmit_chunk *tpp;
6824 /* need to move back */
6826 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6828 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6832 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6835 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6837 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6842 /* This does not lower until the cum-ack passes it */
6843 asoc->sent_queue_cnt++;
6844 if ((asoc->peers_rwnd <= 0) &&
6845 (asoc->total_flight == 0) &&
6847 /* Mark the chunk as being a window probe */
6848 SCTP_STAT_INCR(sctps_windowprobed);
6850 #ifdef SCTP_AUDITING_ENABLED
6851 sctp_audit_log(0xC2, 3);
6853 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6854 data_list[i]->snd_count = 1;
6855 data_list[i]->rec.data.chunk_was_revoked = 0;
6856 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6857 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6858 data_list[i]->whoTo->flight_size,
6859 data_list[i]->book_size,
6860 (uintptr_t) data_list[i]->whoTo,
6861 data_list[i]->rec.data.TSN_seq);
6863 sctp_flight_size_increase(data_list[i]);
6864 sctp_total_flight_increase(stcb, data_list[i]);
6865 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6866 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6867 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6869 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6870 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6871 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6872 /* SWS sender side engages */
6873 asoc->peers_rwnd = 0;
6876 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
6877 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
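/*
 * A minimal sketch of serial-number comparison as used by
 * SCTP_TSN_GT() above, so TSN ordering stays correct across 32-bit
 * wraparound. This signed-difference form is one common formulation;
 * the kernel's actual macro may treat the exact half-range point
 * differently.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

static int
tsn_gt_sketch(uint32_t a, uint32_t b)
{
	/* true when a is "after" b modulo 2^32 */
	return ((int32_t)(a - b) > 0);
}
#endif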
6882 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
6883 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6888 struct sctp_tmit_chunk *chk, *nchk;
6890 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6891 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6892 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
6893 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6894 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6895 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
6896 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6897 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6898 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6899 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6900 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
6901 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
6902 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
6903 /* Stray chunks must be cleaned up */
6905 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6907 sctp_m_freem(chk->data);
6910 asoc->ctrl_queue_cnt--;
6911 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
6912 asoc->fwd_tsn_cnt--;
6913 sctp_free_a_chunk(stcb, chk, so_locked);
6914 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
6915 /* special handling, we must look into the param */
6916 if (chk != asoc->str_reset) {
6917 goto clean_up_anyway;
6925 sctp_can_we_split_this(struct sctp_tcb *stcb,
6927 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
6930 * Make a decision on if I should split a msg into multiple parts.
6931 * This is only asked of incomplete messages.
6935 * If we are doing EEOR we need to always send it if it's the
6936 * entire thing, since it might be all the guy is putting in the hopper.
6939 if (goal_mtu >= length) {
6941 * If we have data outstanding,
6942 * we get another chance when the sack
6943 * arrives to transmit - wait for more data
6945 if (stcb->asoc.total_flight == 0) {
6947 * If nothing is in flight, we zero the
6955 /* You can fill the rest */
6960 * For those strange folk that make the send buffer
6961 * smaller than our fragmentation point, we can't
6962 * get a full msg in so we have to allow splitting.
6964 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
6967 if ((length <= goal_mtu) ||
6968 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
6969 /* Sub-optimal residual; don't split in non-eeor mode. */
6973 * If we reach here, length is larger than the goal_mtu. Do we wish
6974 * to split it for the sake of packing packets together?
6976 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
6977 /* It's OK to split it */
6978 return (min(goal_mtu, frag_point));
6980 /* Nope, can't split */
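/*
 * A worked example of the decision above (hypothetical numbers): with
 * length = 4000, goal_mtu = 1200, frag_point = 1400, EEOR off, and
 * assuming sctp_min_split_point <= 1200 (the residual of 2800 bytes
 * also exceeds sctp_min_residual), the message is larger than goal_mtu
 * and splitting is allowed, so min(goal_mtu, frag_point) = 1200 bytes
 * are taken on this pass.
 */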
6986 sctp_move_to_outqueue(struct sctp_tcb *stcb,
6987 struct sctp_stream_out *strq,
6989 uint32_t frag_point,
6995 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7000 /* Move from the stream to the send_queue keeping track of the total */
7001 struct sctp_association *asoc;
7002 struct sctp_stream_queue_pending *sp;
7003 struct sctp_tmit_chunk *chk;
7004 struct sctp_data_chunk *dchkh;
7005 uint32_t to_move, length;
7006 uint8_t rcv_flags = 0;
7008 uint8_t send_lock_up = 0;
7010 SCTP_TCB_LOCK_ASSERT(stcb);
7013 /* sa_ignore FREED_MEMORY */
7014 sp = TAILQ_FIRST(&strq->outqueue);
7017 if (send_lock_up == 0) {
7018 SCTP_TCB_SEND_LOCK(stcb);
7021 sp = TAILQ_FIRST(&strq->outqueue);
7025 if (strq->last_msg_incomplete) {
7026 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7028 strq->last_msg_incomplete);
7029 strq->last_msg_incomplete = 0;
7033 SCTP_TCB_SEND_UNLOCK(stcb);
7038 if ((sp->msg_is_complete) && (sp->length == 0)) {
7039 if (sp->sender_all_done) {
7041 * We are doing deferred cleanup. Last time through,
7042 * when we took all the data, the sender_all_done was not yet set.
7045 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7046 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7047 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7048 sp->sender_all_done,
7050 sp->msg_is_complete,
7054 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7055 SCTP_TCB_SEND_LOCK(stcb);
7058 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7059 TAILQ_REMOVE(&strq->outqueue, sp, next);
7060 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7062 sctp_free_remote_addr(sp->net);
7066 sctp_m_freem(sp->data);
7069 sctp_free_a_strmoq(stcb, sp, so_locked);
7070 /* we can't be locked to it */
7072 stcb->asoc.locked_on_sending = NULL;
7074 SCTP_TCB_SEND_UNLOCK(stcb);
7077 /* back to get the next msg */
7081 * sender just finished this but still holds a reference.
7090 /* is there some to get */
7091 if (sp->length == 0) {
7097 } else if (sp->discard_rest) {
7098 if (send_lock_up == 0) {
7099 SCTP_TCB_SEND_LOCK(stcb);
7102 /* Whack down the size */
7103 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7104 if ((stcb->sctp_socket != NULL) && \
7105 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7106 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7107 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7110 sctp_m_freem(sp->data);
7112 sp->tail_mbuf = NULL;
7122 some_taken = sp->some_taken;
7123 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7124 sp->msg_is_complete = 1;
7127 length = sp->length;
7128 if (sp->msg_is_complete) {
7129 /* The message is complete */
7130 to_move = min(length, frag_point);
7131 if (to_move == length) {
7132 /* All of it fits in the MTU */
7133 if (sp->some_taken) {
7134 rcv_flags |= SCTP_DATA_LAST_FRAG;
7135 sp->put_last_out = 1;
7137 rcv_flags |= SCTP_DATA_NOT_FRAG;
7138 sp->put_last_out = 1;
7141 /* Not all of it fits, we fragment */
7142 if (sp->some_taken == 0) {
7143 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7148 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7151 * We use a snapshot of length in case it
7152 * is expanding during the compare.
7157 if (to_move >= llen) {
7159 if (send_lock_up == 0) {
7161 * We are taking all of an incomplete msg
7162 * thus we need a send lock.
7164 SCTP_TCB_SEND_LOCK(stcb);
7166 if (sp->msg_is_complete) {
7168 * the sender finished the message.
7175 if (sp->some_taken == 0) {
7176 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7180 /* Nothing to take. */
7181 if (sp->some_taken) {
7190 /* If we reach here, we can copy out a chunk */
7191 sctp_alloc_a_chunk(stcb, chk);
7193 /* No chunk memory */
7199 * Setup for unordered if needed by looking at the user sent info
7202 if (sp->sinfo_flags & SCTP_UNORDERED) {
7203 rcv_flags |= SCTP_DATA_UNORDERED;
7205 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7206 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7207 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7209 /* clear out the chunk before setting up */
7210 memset(chk, 0, sizeof(*chk));
7211 chk->rec.data.rcv_flags = rcv_flags;
7213 if (to_move >= length) {
7214 /* we think we can steal the whole thing */
7215 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7216 SCTP_TCB_SEND_LOCK(stcb);
7219 if (to_move < sp->length) {
7220 /* bail, it changed */
7223 chk->data = sp->data;
7224 chk->last_mbuf = sp->tail_mbuf;
7225 /* register the stealing */
7226 sp->data = sp->tail_mbuf = NULL;
7231 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7232 chk->last_mbuf = NULL;
7233 if (chk->data == NULL) {
7234 sp->some_taken = some_taken;
7235 sctp_free_a_chunk(stcb, chk, so_locked);
7240 #ifdef SCTP_MBUF_LOGGING
7241 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7244 for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
7245 if (SCTP_BUF_IS_EXTENDED(mat)) {
7246 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7251 /* Pull off the data */
7252 m_adj(sp->data, to_move);
7253 /* Now let's work our way down and compact it */
7255 while (m && (SCTP_BUF_LEN(m) == 0)) {
7256 sp->data = SCTP_BUF_NEXT(m);
7257 SCTP_BUF_NEXT(m) = NULL;
7258 if (sp->tail_mbuf == m) {
7260 * Freeing tail? TSNH since
7261 * we supposedly were taking less
7262 * than the sp->length.
7265 panic("Huh, freing tail? - TSNH");
7267 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7268 sp->tail_mbuf = sp->data = NULL;
7277 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7278 chk->copy_by_ref = 1;
7280 chk->copy_by_ref = 0;
7283 * get last_mbuf and counts of mbuf usage. This is ugly but hopefully
7284 * it's only one mbuf.
7286 if (chk->last_mbuf == NULL) {
7287 chk->last_mbuf = chk->data;
7288 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7289 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7292 if (to_move > length) {
7293 /*- This should not happen either
7294 * since we always lower to_move to the size
7295 * of sp->length if it's larger.
7298 panic("Huh, how can to_move be larger?");
7300 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7304 atomic_subtract_int(&sp->length, to_move);
7306 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7307 /* Not enough room for a chunk header, get some */
7310 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
7313 * we're in trouble here. _PREPEND below will free
7314 * all the data if there is no leading space, so we
7315 * must put the data back and restore.
7317 if (send_lock_up == 0) {
7318 SCTP_TCB_SEND_LOCK(stcb);
7321 if (chk->data == NULL) {
7322 /* unsteal the data */
7323 sp->data = chk->data;
7324 sp->tail_mbuf = chk->last_mbuf;
7328 /* reassemble the data */
7330 sp->data = chk->data;
7331 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7333 sp->some_taken = some_taken;
7334 atomic_add_int(&sp->length, to_move);
7337 sctp_free_a_chunk(stcb, chk, so_locked);
7341 SCTP_BUF_LEN(m) = 0;
7342 SCTP_BUF_NEXT(m) = chk->data;
7344 M_ALIGN(chk->data, 4);
7347 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
7348 if (chk->data == NULL) {
7349 /* HELP, TSNH since we assured it would not above? */
7351 panic("prepend failes HELP?");
7353 SCTP_PRINTF("prepend fails HELP?\n");
7354 sctp_free_a_chunk(stcb, chk, so_locked);
7360 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7361 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7362 chk->book_size_scale = 0;
7363 chk->sent = SCTP_DATAGRAM_UNSENT;
7366 chk->asoc = &stcb->asoc;
7367 chk->pad_inplace = 0;
7368 chk->no_fr_allowed = 0;
7369 chk->rec.data.stream_seq = strq->next_sequence_send;
7370 if ((rcv_flags & SCTP_DATA_LAST_FRAG) &&
7371 !(rcv_flags & SCTP_DATA_UNORDERED)) {
7372 strq->next_sequence_send++;
7374 chk->rec.data.stream_number = sp->stream;
7375 chk->rec.data.payloadtype = sp->ppid;
7376 chk->rec.data.context = sp->context;
7377 chk->rec.data.doing_fast_retransmit = 0;
7379 chk->rec.data.timetodrop = sp->ts;
7380 chk->flags = sp->act_flags;
7383 chk->whoTo = sp->net;
7384 atomic_add_int(&chk->whoTo->ref_count, 1);
7388 if (sp->holds_key_ref) {
7389 chk->auth_keyid = sp->auth_keyid;
7390 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7391 chk->holds_key_ref = 1;
7393 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7394 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7395 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7396 (uintptr_t) stcb, sp->length,
7397 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7398 chk->rec.data.TSN_seq);
7400 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7402 * Put the rest of the things in place now. Size was done earlier in
7403 * previous loop prior to padding.
7406 #ifdef SCTP_ASOCLOG_OF_TSNS
7407 SCTP_TCB_LOCK_ASSERT(stcb);
7408 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7409 asoc->tsn_out_at = 0;
7410 asoc->tsn_out_wrapped = 1;
7412 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7413 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7414 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7415 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7416 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7417 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7418 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7419 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7423 dchkh->ch.chunk_type = SCTP_DATA;
7424 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7425 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7426 dchkh->dp.stream_id = htons(strq->stream_no);
7427 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7428 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7429 dchkh->ch.chunk_length = htons(chk->send_size);
7430 /* Now advance the chk->send_size by the actual pad needed. */
7431 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7436 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7437 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7438 chk->pad_inplace = 1;
7440 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7441 /* pad added an mbuf */
7442 chk->last_mbuf = lm;
7444 chk->send_size += pads;
7446 if (PR_SCTP_ENABLED(chk->flags)) {
7447 asoc->pr_sctp_cnt++;
7449 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7450 /* All done pull and kill the message */
7451 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7452 if (sp->put_last_out == 0) {
7453 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7454 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7455 sp->sender_all_done,
7457 sp->msg_is_complete,
7461 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7462 SCTP_TCB_SEND_LOCK(stcb);
7465 TAILQ_REMOVE(&strq->outqueue, sp, next);
7466 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7468 sctp_free_remote_addr(sp->net);
7472 sctp_m_freem(sp->data);
7475 sctp_free_a_strmoq(stcb, sp, so_locked);
7477 /* we can't be locked to it */
7479 stcb->asoc.locked_on_sending = NULL;
7481 /* more to go, we are locked */
7484 asoc->chunks_on_out_queue++;
7485 strq->chunks_on_queues++;
7486 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7487 asoc->send_queue_cnt++;
7490 SCTP_TCB_SEND_UNLOCK(stcb);
7497 sctp_fill_outqueue(struct sctp_tcb *stcb,
7498 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7499 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7504 struct sctp_association *asoc;
7505 struct sctp_stream_out *strq;
7506 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7509 SCTP_TCB_LOCK_ASSERT(stcb);
7511 switch (net->ro._l_addr.sa.sa_family) {
7514 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7519 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7524 goal_mtu = net->mtu;
7527 /* Need an allowance for the data chunk header too */
7528 goal_mtu -= sizeof(struct sctp_data_chunk);
7530 /* must make even word boundary */
7531 goal_mtu &= 0xfffffffc;
7532 if (asoc->locked_on_sending) {
7533 /* We are stuck on one stream until the message completes. */
7534 strq = asoc->locked_on_sending;
7537 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7540 while ((goal_mtu > 0) && strq) {
7543 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7544 &giveup, eeor_mode, &bail, so_locked);
7546 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7549 asoc->locked_on_sending = strq;
7550 if ((moved_how_much == 0) || (giveup) || bail)
7551 /* no more to move for now */
7554 asoc->locked_on_sending = NULL;
7555 if ((giveup) || bail) {
7558 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7563 total_moved += moved_how_much;
7564 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7565 goal_mtu &= 0xfffffffc;
7570 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7572 if (total_moved == 0) {
7573 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7574 (net == stcb->asoc.primary_destination)) {
7575 /* ran dry for primary network net */
7576 SCTP_STAT_INCR(sctps_primary_randry);
7577 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7578 /* ran dry with CMT on */
7579 SCTP_STAT_INCR(sctps_cmt_randry);
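/*
 * A minimal sketch of the per-pass budget maintained above: each
 * message moved consumes its length plus one data chunk header, and
 * the remaining budget is re-aligned to a word boundary before the
 * next stream is serviced. Names are hypothetical.
 */
#if 0	/* illustrative sketch only */
#include <stdint.h>

static uint32_t
budget_after_move(uint32_t goal_mtu, uint32_t moved, uint32_t dchk_hdr)
{
	if (goal_mtu <= moved + dchk_hdr)
		return (0);		/* budget exhausted */
	return ((goal_mtu - (moved + dchk_hdr)) & ~(uint32_t)3);
}
#endif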
7585 sctp_fix_ecn_echo(struct sctp_association *asoc)
7587 struct sctp_tmit_chunk *chk;
7589 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7590 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7591 chk->sent = SCTP_DATAGRAM_UNSENT;
7597 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7599 struct sctp_association *asoc;
7600 struct sctp_tmit_chunk *chk;
7601 struct sctp_stream_queue_pending *sp;
7608 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7609 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7610 if (sp->net == net) {
7611 sctp_free_remote_addr(sp->net);
7616 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7617 if (chk->whoTo == net) {
7618 sctp_free_remote_addr(chk->whoTo);
7625 sctp_med_chunk_output(struct sctp_inpcb *inp,
7626 struct sctp_tcb *stcb,
7627 struct sctp_association *asoc,
7630 int control_only, int from_where,
7631 struct timeval *now, int *now_filled, int frag_point, int so_locked
7632 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7638 * Ok, this is the generic chunk service queue. We must do the
7639 * following: - Service the stream queue that is next, moving any
7640 * message (note I must get a complete message, i.e. FIRST/MIDDLE and
7641 * LAST, to the out queue in one pass) and assigning TSNs. - Check to
7642 * see if the cwnd/rwnd allows any output; if so, go ahead and
7643 * formulate and send the low level chunks, making sure to combine
7644 * any control in the control chunk queue also.
7646 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7647 struct mbuf *outchain, *endoutchain;
7648 struct sctp_tmit_chunk *chk, *nchk;
7650 /* temp arrays for unlinking */
7651 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7652 int no_fragmentflg, error;
7653 unsigned int max_rwnd_per_dest, max_send_per_dest;
7654 int one_chunk, hbflag, skip_data_for_this_net;
7655 int asconf, cookie, no_out_cnt;
7656 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7657 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7659 uint32_t auth_offset = 0;
7660 struct sctp_auth_chunk *auth = NULL;
7661 uint16_t auth_keyid;
7662 int override_ok = 1;
7663 int skip_fill_up = 0;
7664 int data_auth_reqd = 0;
7667 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the destination.
7673 auth_keyid = stcb->asoc.authinfo.active_keyid;
7675 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7676 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7677 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7682 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7684 * First let's prime the pump. For each destination, if there is room
7685 * in the flight size, attempt to pull an MTU's worth out of the
7686 * stream queues into the general send_queue
7688 #ifdef SCTP_AUDITING_ENABLED
7689 sctp_audit_log(0xC2, 2);
7691 SCTP_TCB_LOCK_ASSERT(stcb);
7693 if ((control_only) || (asoc->stream_reset_outstanding))
7698 /* Nothing possible to send? */
7699 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7700 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7701 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7702 TAILQ_EMPTY(&asoc->send_queue) &&
7703 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
7708 if (asoc->peers_rwnd == 0) {
7709 /* No room in peers rwnd */
7711 if (asoc->total_flight > 0) {
7712 /* we are allowed one chunk in flight */
7716 if (stcb->asoc.ecn_echo_cnt_onq) {
7717 /* Record where a sack goes, if any */
7718 if (no_data_chunks &&
7719 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7720 /* Nothing but ECNe to send - we don't do that */
7721 goto nothing_to_send;
7723 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7724 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7725 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7726 sack_goes_to = chk->whoTo;
7731 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7732 if (stcb->sctp_socket)
7733 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7735 max_send_per_dest = 0;
7736 if (no_data_chunks == 0) {
7737 /* How many non-directed chunks are there? */
7738 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7739 if (chk->whoTo == NULL) {
7741 * We already have non-directed chunks on
7742 * the queue, no need to do a fill-up.
7750 if ((no_data_chunks == 0) &&
7751 (skip_fill_up == 0) &&
7752 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7753 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7755 * This for loop we are in takes in each net; if
7756 * it's got space in cwnd and has data sent to it
7757 * (when CMT is off) then it calls
7758 * sctp_fill_outqueue for the net. This gets data on
7759 * the send queue for that network.
7761 * In sctp_fill_outqueue TSNs are assigned and data is
7762 * copied out of the stream buffers. Note: mostly
7763 * copy by reference (we hope).
7765 net->window_probe = 0;
7766 if ((net != stcb->asoc.alternate) &&
7767 ((net->dest_state & SCTP_ADDR_PF) ||
7768 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7769 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7770 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7771 sctp_log_cwnd(stcb, net, 1,
7772 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7776 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7777 (net->flight_size == 0)) {
7778 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7780 if (net->flight_size >= net->cwnd) {
7781 /* skip this network, no room - can't fill */
7782 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7783 sctp_log_cwnd(stcb, net, 3,
7784 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7788 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7789 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7791 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7793 /* memory alloc failure */
7799 /* now service each destination and send out what we can for it */
7800 /* Nothing to send? */
7801 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7802 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7803 TAILQ_EMPTY(&asoc->send_queue)) {
7807 if (asoc->sctp_cmt_on_off > 0) {
7808 /* get the last start point */
7809 start_at = asoc->last_net_cmt_send_started;
7810 if (start_at == NULL) {
7811 /* NULL, so start at the beginning */
7812 start_at = TAILQ_FIRST(&asoc->nets);
7814 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7815 if (start_at == NULL) {
7816 start_at = TAILQ_FIRST(&asoc->nets);
7819 asoc->last_net_cmt_send_started = start_at;
7821 start_at = TAILQ_FIRST(&asoc->nets);
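	/*
	 * A minimal sketch of the CMT round-robin resume logic above:
	 * start from the net after the previous starting point and wrap
	 * to the head of the list, so successive passes begin at
	 * different destinations. The netent type is hypothetical.
	 */
#if 0	/* illustrative sketch only */
struct netent {
	struct netent *next;
};

static struct netent *
next_start(struct netent *head, struct netent *last_started)
{
	struct netent *s;

	if (last_started == NULL || (s = last_started->next) == NULL)
		s = head;	/* NULL or end of list: wrap to beginning */
	return (s);
}
#endif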
7823 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7824 if (chk->whoTo == NULL) {
7825 if (asoc->alternate) {
7826 chk->whoTo = asoc->alternate;
7828 chk->whoTo = asoc->primary_destination;
7830 atomic_add_int(&chk->whoTo->ref_count, 1);
7833 old_start_at = NULL;
7834 again_one_more_time:
7835 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7836 /* how much can we send? */
7837 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7838 if (old_start_at && (old_start_at == net)) {
7839 /* been through the list completely. */
7843 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7844 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7845 (net->flight_size >= net->cwnd)) {
7847 * Nothing on control or asconf and flight is full;
7848 * we can skip even in the CMT case.
7853 endoutchain = outchain = NULL;
7856 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7857 skip_data_for_this_net = 1;
7859 skip_data_for_this_net = 0;
7861 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7863 * if we have a route and an ifp, check to see if we
7864 * have room to send to this guy
7868 ifp = net->ro.ro_rt->rt_ifp;
7869 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7870 SCTP_STAT_INCR(sctps_ifnomemqueued);
7871 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7872 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7877 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7880 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7885 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7895 if (mtu > asoc->peers_rwnd) {
7896 if (asoc->total_flight > 0) {
7897 /* We have a packet in flight somewhere */
7898 r_mtu = asoc->peers_rwnd;
7900 /* We are always allowed to send one MTU out */
7907 /************************/
7908 /* ASCONF transmission */
7909 /************************/
7910 /* Now first let's go through the asconf queue */
7911 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7912 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
7915 if (chk->whoTo == NULL) {
7916 if (asoc->alternate == NULL) {
7917 if (asoc->primary_destination != net) {
7921 if (asoc->alternate != net) {
7926 if (chk->whoTo != net) {
7930 if (chk->data == NULL) {
7933 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
7934 chk->sent != SCTP_DATAGRAM_RESEND) {
7938 * if no AUTH is yet included and this chunk
7939 * requires it, make sure to account for it. We
7940 * don't apply the size until the AUTH chunk is
7941 * actually added below in case there is no room for
7942 * this chunk. NOTE: we overload the use of "omtu"
7945 if ((auth == NULL) &&
7946 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7947 stcb->asoc.peer_auth_chunks)) {
7948 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7951 /* Here we do NOT factor the r_mtu */
7952 if ((chk->send_size < (int)(mtu - omtu)) ||
7953 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7955 * We probably should glom the mbuf chain
7956 * from the chk->data for control but the
7957 * problem is it becomes yet one more level
7958 * of tracking to do if for some reason
7959 * output fails. Then I have got to
7960 * reconstruct the merged control chain... el
7961 * yucko... for now we take the easy way and do the copy.
7965 * Add an AUTH chunk if the chunk requires it;
7966 * save the offset into the chain for AUTH
7968 if ((auth == NULL) &&
7969 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7970 stcb->asoc.peer_auth_chunks))) {
7971 outchain = sctp_add_auth_chunk(outchain,
7976 chk->rec.chunk_id.id);
7977 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7979 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
7980 (int)chk->rec.chunk_id.can_take_data,
7981 chk->send_size, chk->copy_by_ref);
7982 if (outchain == NULL) {
7984 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7987 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7988 /* update our MTU size */
7989 if (mtu > (chk->send_size + omtu))
7990 mtu -= (chk->send_size + omtu);
7993 to_out += (chk->send_size + omtu);
7994 /* Do we clear IP_DF? */
7995 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7998 if (chk->rec.chunk_id.can_take_data)
8001 * set hb flag since we can use these for RTO.
8007 * should sysctl this: don't bundle data
8008 * with ASCONF since it requires AUTH
8011 chk->sent = SCTP_DATAGRAM_SENT;
8012 if (chk->whoTo == NULL) {
8014 atomic_add_int(&net->ref_count, 1);
8019 * Ok we are out of room but we can
8020 * output without affecting the
8021 * flight size since this little guy
8022 * is a control only packet.
8024 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8026 * do NOT clear the asconf flag as
8027 * it is used to do appropriate
8028 * source address selection.
8030 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8031 (struct sockaddr *)&net->ro._l_addr,
8032 outchain, auth_offset, auth,
8033 stcb->asoc.authinfo.active_keyid,
8034 no_fragmentflg, 0, asconf,
8035 inp->sctp_lport, stcb->rport,
8036 htonl(stcb->asoc.peer_vtag),
8040 if (error == ENOBUFS) {
8041 asoc->ifp_had_enobuf = 1;
8042 SCTP_STAT_INCR(sctps_lowlevelerr);
8044 if (from_where == 0) {
8045 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8047 if (*now_filled == 0) {
8048 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8050 *now = net->last_sent_time;
8052 net->last_sent_time = *now;
8055 /* error, could not output */
8056 if (error == EHOSTUNREACH) {
8062 sctp_move_chunks_from_net(stcb, net);
8067 asoc->ifp_had_enobuf = 0;
8068 if (*now_filled == 0) {
8069 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8071 *now = net->last_sent_time;
8073 net->last_sent_time = *now;
8077 * increase the number we sent, if a
8078 * cookie is sent we don't tell them any was sent out.
8081 outchain = endoutchain = NULL;
8085 *num_out += ctl_cnt;
8086 /* recalc a clean slate and setup */
8087 switch (net->ro._l_addr.sa.sa_family) {
8090 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8095 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8108 /************************/
8109 /* Control transmission */
8110 /************************/
8111 /* Now first let's go through the control queue */
8112 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8113 if ((sack_goes_to) &&
8114 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8115 (chk->whoTo != sack_goes_to)) {
8117 * if we have a sack in queue, and we are
8118 * looking at an ecn echo that is NOT queued
8119 * to where the sack is going..
8121 if (chk->whoTo == net) {
8123 * Don't transmit it to where it's
8124 * going (current net)
8127 } else if (sack_goes_to == net) {
8129 * But do transmit it to this address.
8132 goto skip_net_check;
8135 if (chk->whoTo == NULL) {
8136 if (asoc->alternate == NULL) {
8137 if (asoc->primary_destination != net) {
8141 if (asoc->alternate != net) {
8146 if (chk->whoTo != net) {
8151 if (chk->data == NULL) {
8154 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8156 * It must be unsent. Cookies and ASCONFs
8157 * hang around, but their timers will force them
8158 * out when marked for resend.
8163 * if no AUTH is yet included and this chunk
8164 * requires it, make sure to account for it. We
8165 * don't apply the size until the AUTH chunk is
8166 * actually added below in case there is no room for
8167 * this chunk. NOTE: we overload the use of "omtu"
8170 if ((auth == NULL) &&
8171 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8172 stcb->asoc.peer_auth_chunks)) {
8173 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8176 /* Here we do NOT factor the r_mtu */
8177 if ((chk->send_size <= (int)(mtu - omtu)) ||
8178 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8180 * We probably should glom the mbuf chain
8181 * from the chk->data for control but the
8182 * problem is it becomes yet one more level
8183 * of tracking to do if for some reason
8184 * output fails. Then I have got to
8185 * reconstruct the merged control chain... el
8186 * yucko... for now we take the easy way and do the copy.
8190 * Add an AUTH chunk if the chunk requires it;
8191 * save the offset into the chain for AUTH
8193 if ((auth == NULL) &&
8194 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8195 stcb->asoc.peer_auth_chunks))) {
8196 outchain = sctp_add_auth_chunk(outchain,
8201 chk->rec.chunk_id.id);
8202 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8204 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8205 (int)chk->rec.chunk_id.can_take_data,
8206 chk->send_size, chk->copy_by_ref);
8207 if (outchain == NULL) {
8209 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8212 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8213 /* update our MTU size */
8214 if (mtu > (chk->send_size + omtu))
8215 mtu -= (chk->send_size + omtu);
8218 to_out += (chk->send_size + omtu);
8219 /* Do we clear IP_DF? */
8220 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8223 if (chk->rec.chunk_id.can_take_data)
8225 /* Mark things to be removed, if needed */
8226 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8227 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8228 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8229 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8230 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8231 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8232 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8233 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8234 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8235 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8236 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8237 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8240 /* remove these chunks at the end */
8241 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8242 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8243 /* turn off the timer */
8244 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8245 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8246 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8252 * Other chunks, since they have
8253 * timers running (i.e. COOKIE), we
8254 * just "trust" that it gets sent or retransmitted.
8258 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8261 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8263 * Increment the ECNE send count
8264 * here; this means we may be
8265 * over-zealous in our
8266 * counting if the send
8267 * fails, but it's the best
8268 * place to do it (we used
8269 * to do it in the queue of
8270 * the chunk, but that did
8271 * not tell how many times it was sent).
8274 SCTP_STAT_INCR(sctps_sendecne);
8276 chk->sent = SCTP_DATAGRAM_SENT;
8277 if (chk->whoTo == NULL) {
8279 atomic_add_int(&net->ref_count, 1);
8285 * Ok we are out of room but we can
8286 * output without affecting the
8287 * flight size since this little guy
8288 * is a control only packet.
8291 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8293 * do NOT clear the asconf
8294 * flag as it is used to do
8295 * appropriate source
8296 * address selection.
8300 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8303 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8304 (struct sockaddr *)&net->ro._l_addr,
8307 stcb->asoc.authinfo.active_keyid,
8308 no_fragmentflg, 0, asconf,
8309 inp->sctp_lport, stcb->rport,
8310 htonl(stcb->asoc.peer_vtag),
8314 if (error == ENOBUFS) {
8315 asoc->ifp_had_enobuf = 1;
8316 SCTP_STAT_INCR(sctps_lowlevelerr);
8318 if (from_where == 0) {
8319 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8321 /* error, could not output */
8323 if (*now_filled == 0) {
8324 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8326 *now = net->last_sent_time;
8328 net->last_sent_time = *now;
8332 if (error == EHOSTUNREACH) {
8338 sctp_move_chunks_from_net(stcb, net);
8343 asoc->ifp_had_enobuf = 0;
8344 /* Only HB or ASCONF advances time */
8346 if (*now_filled == 0) {
8347 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8349 *now = net->last_sent_time;
8351 net->last_sent_time = *now;
8356 * increase the number we sent, if a
8357 * cookie is sent we don't tell them
8360 outchain = endoutchain = NULL;
8364 *num_out += ctl_cnt;
8365 /* recalc a clean slate and setup */
8366 switch (net->ro._l_addr.sa.sa_family) {
8369 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8374 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8387 /* JRI: if dest is in PF state, do not send data to it */
8388 if ((asoc->sctp_cmt_on_off > 0) &&
8389 (net != stcb->asoc.alternate) &&
8390 (net->dest_state & SCTP_ADDR_PF)) {
8393 if (net->flight_size >= net->cwnd) {
8396 if ((asoc->sctp_cmt_on_off > 0) &&
8397 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8398 (net->flight_size > max_rwnd_per_dest)) {
8402 * We need a specific accounting for the usage of the send
8403 * buffer. We also need to check the number of messages per
8404 * net. For now, this is better than nothing and it disabled
8407 if ((asoc->sctp_cmt_on_off > 0) &&
8408 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8409 (max_send_per_dest > 0) &&
8410 (net->flight_size > max_send_per_dest)) {
8413 /*********************/
8414 /* Data transmission */
8415 /*********************/
		/*
		 * if AUTH for DATA is required and no AUTH has been added
		 * yet, account for this in the mtu now... if no data can be
		 * bundled, this adjustment won't matter anyway since the
		 * packet will be going out...
		 */
		data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.peer_auth_chunks);
		if (data_auth_reqd && (auth == NULL)) {
			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
		/* now let's add any data within the MTU constraints */
		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
			if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
				omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
			if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
				omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
		if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
		    (skip_data_for_this_net == 0)) ||
			TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
				if (no_data_chunks) {
					/* let only control go out */
				if (net->flight_size >= net->cwnd) {
					/* skip this net, no room for data */
				if ((chk->whoTo != NULL) &&
				    (chk->whoTo != net)) {
					/* Don't send the chunk on this net */
				if (asoc->sctp_cmt_on_off == 0) {
					if ((asoc->alternate) &&
					    (asoc->alternate != net) &&
					    (chk->whoTo == NULL)) {
					} else if ((net != asoc->primary_destination) &&
					    (asoc->alternate == NULL) &&
					    (chk->whoTo == NULL)) {
				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
					/*
					 * strange, we have a chunk that is
					 * too big for its destination and
					 * yet no fragment ok flag.
					 * Something went wrong when the
					 * PMTU changed...we did not mark
					 * this chunk for some reason?? I
					 * will fix it here by letting IP
					 * fragment it for now and printing
					 * a warning. This really should not
					 * happen.
					 */
					SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
					    chk->send_size, mtu);
					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
				if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
				    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
					struct sctp_data_chunk *dchkh;

					dchkh = mtod(chk->data, struct sctp_data_chunk *);
					dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
					/* ok we will add this one */
					/*
					 * Add an AUTH chunk, if chunk
					 * requires it, save the offset into
					 * the chain for AUTH
					 */
					if (data_auth_reqd) {
						outchain = sctp_add_auth_chunk(outchain,
						auth_keyid = chk->auth_keyid;
						SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
					} else if (override_ok) {
						auth_keyid = chk->auth_keyid;
					} else if (auth_keyid != chk->auth_keyid) {
					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
					    chk->send_size, chk->copy_by_ref);
					if (outchain == NULL) {
						SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
					/* update our MTU size */
					/* Do clear IP_DF ? */
					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
					/* unsigned subtraction of mtu */
					if (mtu > chk->send_size)
						mtu -= chk->send_size;
					/* unsigned subtraction of r_mtu */
					if (r_mtu > chk->send_size)
						r_mtu -= chk->send_size;
					to_out += chk->send_size;
					if ((to_out > mx_mtu) && no_fragmentflg) {
						panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
						SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
					chk->window_probe = 0;
					data_list[bundle_at++] = chk;
					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
				if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
					if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
						data_list[0]->window_probe = 1;
						net->window_probe = 1;
					/*
					 * Must be sent in order of the
					 * TSNs (on a network)
					 */
			}	/* for (chunk gather loop for this net) */
		}	/* if asoc.state OPEN */
		/* Is there something to send for this destination? */
			/* We may need to start a control timer or two */
				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
				/*
				 * do NOT clear the asconf flag as it is
				 * used to do appropriate source address
				 * selection.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
			/* must start a send timer if data is being sent */
			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
				/*
				 * no timer running on this destination,
				 * restart it.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
			/* Now send it, if there is anything to send :> */
			if ((error = sctp_lowlevel_chunk_output(inp,
			    (struct sockaddr *)&net->ro._l_addr,
			    inp->sctp_lport, stcb->rport,
			    htonl(stcb->asoc.peer_vtag),
				/* error, we could not output */
				if (error == ENOBUFS) {
					SCTP_STAT_INCR(sctps_lowlevelerr);
					asoc->ifp_had_enobuf = 1;
				if (from_where == 0) {
					SCTP_STAT_INCR(sctps_lowlevelerrusr);
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
				if (*now_filled == 0) {
					(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
					*now = net->last_sent_time;
					net->last_sent_time = *now;
				if (error == EHOSTUNREACH) {
					/* Destination went unreachable */
					sctp_move_chunks_from_net(stcb, net);
				/*
				 * I add this line to be paranoid. As far as
				 * I can tell the continue takes us back to
				 * the top of the for, but just to make sure
				 * I will reset these again here.
				 */
				ctl_cnt = bundle_at = 0;
				continue;	/* This takes us back to the
						 * for() for the nets. */
			asoc->ifp_had_enobuf = 0;
			if (bundle_at || hbflag) {
				/* For data/asconf and hb set time */
				if (*now_filled == 0) {
					(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
					*now = net->last_sent_time;
					net->last_sent_time = *now;
			*num_out += (ctl_cnt + bundle_at);
				/* setup for an RTO measurement */
				tsns_sent = data_list[0]->rec.data.TSN_seq;
				/* fill time if not already filled */
				if (*now_filled == 0) {
					(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
					*now = asoc->time_last_sent;
					asoc->time_last_sent = *now;
				if (net->rto_needed) {
					data_list[0]->do_rtt = 1;
					net->rto_needed = 0;
				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
	if (old_start_at == NULL) {
		old_start_at = start_at;
		start_at = TAILQ_FIRST(&asoc->nets);
		goto again_one_more_time;
	/*
	 * At the end there should be no NON timed chunks hanging on this
	 * queue.
	 */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
	if ((*num_out == 0) && (*reason_code == 0)) {
	sctp_clean_up_ctl(stcb, asoc, so_locked);
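
/*
 * Usage sketch (added commentary, not from the original source):
 * sctp_med_chunk_output() is normally driven in a burst loop by
 * sctp_chunk_output() further below, roughly:
 *
 *	do {
 *		num_out = 0;
 *		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
 *		    &reason_code, 0, from_where, &now, &now_filled,
 *		    frag_point, so_locked);
 *	} while (num_out && under_the_max_burst_limit);
 *
 * "under_the_max_burst_limit" is a placeholder for the max_burst
 * accounting that the real caller performs; see sctp_chunk_output().
 */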
sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
	/*
	 * Prepend an OPERATION-ERROR chunk header and put it on the end of
	 * the control chunk queue.
	 */
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;

	SCTP_TCB_LOCK_ASSERT(stcb);
	sctp_alloc_a_chunk(stcb, chk);
		sctp_m_freem(op_err);
	chk->copy_by_ref = 0;
	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
	if (op_err == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	while (mat != NULL) {
		chk->send_size += SCTP_BUF_LEN(mat);
		mat = SCTP_BUF_NEXT(mat);
	chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->asoc = &stcb->asoc;
	hdr = mtod(op_err, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_OPERATION_ERROR;
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(chk->send_size);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
	chk->asoc->ctrl_queue_cnt++;
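
/*
 * Usage sketch (added commentary, not from the original source): a caller
 * reporting an error cause builds the cause TLV in an mbuf and hands it
 * here; sctp_queue_op_err() prepends the chunk header and queues it. For
 * example, with a bare cause of SCTP_CAUSE_OUT_OF_RESC:
 *
 *	struct mbuf *op_err;
 *	struct sctp_paramhdr *ph;
 *
 *	op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
 *	    0, M_NOWAIT, 1, MT_DATA);
 *	if (op_err != NULL) {
 *		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
 *		ph = mtod(op_err, struct sctp_paramhdr *);
 *		ph->param_type = htons(SCTP_CAUSE_OUT_OF_RESC);
 *		ph->param_length = htons(sizeof(struct sctp_paramhdr));
 *		sctp_queue_op_err(stcb, op_err);
 *	}
 */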
sctp_send_cookie_echo(struct mbuf *m,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
	/*
	 * pull out the cookie and put it at the front of the control chunk
	 * queue.
	 */
	struct mbuf *cookie;
	struct sctp_paramhdr parm, *phdr;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	uint16_t ptype, plen;

	/* First find the cookie in the param area */
	at = offset + sizeof(struct sctp_init_chunk);
	SCTP_TCB_LOCK_ASSERT(stcb);
		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (ptype == SCTP_STATE_COOKIE) {
			/* found the cookie */
			if ((pad = (plen % 4))) {
			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
			if (cookie == NULL) {
#ifdef SCTP_MBUF_LOGGING
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
				for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
					if (SCTP_BUF_IS_EXTENDED(mat)) {
						sctp_log_mb(mat, SCTP_MBUF_ICOPY);
		at += SCTP_SIZE32(plen);
	if (cookie == NULL) {
		/* Did not find the cookie */
	/* OK, we got the cookie; let's change it into a COOKIE-ECHO chunk */
	/* first the change from param to cookie */
	hdr = mtod(cookie, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ECHO;
	hdr->chunk_flags = 0;
	/* get the chunk stuff now and place it in the FRONT of the queue */
	sctp_alloc_a_chunk(stcb, chk);
		sctp_m_freem(cookie);
	chk->copy_by_ref = 0;
	chk->send_size = plen;
	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
	chk->asoc = &stcb->asoc;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
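
/*
 * Illustrative sketch (added commentary, not from the original source):
 * the search above is the standard INIT parameter walk with
 * sctp_get_next_param(); each TLV is skipped by its length rounded up to
 * a 4-byte boundary:
 *
 *	at = offset + sizeof(struct sctp_init_chunk);
 *	while ((phdr = sctp_get_next_param(m, at, &parm, sizeof(parm))) != NULL) {
 *		ptype = ntohs(phdr->param_type);
 *		plen = ntohs(phdr->param_length);
 *		if (plen < sizeof(struct sctp_paramhdr))
 *			break;	(malformed parameter, stop)
 *		if (ptype == SCTP_STATE_COOKIE)
 *			... copy plen bytes starting at "at" ...
 *		at += SCTP_SIZE32(plen);
 *	}
 */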
sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
    struct sctp_nets *net)
	/*
	 * take an HB request and make it into an HB ack and send it.
	 */
	struct mbuf *outchain;
	struct sctp_chunkhdr *chdr;
	struct sctp_tmit_chunk *chk;

		/* must have a net pointer */
	outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
	if (outchain == NULL) {
		/* gak, out of memory */
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
	chdr = mtod(outchain, struct sctp_chunkhdr *);
	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
	chdr->chunk_flags = 0;
	if (chk_length % 4) {
		uint32_t cpthis = 0;

		padlen = 4 - (chk_length % 4);
		m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
	sctp_alloc_a_chunk(stcb, chk);
		sctp_m_freem(outchain);
	chk->copy_by_ref = 0;
	chk->send_size = chk_length;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->asoc = &stcb->asoc;
	chk->data = outchain;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
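
/*
 * Worked example for the padding above (added commentary): chunks must end
 * on a 4-byte boundary, so for chk_length = 46 we get padlen = 4 - (46 % 4)
 * = 2, and m_copyback() appends two zero bytes taken from cpthis at offset
 * chk_length, making the copied chunk 48 bytes on the wire.
 */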
sctp_send_cookie_ack(struct sctp_tcb *stcb)
	/* formulate and queue a cookie-ack back to the sender */
	struct mbuf *cookie_ack;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;

	SCTP_TCB_LOCK_ASSERT(stcb);
	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
	if (cookie_ack == NULL) {
	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
		sctp_m_freem(cookie_ack);
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->asoc = &stcb->asoc;
	chk->data = cookie_ack;
	if (chk->asoc->last_control_chunk_from != NULL) {
		chk->whoTo = chk->asoc->last_control_chunk_from;
		atomic_add_int(&chk->whoTo->ref_count, 1);
	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ACK;
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
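
/*
 * Illustrative layout (added commentary): a COOKIE-ACK is a bare chunk
 * header, so the bytes queued above are simply (RFC 4960, chunk type 11):
 *
 *	+-----------+-----------+---------------------+
 *	| Type = 11 | Flags = 0 | Length = htons(4)   |
 *	+-----------+-----------+---------------------+
 */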
sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
	/* formulate and queue a SHUTDOWN-ACK back to the sender */
	struct mbuf *m_shutdown_ack;
	struct sctp_shutdown_ack_chunk *ack_cp;
	struct sctp_tmit_chunk *chk;

	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_shutdown_ack == NULL) {
	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
		sctp_m_freem(m_shutdown_ack);
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->asoc = &stcb->asoc;
	chk->data = m_shutdown_ack;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
	ack_cp->ch.chunk_flags = 0;
	ack_cp->ch.chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
	/* formulate and queue a SHUTDOWN to the sender */
	struct mbuf *m_shutdown;
	struct sctp_shutdown_chunk *shutdown_cp;
	struct sctp_tmit_chunk *chk;

	m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
	if (m_shutdown == NULL) {
	SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
		sctp_m_freem(m_shutdown);
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_shutdown_chunk);
	chk->rec.chunk_id.id = SCTP_SHUTDOWN;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->asoc = &stcb->asoc;
	chk->data = m_shutdown;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
	shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
	shutdown_cp->ch.chunk_flags = 0;
	shutdown_cp->ch.chunk_length = htons(chk->send_size);
	shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
	SCTP_BUF_LEN(m_shutdown) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
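
/*
 * Illustrative layout (added commentary): unlike SHUTDOWN-ACK, the SHUTDOWN
 * chunk carries the cumulative TSN ack filled in above (RFC 4960, chunk
 * type 7):
 *
 *	+----------+-----------+---------------------+
 *	| Type = 7 | Flags = 0 | Length = htons(8)   |
 *	+----------+-----------+---------------------+
 *	|      Cumulative TSN Ack (4 bytes)          |
 *	+--------------------------------------------+
 */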
sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
	/*
	 * formulate and queue an ASCONF to the peer. ASCONF parameters
	 * should be queued on the assoc queue.
	 */
	struct sctp_tmit_chunk *chk;
	struct mbuf *m_asconf;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
	    (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
		/* can't send a new one if there is one in flight already */
	/* compose an ASCONF chunk, maximum length is PMTU */
	m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
	if (m_asconf == NULL) {
	sctp_alloc_a_chunk(stcb, chk);
		sctp_m_freem(m_asconf);
	chk->copy_by_ref = 0;
	chk->data = m_asconf;
	chk->send_size = len;
	chk->rec.chunk_id.id = SCTP_ASCONF;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
	chk->asoc = &stcb->asoc;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
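
/*
 * Usage note (added commentary): callers first queue ASCONF parameters on
 * the association and then call this function; unless the endpoint has
 * SCTP_PCB_FLAGS_MULTIPLE_ASCONFS enabled, only one ASCONF may be
 * outstanding, which is why this silently returns while asconf_send_queue
 * is non-empty.
 */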
sctp_send_asconf_ack(struct sctp_tcb *stcb)
	/*
	 * formulate and queue an ASCONF-ACK back to the sender. The
	 * ASCONF-ACK must be stored in the tcb.
	 */
	struct sctp_tmit_chunk *chk;
	struct sctp_asconf_ack *ack, *latest_ack;
	struct sctp_nets *net = NULL;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* Get the latest ASCONF-ACK */
	latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
	if (latest_ack == NULL) {
	if (latest_ack->last_sent_to != NULL &&
	    latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
		/* we're doing a retransmission */
		net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
			if (stcb->asoc.last_control_chunk_from == NULL) {
				if (stcb->asoc.alternate) {
					net = stcb->asoc.alternate;
					net = stcb->asoc.primary_destination;
				net = stcb->asoc.last_control_chunk_from;
		if (stcb->asoc.last_control_chunk_from == NULL) {
			if (stcb->asoc.alternate) {
				net = stcb->asoc.alternate;
				net = stcb->asoc.primary_destination;
			net = stcb->asoc.last_control_chunk_from;
	latest_ack->last_sent_to = net;
	TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
		if (ack->data == NULL) {
		/* copy the asconf_ack */
		m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
		if (m_ack == NULL) {
			/* couldn't copy it */
#ifdef SCTP_MBUF_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
			for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
		sctp_alloc_a_chunk(stcb, chk);
			sctp_m_freem(m_ack);
		chk->copy_by_ref = 0;
		atomic_add_int(&chk->whoTo->ref_count, 1);
		chk->send_size = ack->len;
		chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
		chk->rec.chunk_id.can_take_data = 1;
		chk->sent = SCTP_DATAGRAM_UNSENT;
		chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;	/* XXX */
		chk->asoc = &stcb->asoc;
		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
		chk->asoc->ctrl_queue_cnt++;
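
/*
 * Note (added commentary): the loop above re-queues every cached ASCONF-ACK
 * from asoc->asconf_ack_sent as its own control chunk. The destination
 * "net" chosen before the loop deliberately avoids last_control_chunk_from
 * on a retransmission, in the same spirit as the alternate-net selection
 * done for SACKs in sctp_send_sack().
 */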
sctp_chunk_retransmission(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	/*
	 * send out one MTU of retransmission. If fast_retransmit is
	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
	 * rwnd. For a Cookie or Asconf in the control chunk queue we
	 * retransmit them by themselves.
	 *
	 * For data chunks we will pick out the lowest TSNs in the sent_queue
	 * marked for resend and bundle them all together (up to an MTU of
	 * the destination). The address to send to should have been
	 * selected/changed where the retransmission was marked (i.e. in FR
	 * or t3-timeout routines).
	 */
	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
	struct sctp_tmit_chunk *chk, *fwd;
	struct mbuf *m, *endofchain;
	struct sctp_nets *net = NULL;
	uint32_t tsns_sent = 0;
	int no_fragmentflg, bundle_at, cnt_thru;
	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
	struct sctp_auth_chunk *auth = NULL;
	uint32_t auth_offset = 0;
	uint16_t auth_keyid;
	int override_ok = 1;
	int data_auth_reqd = 0;

	SCTP_TCB_LOCK_ASSERT(stcb);
	tmr_started = ctl_cnt = bundle_at = error = 0;
	endofchain = m = NULL;
	auth_keyid = stcb->asoc.authinfo.active_keyid;
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xC3, 1);
	if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue))) {
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
		    asoc->sent_queue_retran_cnt);
		asoc->sent_queue_cnt = 0;
		asoc->sent_queue_cnt_removeable = 0;
		/* send back 0/0 so we enter normal transmission */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
				if (chk != asoc->str_reset) {
					/*
					 * not eligible for retran if it is
					 * not ours
					 */
			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
		/*
		 * Add an AUTH chunk, if the chunk requires it; save the
		 * offset into the chain for AUTH
		 */
		if ((auth == NULL) &&
		    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
		    stcb->asoc.peer_auth_chunks))) {
			m = sctp_add_auth_chunk(m, &endofchain,
			    &auth, &auth_offset,
			    chk->rec.chunk_id.id);
			SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
		m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
	/* do we have control chunks to retransmit? */
		/* Start a timer no matter if we succeed or fail */
		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
		chk->snd_count++;	/* update our count */
		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
		    (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
		    auth_offset, auth, stcb->asoc.authinfo.active_keyid,
		    no_fragmentflg, 0, 0,
		    inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
		    chk->whoTo->port, NULL,
			SCTP_STAT_INCR(sctps_lowlevelerr);
		/*
		 * We don't want to mark the net->sent time here, since we
		 * use this for HB and a retransmission cannot measure RTT.
		 */
		/* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
		chk->sent = SCTP_DATAGRAM_SENT;
		sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
		/* Clean up the fwd-tsn list */
		sctp_clean_up_ctl(stcb, asoc, so_locked);
	/*
	 * OK, it is just data retransmission we need to do, or that and a
	 * fwd-tsn with it all.
	 */
	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		return (SCTP_RETRAN_DONE);
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
		/* not yet open, resend the cookie and that is it */
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(20, inp, stcb, NULL);
	data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent != SCTP_DATAGRAM_RESEND) {
			/* No, not sent to this net or not ready for rtx */
		if (chk->data == NULL) {
			SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
			    chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
		if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
		    (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
			/* Gak, we have exceeded max unlucky retran, abort! */
			SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
			    SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
			atomic_add_int(&stcb->asoc.refcnt, 1);
			sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (SCTP_RETRAN_EXIT);
		/* pick up the net */
		switch (net->ro._l_addr.sa.sa_family) {
			mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
			mtu = net->mtu - SCTP_MIN_OVERHEAD;
		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
			/* No room in peers rwnd */
			tsn = asoc->last_acked_seq + 1;
			if (tsn == chk->rec.data.TSN_seq) {
				/*
				 * we make a special exception for this
				 * case. The peer has no rwnd but is missing
				 * the lowest chunk.. which is probably what
				 * is holding up the rwnd.
				 */
				goto one_chunk_around;
		if (asoc->peers_rwnd < mtu) {
			if ((asoc->peers_rwnd == 0) &&
			    (asoc->total_flight == 0)) {
				chk->window_probe = 1;
				chk->whoTo->window_probe = 1;
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC3, 2);
		net->fast_retran_ip = 0;
		if (chk->rec.data.doing_fast_retransmit == 0) {
			/*
			 * if no FR is in progress, skip destinations that
			 * have flight_size > cwnd.
			 */
			if (net->flight_size >= net->cwnd) {
			/*
			 * Mark the destination net to have FR recovery
			 */
			net->fast_retran_ip = 1;
		/*
		 * if no AUTH is yet included and this chunk requires it,
		 * make sure to account for it. We don't apply the size
		 * until the AUTH chunk is actually added below in case
		 * there is no room for this chunk.
		 */
		if (data_auth_reqd && (auth == NULL)) {
			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
		if ((chk->send_size <= (mtu - dmtu)) ||
		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
			/* ok we will add this one */
			if (data_auth_reqd) {
				m = sctp_add_auth_chunk(m,
				auth_keyid = chk->auth_keyid;
				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
			} else if (override_ok) {
				auth_keyid = chk->auth_keyid;
			} else if (chk->auth_keyid != auth_keyid) {
				/* different keyid, so done bundling */
			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			/* Do clear IP_DF ? */
			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
			/* update our MTU size */
			if (mtu > (chk->send_size + dmtu))
				mtu -= (chk->send_size + dmtu);
			data_list[bundle_at++] = chk;
			if (one_chunk && (asoc->total_flight <= 0)) {
				SCTP_STAT_INCR(sctps_windowprobed);
			if (one_chunk == 0) {
				/*
				 * now are there any more forward from chk
				 * to pick up?
				 */
				for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
					if (fwd->sent != SCTP_DATAGRAM_RESEND) {
						/* Nope, not for retran */
					if (fwd->whoTo != net) {
						/* Nope, not the net in question */
					if (data_auth_reqd && (auth == NULL)) {
						dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
					if (fwd->send_size <= (mtu - dmtu)) {
						if (data_auth_reqd) {
							m = sctp_add_auth_chunk(m,
							auth_keyid = fwd->auth_keyid;
							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
						} else if (override_ok) {
							auth_keyid = fwd->auth_keyid;
						} else if (fwd->auth_keyid != auth_keyid) {
						m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
							SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
						/* Do clear IP_DF ? */
						if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
						/* update our MTU size */
						if (mtu > (fwd->send_size + dmtu))
							mtu -= (fwd->send_size + dmtu);
						data_list[bundle_at++] = fwd;
						if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
						/* can't fit so we are done */
		/* Is there something to send for this destination? */
			/*
			 * No matter if we fail or succeed, we should start
			 * a timer. A failure is like a lost IP packet :-)
			 */
			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				/*
				 * no timer running on this destination,
				 * restart it.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
			/* Now let's send it, if there is anything to send :> */
			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
			    (struct sockaddr *)&net->ro._l_addr, m,
			    auth_offset, auth, auth_keyid,
			    no_fragmentflg, 0, 0,
			    inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
				/* error, we could not output */
				SCTP_STAT_INCR(sctps_lowlevelerr);
			/*
			 * We don't want to mark the net->sent time here,
			 * since we use this for HB and a retransmission
			 * cannot measure RTT.
			 */
			/* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
			/* For auto-close */
			if (*now_filled == 0) {
				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
				*now = asoc->time_last_sent;
				asoc->time_last_sent = *now;
			*cnt_out += bundle_at;
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xC4, bundle_at);
				tsns_sent = data_list[0]->rec.data.TSN_seq;
			for (i = 0; i < bundle_at; i++) {
				SCTP_STAT_INCR(sctps_sendretransdata);
				data_list[i]->sent = SCTP_DATAGRAM_SENT;
				/*
				 * When we have revoked data and we
				 * retransmit it, we clear the revoked
				 * flag, since this flag dictates if we
				 * subtracted from the flight size.
				 */
				if (data_list[i]->rec.data.chunk_was_revoked) {
					/* Deflate the cwnd */
					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
					data_list[i]->rec.data.chunk_was_revoked = 0;
				data_list[i]->snd_count++;
				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
				/* record the time */
				data_list[i]->sent_rcv_time = asoc->time_last_sent;
				if (data_list[i]->book_size_scale) {
					/*
					 * need to double the book size on
					 * this one
					 */
					data_list[i]->book_size_scale = 0;
					/*
					 * Since we double the booksize, we
					 * must also double the output queue
					 * size, since it gets shrunk when
					 * we free by this amount.
					 */
					atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
					data_list[i]->book_size *= 2;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
					sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
					    asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
				asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
				    (uint32_t) (data_list[i]->send_size +
				    SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
					    data_list[i]->whoTo->flight_size,
					    data_list[i]->book_size,
					    (uintptr_t) data_list[i]->whoTo,
					    data_list[i]->rec.data.TSN_seq);
				sctp_flight_size_increase(data_list[i]);
				sctp_total_flight_increase(stcb, data_list[i]);
				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
					/* SWS sender side engages */
					asoc->peers_rwnd = 0;
				    (data_list[i]->rec.data.doing_fast_retransmit)) {
					SCTP_STAT_INCR(sctps_sendfastretrans);
					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
					    (tmr_started == 0)) {
						/*
						 * ok we just fast-retrans'd
						 * the lowest TSN, i.e. the
						 * first on the list. In
						 * this case we want to give
						 * some more time to get a
						 * SACK back without a
						 * t3-expiring.
						 */
						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
						    SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(21, inp, stcb, NULL);
			if (asoc->sent_queue_retran_cnt <= 0) {
				/* all done we have no more to retran */
				asoc->sent_queue_retran_cnt = 0;
				/* No more room in rwnd */
			/* stop the for loop here. we sent out a packet */
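
/*
 * Illustrative sketch (added commentary, not from the original source):
 * the bundling rule above is "first eligible RESEND-marked chunk, then any
 * later RESEND-marked chunks for the same net that still fit", e.g. with
 * mtu = 1200 after overhead:
 *
 *	chk->send_size  = 500	-> bundled, mtu becomes 700
 *	fwd1->send_size = 400	-> bundled, mtu becomes 300
 *	fwd2->send_size = 400	-> does not fit, the packet is sent
 *
 * capped at SCTP_MAX_DATA_BUNDLING chunks per packet, with dmtu reserving
 * room for an AUTH chunk whenever DATA must be authenticated.
 */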
sctp_timer_validation(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc)
	struct sctp_nets *net;

	/* Validate that a timer is running somewhere */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
			/* Here is a timer */
	SCTP_TCB_LOCK_ASSERT(stcb);
	/* Gak, we did not have a timer somewhere */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
	if (asoc->alternate) {
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
sctp_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
	/*
	 * OK, this is the generic chunk service queue. We must do the
	 * following:
	 * - See if there are retransmits pending, if so we must
	 * - Service the stream queue that is next, moving any
	 *   message (note I must get a complete message i.e.
	 *   FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
	 * - Check to see if the cwnd/rwnd allows any output, if so we
	 *   go ahead and formulate and send the low level chunks. Making sure
	 *   to combine any control in the control chunk queue also.
	 */
	struct sctp_association *asoc;
	struct sctp_nets *net;
	int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
	unsigned int burst_cnt = 0;
	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
	unsigned int tot_frs = 0;

	/* The Nagle algorithm is only applied when handling a send call. */
	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
	SCTP_TCB_LOCK_ASSERT(stcb);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
	if ((un_sent <= 0) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
	    (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0)) {
		/* Nothing to do unless there is something to be sent left */
	/*
	 * Do we have something to send, data or control, AND a SACK timer
	 * running? If so, piggy-back the SACK.
	 */
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		sctp_send_sack(stcb, so_locked);
		(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
	while (asoc->sent_queue_retran_cnt) {
		/*
		 * OK, it is retransmission time only; we send out only ONE
		 * packet with a single call off to the retran code.
		 */
		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
			/*
			 * Special hook for handling cookies discarded by
			 * the peer that carried data. Send a COOKIE-ACK
			 * only, and then the next call will get the
			 * retransmissions.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &now, &now_filled, frag_point, so_locked);
		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
			/* if it's not from an HB then do it */
			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
			/*
			 * it's from any other place, we don't allow retran
			 * output (only control)
			 */
			/* Can't send any more */
			/*
			 * now let's push out control by calling med-level
			 * output once. This assures that we WILL send HB's
			 * if queued.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &now, &now_filled, frag_point, so_locked);
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(8, inp, stcb, NULL);
			sctp_timer_validation(inp, stcb, asoc);
			/*
			 * The count was off.. retran is not happening so do
			 * the normal retransmission.
			 */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(9, inp, stcb, NULL);
			if (ret == SCTP_RETRAN_EXIT) {
		if (from_where == SCTP_OUTPUT_FROM_T3) {
			/* Only one transmission allowed out of a timeout */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(10, inp, stcb, NULL);
			/* Push out any control */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
			    &now, &now_filled, frag_point, so_locked);
		if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
			/* Hit FR burst limit */
		if ((num_out == 0) && (ret == 0)) {
			/* No more retrans to send */
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(12, inp, stcb, NULL);
	/* Check for bad destinations, if they exist move chunks around. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
			/*
			 * if possible move things off of this address; we
			 * still may send below due to the dormant state,
			 * but we try to find an alternate address to send
			 * to, and if we have one we move all queued data on
			 * the out wheel to this alternate address.
			 */
			if (net->ref_count > 1)
				sctp_move_chunks_from_net(stcb, net);
			/*
			 * if ((asoc->sat_network) || (net->addr_is_local))
			 * { burst_limit = asoc->max_burst *
			 * SCTP_SAT_NETWORK_BURST_INCR; }
			 */
			if (asoc->max_burst > 0) {
				if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
					if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
						/*
						 * JRS - Use the congestion
						 * control given in the
						 * congestion control module
						 */
						asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
							sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
						SCTP_STAT_INCR(sctps_maxburstqueued);
					net->fast_retran_ip = 0;
					if (net->flight_size == 0) {
						/* Should be decaying the cwnd here */
		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
		    &reason_code, 0, from_where,
		    &now, &now_filled, frag_point, so_locked);
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
			sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
		/*
		 * When the Nagle algorithm is used, look at how much is
		 * unsent, then if it's smaller than an MTU and we have data
		 * in flight we stop, except if we are handling a fragmented
		 * user message.
		 */
		un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
		    (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
		if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
		    (stcb->asoc.total_flight > 0) &&
		    ((stcb->asoc.locked_on_sending == NULL) ||
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
		    TAILQ_EMPTY(&asoc->send_queue) &&
		    stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
			/* Nothing left to send */
		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
			/* Nothing left to send */
	} while (num_out &&
	    ((asoc->max_burst == 0) ||
	    SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
	    (burst_cnt < asoc->max_burst)));
	if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
		if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
			SCTP_STAT_INCR(sctps_maxburstqueued);
			asoc->burst_limit_applied = 1;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
			asoc->burst_limit_applied = 0;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
	/*
	 * Now we need to clean up the control chunk chain if an ECNE is on
	 * it. It must be marked as UNSENT again so the next call will
	 * continue to send it until such time that we get a CWR, to remove
	 * it.
	 */
	if (stcb->asoc.ecn_echo_cnt_onq)
		sctp_fix_ecn_echo(asoc);
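
/*
 * Worked example for the Nagle check above (added commentary): with
 * smallest_mtu = 1500 the threshold is 1500 - SCTP_MIN_OVERHEAD. If only a
 * few hundred bytes are queued but unsent (plus the per-message data chunk
 * header allowance) while total_flight > 0, the loop stops and the small
 * message waits to be bundled; once un_sent exceeds the threshold, the
 * flight drains to zero, or SCTP_PCB_FLAGS_NODELAY is set, output proceeds
 * immediately.
 */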
    struct sctp_inpcb *inp,
    struct sockaddr *addr,
    struct mbuf *control,
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
	if (inp->sctp_socket == NULL) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
	return (sctp_sosend(inp->sctp_socket,
	    (struct uio *)NULL,
send_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
	struct sctp_tmit_chunk *chk;
	struct sctp_forward_tsn_chunk *fwdtsn;
	uint32_t advance_peer_ack_point;

	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
			/* mark it as unsent */
			chk->sent = SCTP_DATAGRAM_UNSENT;
			chk->snd_count = 0;
			/* Do we correct its output location? */
				sctp_free_remote_addr(chk->whoTo);
			goto sctp_fill_in_rest;
	/* OK, if we reach here we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	asoc->fwd_tsn_cnt++;
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
	chk->rec.chunk_id.can_take_data = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
	/*
	 * Here we go through and fill out the part that deals with
	 * stream/seq of the ones we skip.
	 */
	SCTP_BUF_LEN(chk->data) = 0;
		struct sctp_tmit_chunk *at, *tp1, *last;
		struct sctp_strseq *strseq;
		unsigned int cnt_of_space, i, ovh;
		unsigned int space_needed;
		unsigned int cnt_of_skipped = 0;

		TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
			if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
			    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
				/* no more to look at */
			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
				/* We don't report these */
		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
		    (cnt_of_skipped * sizeof(struct sctp_strseq)));
		cnt_of_space = M_TRAILINGSPACE(chk->data);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			ovh = SCTP_MIN_OVERHEAD;
			ovh = SCTP_MIN_V4_OVERHEAD;
		if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
			/* trim to an MTU size */
			cnt_of_space = asoc->smallest_mtu - ovh;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
			    0xff, 0, cnt_of_skipped,
			    asoc->advanced_peer_ack_point);
		advance_peer_ack_point = asoc->advanced_peer_ack_point;
		if (cnt_of_space < space_needed) {
			/*
			 * OK, we must trim down the chunk by lowering the
			 * advance peer ack point.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    0xff, 0xff, cnt_of_space,
			cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
			cnt_of_skipped /= sizeof(struct sctp_strseq);
			/*
			 * Go through and find the TSN that will be the one
			 * we report.
			 */
			at = TAILQ_FIRST(&asoc->sent_queue);
			for (i = 0; i < cnt_of_skipped; i++) {
				tp1 = TAILQ_NEXT(at, sctp_next);
			if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				    0xff, cnt_of_skipped, at->rec.data.TSN_seq,
				    asoc->advanced_peer_ack_point);
			/*
			 * last now points to the last one I can report,
			 * update the peer ack point.
			 */
			advance_peer_ack_point = last->rec.data.TSN_seq;
			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
			    cnt_of_skipped * sizeof(struct sctp_strseq);
		chk->send_size = space_needed;
		/* Setup the chunk */
		fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
		fwdtsn->ch.chunk_length = htons(chk->send_size);
		fwdtsn->ch.chunk_flags = 0;
		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
		SCTP_BUF_LEN(chk->data) = chk->send_size;
		/*
		 * Move the pointer to after the fwdtsn and transfer to the
		 * strseq pointer.
		 */
		strseq = (struct sctp_strseq *)fwdtsn;
		/*
		 * Now populate the strseq list. This is done blindly
		 * without pulling out duplicate stream info. This is
		 * inefficient but won't harm the process since the peer
		 * will look at these in sequence and will thus release
		 * anything. It could mean we exceed the PMTU and chop off
		 * some that we could have included.. but this is unlikely
		 * (aka 1432/4 would mean 300+ stream seqs would have to be
		 * reported in one FWD-TSN). With a bit of work we can later
		 * FIX this to optimize and pull out duplicates.. but it
		 * does add more overhead. So for now... not!
		 */
		at = TAILQ_FIRST(&asoc->sent_queue);
		for (i = 0; i < cnt_of_skipped; i++) {
			tp1 = TAILQ_NEXT(at, sctp_next);
			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
				/* We don't report these */
			if (at->rec.data.TSN_seq == advance_peer_ack_point) {
				at->rec.data.fwd_tsn_cnt = 0;
			strseq->stream = htons(at->rec.data.stream_number);
			strseq->sequence = htons(at->rec.data.stream_seq);
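
/*
 * Illustrative layout (added commentary): the chunk built above is the
 * RFC 3758 FORWARD-TSN format, i.e. the chunk header and new cumulative
 * TSN followed by one pair per skipped ordered message:
 *
 *	struct sctp_strseq {
 *		uint16_t stream;
 *		uint16_t sequence;
 *	};
 *
 * so space_needed = sizeof(struct sctp_forward_tsn_chunk) +
 * cnt_of_skipped * sizeof(struct sctp_strseq), exactly as computed above.
 */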
10266 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10267 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10273 * Queue up a SACK or NR-SACK in the control queue.
10274 * We must first check to see if a SACK or NR-SACK is
10275 * somehow on the control queue.
10276 * If so, we will take and and remove the old one.
10278 struct sctp_association *asoc;
10279 struct sctp_tmit_chunk *chk, *a_chk;
10280 struct sctp_sack_chunk *sack;
10281 struct sctp_nr_sack_chunk *nr_sack;
10282 struct sctp_gap_ack_block *gap_descriptor;
10283 struct sack_track *selector;
10288 int limit_reached = 0;
10289 unsigned int i, siz, j;
10290 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10293 uint32_t highest_tsn;
10298 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
10299 (stcb->asoc.peer_supports_nr_sack == 1)) {
10300 type = SCTP_NR_SELECTIVE_ACK;
10302 type = SCTP_SELECTIVE_ACK;
10305 asoc = &stcb->asoc;
10306 SCTP_TCB_LOCK_ASSERT(stcb);
10307 if (asoc->last_data_chunk_from == NULL) {
10308 /* Hmm we never received anything */
10311 sctp_slide_mapping_arrays(stcb);
10312 sctp_set_rwnd(stcb, asoc);
10313 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10314 if (chk->rec.chunk_id.id == type) {
10315 /* Hmm, found a sack already on queue, remove it */
10316 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10317 asoc->ctrl_queue_cnt--;
10320 sctp_m_freem(a_chk->data);
10321 a_chk->data = NULL;
10323 if (a_chk->whoTo) {
10324 sctp_free_remote_addr(a_chk->whoTo);
10325 a_chk->whoTo = NULL;
10330 if (a_chk == NULL) {
10331 sctp_alloc_a_chunk(stcb, a_chk);
10332 if (a_chk == NULL) {
10333 /* No memory so we drop the idea, and set a timer */
10334 if (stcb->asoc.delayed_ack) {
10335 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10336 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10337 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10338 stcb->sctp_ep, stcb, NULL);
10340 stcb->asoc.send_sack = 1;
10344 a_chk->copy_by_ref = 0;
10345 a_chk->rec.chunk_id.id = type;
10346 a_chk->rec.chunk_id.can_take_data = 1;
10348 /* Clear our pkt counts */
10349 asoc->data_pkts_seen = 0;
10351 a_chk->asoc = asoc;
10352 a_chk->snd_count = 0;
10353 a_chk->send_size = 0; /* fill in later */
10354 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10355 a_chk->whoTo = NULL;
10357 if ((asoc->numduptsns) ||
10358 (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
10360 * Ok, we have some duplicates or the destination for the
10361 * sack is unreachable, lets see if we can select an
10362 * alternate than asoc->last_data_chunk_from
10364 if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
10365 (asoc->used_alt_onsack > asoc->numnets)) {
10366 /* We used an alt last time, don't this time */
10367 a_chk->whoTo = NULL;
10369 asoc->used_alt_onsack++;
10370 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10372 if (a_chk->whoTo == NULL) {
10373 /* Nope, no alternate */
10374 a_chk->whoTo = asoc->last_data_chunk_from;
10375 asoc->used_alt_onsack = 0;
10379 * No duplicates so we use the last place we received data
10382 asoc->used_alt_onsack = 0;
10383 a_chk->whoTo = asoc->last_data_chunk_from;
10385 if (a_chk->whoTo) {
10386 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10388 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10389 highest_tsn = asoc->highest_tsn_inside_map;
10391 highest_tsn = asoc->highest_tsn_inside_nr_map;
10393 if (highest_tsn == asoc->cumulative_tsn) {
10395 if (type == SCTP_SELECTIVE_ACK) {
10396 space_req = sizeof(struct sctp_sack_chunk);
10398 space_req = sizeof(struct sctp_nr_sack_chunk);
10401 /* gaps get a cluster */
10402 space_req = MCLBYTES;
10404 /* Ok now lets formulate a MBUF with our sack */
10405 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
10406 if ((a_chk->data == NULL) ||
10407 (a_chk->whoTo == NULL)) {
10408 /* rats, no mbuf memory */
10410 /* was a problem with the destination */
10411 sctp_m_freem(a_chk->data);
10412 a_chk->data = NULL;
10414 sctp_free_a_chunk(stcb, a_chk, so_locked);
10415 /* sa_ignore NO_NULL_CHK */
10416 if (stcb->asoc.delayed_ack) {
10417 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10418 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10419 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10420 stcb->sctp_ep, stcb, NULL);
10422 stcb->asoc.send_sack = 1;
10426 /* ok, lets go through and fill it in */
10427 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10428 space = M_TRAILINGSPACE(a_chk->data);
10429 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10430 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10432 limit = mtod(a_chk->data, caddr_t);
10437 if ((asoc->sctp_cmt_on_off > 0) &&
10438 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10440 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
10441 * received, then set high bit to 1, else 0. Reset
10444 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10445 asoc->cmt_dac_pkts_rcvd = 0;
10447 #ifdef SCTP_ASOCLOG_OF_TSNS
10448 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10449 stcb->asoc.cumack_log_atsnt++;
10450 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10451 stcb->asoc.cumack_log_atsnt = 0;
10454 /* reset the readers interpretation */
10455 stcb->freed_by_sorcv_sincelast = 0;
10457 if (type == SCTP_SELECTIVE_ACK) {
10458 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10460 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10461 if (highest_tsn > asoc->mapping_array_base_tsn) {
10462 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10464 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10468 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10469 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10470 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10471 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10473 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10477 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10480 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10482 if (((type == SCTP_SELECTIVE_ACK) &&
10483 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10484 ((type == SCTP_NR_SELECTIVE_ACK) &&
10485 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10486 /* we have a gap .. maybe */
10487 for (i = 0; i < siz; i++) {
10488 tsn_map = asoc->mapping_array[i];
10489 if (type == SCTP_SELECTIVE_ACK) {
10490 tsn_map |= asoc->nr_mapping_array[i];
10494 * Clear all bits corresponding to TSNs
10495 * smaller or equal to the cumulative TSN.
10497 tsn_map &= (~0 << (1 - offset));
10499 selector = &sack_array[tsn_map];
10500 if (mergeable && selector->right_edge) {
10502 * Backup, left and right edges were ok to
10508 if (selector->num_entries == 0)
10511 for (j = 0; j < selector->num_entries; j++) {
10512 if (mergeable && selector->right_edge) {
10514 * do a merge by NOT setting
10520 * no merge, set the left
10524 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10526 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10529 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10535 if (selector->left_edge) {
10539 if (limit_reached) {
10540 /* Reached the limit stop */
10546 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10547 (limit_reached == 0)) {
10551 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10552 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10554 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10557 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10560 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10562 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10563 /* we have a gap .. maybe */
10564 for (i = 0; i < siz; i++) {
10565 tsn_map = asoc->nr_mapping_array[i];
10568 * Clear all bits corresponding to
10569 * TSNs smaller or equal to the
10572 tsn_map &= (~0 << (1 - offset));
10574 selector = &sack_array[tsn_map];
10575 if (mergeable && selector->right_edge) {
10577 * Backup, left and right edges were
10580 num_nr_gap_blocks--;
10583 if (selector->num_entries == 0)
10586 for (j = 0; j < selector->num_entries; j++) {
10587 if (mergeable && selector->right_edge) {
10589 * do a merge by NOT
10596 * no merge, set the
10600 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10602 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10603 num_nr_gap_blocks++;
10605 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10611 if (selector->left_edge) {
10615 if (limit_reached) {
10616 /* Reached the limit stop */
10623 /* now we must add any dups we are going to report. */
10624 if ((limit_reached == 0) && (asoc->numduptsns)) {
10625 dup = (uint32_t *) gap_descriptor;
10626 for (i = 0; i < asoc->numduptsns; i++) {
10627 *dup = htonl(asoc->dup_tsns[i]);
10630 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10635 asoc->numduptsns = 0;
10638 * now that the chunk is prepared queue it to the control chunk
10641 if (type == SCTP_SELECTIVE_ACK) {
10642 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
10643 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10644 num_dups * sizeof(int32_t);
10645 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10646 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10647 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10648 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10649 sack->sack.num_dup_tsns = htons(num_dups);
10650 sack->ch.chunk_type = type;
10651 sack->ch.chunk_flags = flags;
10652 sack->ch.chunk_length = htons(a_chk->send_size);
10654 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
10655 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10656 num_dups * sizeof(int32_t);
10657 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10658 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10659 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10660 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10661 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10662 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10663 nr_sack->nr_sack.reserved = 0;
10664 nr_sack->ch.chunk_type = type;
10665 nr_sack->ch.chunk_flags = flags;
10666 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10668 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10669 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10670 asoc->ctrl_queue_cnt++;
10671 asoc->send_sack = 0;
10672 SCTP_STAT_INCR(sctps_sendsacks);
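/*
 * Build and send an ABORT chunk for an existing association,
 * prepending an AUTH chunk when the peer requires ABORT to be
 * authenticated and appending any error causes passed in via operr.
 */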
10677 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10678 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10683 struct mbuf *m_abort, *m, *m_last;
10684 struct mbuf *m_out, *m_end = NULL;
10685 struct sctp_abort_chunk *abort;
10686 struct sctp_auth_chunk *auth = NULL;
10687 struct sctp_nets *net;
10689 uint32_t auth_offset = 0;
10690 uint16_t cause_len, chunk_len, padding_len;
10692 SCTP_TCB_LOCK_ASSERT(stcb);
10694 * Add an AUTH chunk, if this chunk requires it, and save the offset
10695 * into the chain for AUTH.
10697 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10698 stcb->asoc.peer_auth_chunks)) {
10699 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10700 stcb, SCTP_ABORT_ASSOCIATION);
10701 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10705 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
10706 if (m_abort == NULL) {
10708 sctp_m_freem(m_out);
10711 sctp_m_freem(operr);
10715 /* link in any error */
10716 SCTP_BUF_NEXT(m_abort) = operr;
10719 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10720 cause_len += (uint16_t) SCTP_BUF_LEN(m);
10721 if (SCTP_BUF_NEXT(m) == NULL) {
10725 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10726 chunk_len = (uint16_t) sizeof(struct sctp_abort_chunk) + cause_len;
10727 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
10728 if (m_out == NULL) {
10729 /* NO Auth chunk prepended, so reserve space in front */
10730 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10733 /* Put AUTH chunk at the front of the chain */
10734 SCTP_BUF_NEXT(m_end) = m_abort;
10736 if (stcb->asoc.alternate) {
10737 net = stcb->asoc.alternate;
10739 net = stcb->asoc.primary_destination;
10741 /* Fill in the ABORT chunk header. */
10742 abort = mtod(m_abort, struct sctp_abort_chunk *);
10743 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10744 if (stcb->asoc.peer_vtag == 0) {
10745 /* This happens iff the assoc is in COOKIE-WAIT state. */
10746 vtag = stcb->asoc.my_vtag;
10747 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
10749 vtag = stcb->asoc.peer_vtag;
10750 abort->ch.chunk_flags = 0;
10752 abort->ch.chunk_length = htons(chunk_len);
10753 /* Add padding, if necessary. */
10754 if (padding_len > 0) {
10755 if ((m_last == NULL) || sctp_add_pad_tombuf(m_last, padding_len)) {
10756 sctp_m_freem(m_out);
10760 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10761 (struct sockaddr *)&net->ro._l_addr,
10762 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10763 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
10764 stcb->asoc.primary_destination->port, NULL,
10767 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10771 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10772 struct sctp_nets *net,
10775 /* formulate and SEND a SHUTDOWN-COMPLETE */
10776 struct mbuf *m_shutdown_comp;
10777 struct sctp_shutdown_complete_chunk *shutdown_complete;
10781 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
10782 if (m_shutdown_comp == NULL) {
10786 if (reflect_vtag) {
10787 flags = SCTP_HAD_NO_TCB;
10788 vtag = stcb->asoc.my_vtag;
10791 vtag = stcb->asoc.peer_vtag;
10793 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10794 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10795 shutdown_complete->ch.chunk_flags = flags;
10796 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10797 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10798 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10799 (struct sockaddr *)&net->ro._l_addr,
10800 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
10801 stcb->sctp_ep->sctp_lport, stcb->rport,
10805 SCTP_SO_NOT_LOCKED);
10806 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
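/*
 * Common helper for replies that are sent without an association
 * (ABORT, OPERATION-ERROR, and SHUTDOWN-COMPLETE for out-of-the-blue
 * packets): build an IPv4/IPv6 header by swapping the addresses of
 * the incoming packet, optionally prepend a UDP tunneling header,
 * fill in the SCTP checksum, and hand the result to the IP output
 * routine.
 */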
10811 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
10812 struct sctphdr *sh, uint32_t vtag,
10813 uint8_t type, struct mbuf *cause,
10814 uint8_t use_mflowid, uint32_t mflowid,
10815 uint32_t vrf_id, uint16_t port)
10817 struct mbuf *o_pak;
10819 struct sctphdr *shout;
10820 struct sctp_chunkhdr *ch;
10821 struct udphdr *udp;
10822 int len, cause_len, padding_len;
10824 #if defined(INET) || defined(INET6)
10829 struct sockaddr_in *src_sin, *dst_sin;
10834 struct sockaddr_in6 *src_sin6, *dst_sin6;
10835 struct ip6_hdr *ip6;
10839 /* Compute the length of the cause and add final padding. */
10841 if (cause != NULL) {
10842 struct mbuf *m_at, *m_last = NULL;
10844 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
10845 if (SCTP_BUF_NEXT(m_at) == NULL)
10847 cause_len += SCTP_BUF_LEN(m_at);
10849 padding_len = cause_len % 4;
10850 if (padding_len != 0) {
10851 padding_len = 4 - padding_len;
10853 if (padding_len != 0) {
10854 if (sctp_add_pad_tombuf(m_last, padding_len)) {
10855 sctp_m_freem(cause);
10862 /* Get an mbuf for the header. */
10863 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
10864 switch (dst->sa_family) {
10867 len += sizeof(struct ip);
10872 len += sizeof(struct ip6_hdr);
10879 len += sizeof(struct udphdr);
10881 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
10882 if (mout == NULL) {
10884 sctp_m_freem(cause);
10888 SCTP_BUF_RESV_UF(mout, max_linkhdr);
10889 SCTP_BUF_LEN(mout) = len;
10890 SCTP_BUF_NEXT(mout) = cause;
10891 if (use_mflowid != 0) {
10892 mout->m_pkthdr.flowid = mflowid;
10893 mout->m_flags |= M_FLOWID;
10901 switch (dst->sa_family) {
10904 src_sin = (struct sockaddr_in *)src;
10905 dst_sin = (struct sockaddr_in *)dst;
10906 ip = mtod(mout, struct ip *);
10907 ip->ip_v = IPVERSION;
10908 ip->ip_hl = (sizeof(struct ip) >> 2);
10910 ip->ip_id = ip_newid();
10912 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
10914 ip->ip_p = IPPROTO_UDP;
10916 ip->ip_p = IPPROTO_SCTP;
10918 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
10919 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
10921 len = sizeof(struct ip);
10922 shout = (struct sctphdr *)((caddr_t)ip + len);
10927 src_sin6 = (struct sockaddr_in6 *)src;
10928 dst_sin6 = (struct sockaddr_in6 *)dst;
10929 ip6 = mtod(mout, struct ip6_hdr *);
10930 ip6->ip6_flow = htonl(0x60000000);
10931 if (V_ip6_auto_flowlabel) {
10932 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
10934 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
10936 ip6->ip6_nxt = IPPROTO_UDP;
10938 ip6->ip6_nxt = IPPROTO_SCTP;
10940 ip6->ip6_src = dst_sin6->sin6_addr;
10941 ip6->ip6_dst = src_sin6->sin6_addr;
10942 len = sizeof(struct ip6_hdr);
10943 shout = (struct sctphdr *)((caddr_t)ip6 + len);
10948 shout = mtod(mout, struct sctphdr *);
10952 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
10953 sctp_m_freem(mout);
10956 udp = (struct udphdr *)shout;
10957 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
10958 udp->uh_dport = port;
10960 udp->uh_ulen = htons(sizeof(struct udphdr) +
10961 sizeof(struct sctphdr) +
10962 sizeof(struct sctp_chunkhdr) +
10963 cause_len + padding_len);
10964 len += sizeof(struct udphdr);
10965 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
10969 shout->src_port = sh->dest_port;
10970 shout->dest_port = sh->src_port;
10971 shout->checksum = 0;
10973 shout->v_tag = htonl(vtag);
10975 shout->v_tag = sh->v_tag;
10977 len += sizeof(struct sctphdr);
10978 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
10979 ch->chunk_type = type;
10981 ch->chunk_flags = 0;
10983 ch->chunk_flags = SCTP_HAD_NO_TCB;
10985 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
10986 len += sizeof(struct sctp_chunkhdr);
10987 len += cause_len + padding_len;
10989 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
10990 sctp_m_freem(mout);
10993 SCTP_ATTACH_CHAIN(o_pak, mout, len);
10994 switch (dst->sa_family) {
10999 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11004 ip->ip_len = htons(len);
11006 #if defined(SCTP_WITH_NO_CSUM)
11007 SCTP_STAT_INCR(sctps_sendnocrc);
11009 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11010 SCTP_STAT_INCR(sctps_sendswcrc);
11013 SCTP_ENABLE_UDP_CSUM(o_pak);
11016 #if defined(SCTP_WITH_NO_CSUM)
11017 SCTP_STAT_INCR(sctps_sendnocrc);
11019 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11020 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11021 SCTP_STAT_INCR(sctps_sendhwcrc);
11024 #ifdef SCTP_PACKET_LOGGING
11025 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11026 sctp_packet_log(o_pak);
11029 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11034 ip6->ip6_plen = len - sizeof(struct ip6_hdr);
11036 #if defined(SCTP_WITH_NO_CSUM)
11037 SCTP_STAT_INCR(sctps_sendnocrc);
11039 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11040 SCTP_STAT_INCR(sctps_sendswcrc);
11042 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11043 udp->uh_sum = 0xffff;
11046 #if defined(SCTP_WITH_NO_CSUM)
11047 SCTP_STAT_INCR(sctps_sendnocrc);
11049 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11050 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11051 SCTP_STAT_INCR(sctps_sendhwcrc);
11054 #ifdef SCTP_PACKET_LOGGING
11055 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11056 sctp_packet_log(o_pak);
11059 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11063 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11065 sctp_m_freem(mout);
11066 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11069 SCTP_STAT_INCR(sctps_sendpackets);
11070 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11071 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
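/*
 * SHUTDOWN-COMPLETE for an out-of-the-blue SHUTDOWN-ACK: no TCB
 * exists, so the vtag of the received header is reflected.
 */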
11076 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11077 struct sctphdr *sh,
11078 uint8_t use_mflowid, uint32_t mflowid,
11079 uint32_t vrf_id, uint16_t port)
11081 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11082 use_mflowid, mflowid,
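/*
 * Queue a HEARTBEAT request to the given net. The chunk carries the
 * current time, so the RTT can be computed when it is echoed back,
 * and random nonces that are drawn from the entropy pool only for
 * unconfirmed addresses.
 */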
11087 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11088 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11093 struct sctp_tmit_chunk *chk;
11094 struct sctp_heartbeat_chunk *hb;
11095 struct timeval now;
11097 SCTP_TCB_LOCK_ASSERT(stcb);
11101 (void)SCTP_GETTIME_TIMEVAL(&now);
11102 switch (net->ro._l_addr.sa.sa_family) {
11114 sctp_alloc_a_chunk(stcb, chk);
11116 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11119 chk->copy_by_ref = 0;
11120 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11121 chk->rec.chunk_id.can_take_data = 1;
11122 chk->asoc = &stcb->asoc;
11123 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11125 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11126 if (chk->data == NULL) {
11127 sctp_free_a_chunk(stcb, chk, so_locked);
11130 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11131 SCTP_BUF_LEN(chk->data) = chk->send_size;
11132 chk->sent = SCTP_DATAGRAM_UNSENT;
11133 chk->snd_count = 0;
11135 atomic_add_int(&chk->whoTo->ref_count, 1);
11136 /* Now we have a mbuf that we can fill in with the details */
11137 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11138 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11139 /* fill out chunk header */
11140 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11141 hb->ch.chunk_flags = 0;
11142 hb->ch.chunk_length = htons(chk->send_size);
11143 /* Fill out hb parameter */
11144 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11145 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11146 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11147 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
11148 /* Did our user request this one? Put it in. */
11149 hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
11150 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11151 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11153 * we only take from the entropy pool if the address is not confirmed.
11156 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11157 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11159 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11160 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11162 switch (net->ro._l_addr.sa.sa_family) {
11165 memcpy(hb->heartbeat.hb_info.address,
11166 &net->ro._l_addr.sin.sin_addr,
11167 sizeof(net->ro._l_addr.sin.sin_addr));
11172 memcpy(hb->heartbeat.hb_info.address,
11173 &net->ro._l_addr.sin6.sin6_addr,
11174 sizeof(net->ro._l_addr.sin6.sin6_addr));
11181 net->hb_responded = 0;
11182 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11183 stcb->asoc.ctrl_queue_cnt++;
11184 SCTP_STAT_INCR(sctps_sendheartbeat);
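/*
 * Queue an ECN-ECHO reporting high_tsn toward net. If an ECN-ECHO
 * for this destination is already on the control queue, update it in
 * place instead of queueing a second one.
 */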
11189 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11192 struct sctp_association *asoc;
11193 struct sctp_ecne_chunk *ecne;
11194 struct sctp_tmit_chunk *chk;
11199 asoc = &stcb->asoc;
11200 SCTP_TCB_LOCK_ASSERT(stcb);
11201 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11202 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11203 /* found a previous ECN_ECHO update it if needed */
11204 uint32_t cnt, ctsn;
11206 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11207 ctsn = ntohl(ecne->tsn);
11208 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11209 ecne->tsn = htonl(high_tsn);
11210 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11212 cnt = ntohl(ecne->num_pkts_since_cwr);
11214 ecne->num_pkts_since_cwr = htonl(cnt);
11218 /* nope could not find one to update so we must build one */
11219 sctp_alloc_a_chunk(stcb, chk);
11223 chk->copy_by_ref = 0;
11224 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11225 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11226 chk->rec.chunk_id.can_take_data = 0;
11227 chk->asoc = &stcb->asoc;
11228 chk->send_size = sizeof(struct sctp_ecne_chunk);
11229 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11230 if (chk->data == NULL) {
11231 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11234 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11235 SCTP_BUF_LEN(chk->data) = chk->send_size;
11236 chk->sent = SCTP_DATAGRAM_UNSENT;
11237 chk->snd_count = 0;
11239 atomic_add_int(&chk->whoTo->ref_count, 1);
11241 stcb->asoc.ecn_echo_cnt_onq++;
11242 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11243 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11244 ecne->ch.chunk_flags = 0;
11245 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11246 ecne->tsn = htonl(high_tsn);
11247 ecne->num_pkts_since_cwr = htonl(1);
11248 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11249 asoc->ctrl_queue_cnt++;
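/*
 * Report a dropped packet to the peer via a PACKET-DROPPED chunk,
 * embedding as much of the offending packet as fits within one MTU.
 * Never sent in response to an ABORT, an INIT-ACK, or another
 * PACKET-DROPPED.
 */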
11253 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11254 struct mbuf *m, int len, int iphlen, int bad_crc)
11256 struct sctp_association *asoc;
11257 struct sctp_pktdrop_chunk *drp;
11258 struct sctp_tmit_chunk *chk;
11264 struct sctp_chunkhdr *ch, chunk_buf;
11265 unsigned int chk_length;
11270 asoc = &stcb->asoc;
11271 SCTP_TCB_LOCK_ASSERT(stcb);
11272 if (asoc->peer_supports_pktdrop == 0) {
11274 * peer must declare support before I send one.
11278 if (stcb->sctp_socket == NULL) {
11281 sctp_alloc_a_chunk(stcb, chk);
11285 chk->copy_by_ref = 0;
11287 chk->send_size = len;
11288 /* Validate that we do not have an ABORT in here. */
11289 offset = iphlen + sizeof(struct sctphdr);
11290 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11291 sizeof(*ch), (uint8_t *) & chunk_buf);
11292 while (ch != NULL) {
11293 chk_length = ntohs(ch->chunk_length);
11294 if (chk_length < sizeof(*ch)) {
11295 /* break to abort land */
11298 switch (ch->chunk_type) {
11299 case SCTP_PACKET_DROPPED:
11300 case SCTP_ABORT_ASSOCIATION:
11301 case SCTP_INITIATION_ACK:
11303 * We don't respond with a PKT-DROP to an ABORT
11304 * or PKT-DROP. We also do not respond to an
11305 * INIT-ACK, because we can't know if the initiation
11306 * tag is correct or not.
11308 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11313 offset += SCTP_SIZE32(chk_length);
11314 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11315 sizeof(*ch), (uint8_t *) & chunk_buf);
11318 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11319 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11321 * only send 1 mtu worth, trim off the excess on the end.
11324 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11327 chk->asoc = &stcb->asoc;
11328 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11329 if (chk->data == NULL) {
11331 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11334 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11335 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11337 sctp_m_freem(chk->data);
11341 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11342 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11343 chk->book_size_scale = 0;
11345 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11346 drp->trunc_len = htons(fullsz);
11348 * Len is already adjusted to size minus overhead above; take
11349 * out the pkt_drop chunk itself from it.
11351 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
11352 len = chk->send_size;
11354 /* no truncation needed */
11355 drp->ch.chunk_flags = 0;
11356 drp->trunc_len = htons(0);
11359 drp->ch.chunk_flags |= SCTP_BADCRC;
11361 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11362 SCTP_BUF_LEN(chk->data) = chk->send_size;
11363 chk->sent = SCTP_DATAGRAM_UNSENT;
11364 chk->snd_count = 0;
11366 /* we should hit here */
11368 atomic_add_int(&chk->whoTo->ref_count, 1);
11372 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11373 chk->rec.chunk_id.can_take_data = 1;
11374 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11375 drp->ch.chunk_length = htons(chk->send_size);
11376 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11380 drp->bottle_bw = htonl(spc);
11381 if (asoc->my_rwnd) {
11382 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11383 asoc->size_on_all_streams +
11384 asoc->my_rwnd_control_len +
11385 stcb->sctp_socket->so_rcv.sb_cc);
11388 * If my rwnd is 0, possibly from mbuf depletion as well as
11389 * space used, tell the peer there is NO space aka onq == bw
11391 drp->current_onq = htonl(spc);
11395 m_copydata(m, iphlen, len, (caddr_t)datap);
11396 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11397 asoc->ctrl_queue_cnt++;
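/*
 * Queue a CWR (congestion window reduced) chunk covering high_tsn,
 * reusing a CWR already queued to the same destination when one
 * exists.
 */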
11401 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11403 struct sctp_association *asoc;
11404 struct sctp_cwr_chunk *cwr;
11405 struct sctp_tmit_chunk *chk;
11407 SCTP_TCB_LOCK_ASSERT(stcb);
11411 asoc = &stcb->asoc;
11412 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11413 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11415 * found a previous CWR queued to the same destination;
11416 * update it if needed
11420 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11421 ctsn = ntohl(cwr->tsn);
11422 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11423 cwr->tsn = htonl(high_tsn);
11425 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11426 /* Make sure override is carried */
11427 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11432 sctp_alloc_a_chunk(stcb, chk);
11436 chk->copy_by_ref = 0;
11437 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11438 chk->rec.chunk_id.can_take_data = 1;
11439 chk->asoc = &stcb->asoc;
11440 chk->send_size = sizeof(struct sctp_cwr_chunk);
11441 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11442 if (chk->data == NULL) {
11443 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11446 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11447 SCTP_BUF_LEN(chk->data) = chk->send_size;
11448 chk->sent = SCTP_DATAGRAM_UNSENT;
11449 chk->snd_count = 0;
11451 atomic_add_int(&chk->whoTo->ref_count, 1);
11452 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11453 cwr->ch.chunk_type = SCTP_ECN_CWR;
11454 cwr->ch.chunk_flags = override;
11455 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11456 cwr->tsn = htonl(high_tsn);
11457 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11458 asoc->ctrl_queue_cnt++;
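/*
 * The sctp_add_stream_reset_*() and sctp_add_an_*_stream() helpers
 * below share one pattern: append a single stream reset parameter to
 * the chunk under construction, then rewrite the chunk length and
 * the chunk's book_size/send_size to cover the new parameter.
 */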
11462 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11463 int number_entries, uint16_t * list,
11464 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11466 uint16_t len, old_len, i;
11467 struct sctp_stream_reset_out_request *req_out;
11468 struct sctp_chunkhdr *ch;
11470 ch = mtod(chk->data, struct sctp_chunkhdr *);
11471 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11473 /* get to new offset for the param. */
11474 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11475 /* now how long will this param be? */
11476 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11477 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11478 req_out->ph.param_length = htons(len);
11479 req_out->request_seq = htonl(seq);
11480 req_out->response_seq = htonl(resp_seq);
11481 req_out->send_reset_at_tsn = htonl(last_sent);
11482 if (number_entries) {
11483 for (i = 0; i < number_entries; i++) {
11484 req_out->list_of_streams[i] = htons(list[i]);
11487 if (SCTP_SIZE32(len) > len) {
11489 * Need to worry about the pad we may end up adding to the
11490 * end. This is easy since the struct is either aligned to 4
11491 * bytes or 2 bytes off.
11493 req_out->list_of_streams[number_entries] = 0;
11495 /* now fix the chunk length */
11496 ch->chunk_length = htons(len + old_len);
11497 chk->book_size = len + old_len;
11498 chk->book_size_scale = 0;
11499 chk->send_size = SCTP_SIZE32(chk->book_size);
11500 SCTP_BUF_LEN(chk->data) = chk->send_size;
11505 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11506 int number_entries, uint16_t * list,
11509 uint16_t len, old_len, i;
11510 struct sctp_stream_reset_in_request *req_in;
11511 struct sctp_chunkhdr *ch;
11513 ch = mtod(chk->data, struct sctp_chunkhdr *);
11514 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11516 /* get to new offset for the param. */
11517 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11518 /* now how long will this param be? */
11519 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11520 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11521 req_in->ph.param_length = htons(len);
11522 req_in->request_seq = htonl(seq);
11523 if (number_entries) {
11524 for (i = 0; i < number_entries; i++) {
11525 req_in->list_of_streams[i] = htons(list[i]);
11528 if (SCTP_SIZE32(len) > len) {
11530 * Need to worry about the pad we may end up adding to the
11531 * end. This is easy since the struct is either aligned to 4
11532 * bytes or 2 bytes off.
11534 req_in->list_of_streams[number_entries] = 0;
11536 /* now fix the chunk length */
11537 ch->chunk_length = htons(len + old_len);
11538 chk->book_size = len + old_len;
11539 chk->book_size_scale = 0;
11540 chk->send_size = SCTP_SIZE32(chk->book_size);
11541 SCTP_BUF_LEN(chk->data) = chk->send_size;
11546 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11549 uint16_t len, old_len;
11550 struct sctp_stream_reset_tsn_request *req_tsn;
11551 struct sctp_chunkhdr *ch;
11553 ch = mtod(chk->data, struct sctp_chunkhdr *);
11554 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11556 /* get to new offset for the param. */
11557 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11558 /* now how long will this param be? */
11559 len = sizeof(struct sctp_stream_reset_tsn_request);
11560 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11561 req_tsn->ph.param_length = htons(len);
11562 req_tsn->request_seq = htonl(seq);
11564 /* now fix the chunk length */
11565 ch->chunk_length = htons(len + old_len);
11566 chk->send_size = len + old_len;
11567 chk->book_size = SCTP_SIZE32(chk->send_size);
11568 chk->book_size_scale = 0;
11569 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11574 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11575 uint32_t resp_seq, uint32_t result)
11577 uint16_t len, old_len;
11578 struct sctp_stream_reset_response *resp;
11579 struct sctp_chunkhdr *ch;
11581 ch = mtod(chk->data, struct sctp_chunkhdr *);
11582 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11584 /* get to new offset for the param. */
11585 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11586 /* now how long will this param be? */
11587 len = sizeof(struct sctp_stream_reset_response);
11588 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11589 resp->ph.param_length = htons(len);
11590 resp->response_seq = htonl(resp_seq);
11591 resp->result = htonl(result);
11593 /* now fix the chunk length */
11594 ch->chunk_length = htons(len + old_len);
11595 chk->book_size = len + old_len;
11596 chk->book_size_scale = 0;
11597 chk->send_size = SCTP_SIZE32(chk->book_size);
11598 SCTP_BUF_LEN(chk->data) = chk->send_size;
11603 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11604 uint32_t resp_seq, uint32_t result,
11605 uint32_t send_una, uint32_t recv_next)
11607 uint16_t len, old_len;
11608 struct sctp_stream_reset_response_tsn *resp;
11609 struct sctp_chunkhdr *ch;
11611 ch = mtod(chk->data, struct sctp_chunkhdr *);
11612 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11614 /* get to new offset for the param. */
11615 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11616 /* now how long will this param be? */
11617 len = sizeof(struct sctp_stream_reset_response_tsn);
11618 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11619 resp->ph.param_length = htons(len);
11620 resp->response_seq = htonl(resp_seq);
11621 resp->result = htonl(result);
11622 resp->senders_next_tsn = htonl(send_una);
11623 resp->receivers_next_tsn = htonl(recv_next);
11625 /* now fix the chunk length */
11626 ch->chunk_length = htons(len + old_len);
11627 chk->book_size = len + old_len;
11628 chk->send_size = SCTP_SIZE32(chk->book_size);
11629 chk->book_size_scale = 0;
11630 SCTP_BUF_LEN(chk->data) = chk->send_size;
11635 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11639 uint16_t len, old_len;
11640 struct sctp_chunkhdr *ch;
11641 struct sctp_stream_reset_add_strm *addstr;
11643 ch = mtod(chk->data, struct sctp_chunkhdr *);
11644 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11646 /* get to new offset for the param. */
11647 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11648 /* now how long will this param be? */
11649 len = sizeof(struct sctp_stream_reset_add_strm);
11652 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11653 addstr->ph.param_length = htons(len);
11654 addstr->request_seq = htonl(seq);
11655 addstr->number_of_streams = htons(adding);
11656 addstr->reserved = 0;
11658 /* now fix the chunk length */
11659 ch->chunk_length = htons(len + old_len);
11660 chk->send_size = len + old_len;
11661 chk->book_size = SCTP_SIZE32(chk->send_size);
11662 chk->book_size_scale = 0;
11663 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11668 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
11672 uint16_t len, old_len;
11673 struct sctp_chunkhdr *ch;
11674 struct sctp_stream_reset_add_strm *addstr;
11676 ch = mtod(chk->data, struct sctp_chunkhdr *);
11677 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11679 /* get to new offset for the param. */
11680 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11681 /* now how long will this param be? */
11682 len = sizeof(struct sctp_stream_reset_add_strm);
11684 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
11685 addstr->ph.param_length = htons(len);
11686 addstr->request_seq = htonl(seq);
11687 addstr->number_of_streams = htons(adding);
11688 addstr->reserved = 0;
11690 /* now fix the chunk length */
11691 ch->chunk_length = htons(len + old_len);
11692 chk->send_size = len + old_len;
11693 chk->book_size = SCTP_SIZE32(chk->send_size);
11694 chk->book_size_scale = 0;
11695 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
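/*
 * Build a single STREAM-RESET chunk carrying whichever reset/add
 * requests the caller selected, growing the outbound stream array
 * first when more outgoing streams were requested, then queue the
 * chunk and start the stream reset timer. Only one request may be
 * outstanding at a time.
 */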
11700 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11701 int number_entries, uint16_t * list,
11702 uint8_t send_out_req,
11703 uint8_t send_in_req,
11704 uint8_t send_tsn_req,
11705 uint8_t add_stream,
11707 uint16_t adding_i, uint8_t peer_asked)
11710 struct sctp_association *asoc;
11711 struct sctp_tmit_chunk *chk;
11712 struct sctp_chunkhdr *ch;
11715 asoc = &stcb->asoc;
11716 if (asoc->stream_reset_outstanding) {
11718 * Already one pending, must get ACK back to clear the flag.
11720 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11723 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11724 (add_stream == 0)) {
11725 /* nothing to do */
11726 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11729 if (send_tsn_req && (send_out_req || send_in_req)) {
11730 /* error, can't do that */
11731 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11734 sctp_alloc_a_chunk(stcb, chk);
11736 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11739 chk->copy_by_ref = 0;
11740 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11741 chk->rec.chunk_id.can_take_data = 0;
11742 chk->asoc = &stcb->asoc;
11743 chk->book_size = sizeof(struct sctp_chunkhdr);
11744 chk->send_size = SCTP_SIZE32(chk->book_size);
11745 chk->book_size_scale = 0;
11747 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11748 if (chk->data == NULL) {
11749 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11750 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11753 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11755 /* setup chunk parameters */
11756 chk->sent = SCTP_DATAGRAM_UNSENT;
11757 chk->snd_count = 0;
11758 if (stcb->asoc.alternate) {
11759 chk->whoTo = stcb->asoc.alternate;
11761 chk->whoTo = stcb->asoc.primary_destination;
11763 atomic_add_int(&chk->whoTo->ref_count, 1);
11764 ch = mtod(chk->data, struct sctp_chunkhdr *);
11765 ch->chunk_type = SCTP_STREAM_RESET;
11766 ch->chunk_flags = 0;
11767 ch->chunk_length = htons(chk->book_size);
11768 SCTP_BUF_LEN(chk->data) = chk->send_size;
11770 seq = stcb->asoc.str_reset_seq_out;
11771 if (send_out_req) {
11772 sctp_add_stream_reset_out(chk, number_entries, list,
11773 seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
11774 asoc->stream_reset_out_is_outstanding = 1;
11776 asoc->stream_reset_outstanding++;
11778 if ((add_stream & 1) &&
11779 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
11780 /* Need to allocate more */
11781 struct sctp_stream_out *oldstream;
11782 struct sctp_stream_queue_pending *sp, *nsp;
11785 oldstream = stcb->asoc.strmout;
11786 /* get some more */
11787 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
11788 ((stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out)),
11790 if (stcb->asoc.strmout == NULL) {
11793 stcb->asoc.strmout = oldstream;
11794 /* Turn off the bit */
11795 x = add_stream & 0xfe;
11800 * Ok now we proceed with copying the old out stuff and
11801 * initializing the new stuff.
11803 SCTP_TCB_SEND_LOCK(stcb);
11804 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
11805 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11806 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11807 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
11808 stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
11809 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
11810 stcb->asoc.strmout[i].stream_no = i;
11811 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
11812 /* now anything on those queues? */
11813 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
11814 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
11815 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
11817 /* Now move assoc pointers too */
11818 if (stcb->asoc.last_out_stream == &oldstream[i]) {
11819 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
11821 if (stcb->asoc.locked_on_sending == &oldstream[i]) {
11822 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
11825 /* now the new streams */
11826 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
11827 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
11828 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11829 stcb->asoc.strmout[i].chunks_on_queues = 0;
11830 stcb->asoc.strmout[i].next_sequence_send = 0x0;
11831 stcb->asoc.strmout[i].stream_no = i;
11832 stcb->asoc.strmout[i].last_msg_incomplete = 0;
11833 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
11835 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
11836 SCTP_FREE(oldstream, SCTP_M_STRMO);
11837 SCTP_TCB_SEND_UNLOCK(stcb);
11840 if ((add_stream & 1) && (adding_o > 0)) {
11841 asoc->strm_pending_add_size = adding_o;
11842 asoc->peer_req_out = peer_asked;
11843 sctp_add_an_out_stream(chk, seq, adding_o);
11845 asoc->stream_reset_outstanding++;
11847 if ((add_stream & 2) && (adding_i > 0)) {
11848 sctp_add_an_in_stream(chk, seq, adding_i);
11850 asoc->stream_reset_outstanding++;
11853 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11855 asoc->stream_reset_outstanding++;
11857 if (send_tsn_req) {
11858 sctp_add_stream_reset_tsn(chk, seq);
11859 asoc->stream_reset_outstanding++;
11861 asoc->str_reset = chk;
11862 /* insert the chunk for sending */
11863 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11866 asoc->ctrl_queue_cnt++;
11867 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
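/*
 * Send an ABORT in response to a packet with no association, unless
 * that packet itself contained an ABORT.
 */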
11872 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
11873 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
11874 uint8_t use_mflowid, uint32_t mflowid,
11875 uint32_t vrf_id, uint16_t port)
11877 /* Don't respond to an ABORT with an ABORT. */
11878 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11880 sctp_m_freem(cause);
11883 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
11884 use_mflowid, mflowid,
11890 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
11891 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
11892 uint8_t use_mflowid, uint32_t mflowid,
11893 uint32_t vrf_id, uint16_t port)
11895 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
11896 use_mflowid, mflowid,
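/*
 * Copy up to max_send_len bytes of user data from the uio into a
 * fresh mbuf chain, returning the number of bytes copied and the
 * last mbuf so the caller can splice the chain onto a pending
 * message.
 */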
11901 static struct mbuf *
11902 sctp_copy_resume(struct uio *uio,
11904 int user_marks_eor,
11907 struct mbuf **new_tail)
11911 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
11912 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
11914 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11917 *sndout = m_length(m, NULL);
11918 *new_tail = m_last(m);
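/*
 * Like sctp_copy_resume(), but fill in a stream queue entry: copy
 * sp->length bytes from the uio into sp->data, leaving room in the
 * first mbuf for the DATA chunk header.
 */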
11924 sctp_copy_one(struct sctp_stream_queue_pending *sp,
11931 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
11933 if (sp->data == NULL) {
11934 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11937 sp->tail_mbuf = m_last(sp->data);
11943 static struct sctp_stream_queue_pending *
11944 sctp_copy_it_in(struct sctp_tcb *stcb,
11945 struct sctp_association *asoc,
11946 struct sctp_sndrcvinfo *srcv,
11948 struct sctp_nets *net,
11950 int user_marks_eor,
11954 * This routine must be very careful in its work. Protocol
11955 * processing is up and running so care must be taken to spl...()
11956 * when you need to do something that may affect the stcb/asoc. The
11957 * sb is locked however. When data is copied the protocol processing
11958 * should be enabled since this is a slower operation...
11960 struct sctp_stream_queue_pending *sp = NULL;
11964 /* Now can we send this? */
11965 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
11966 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
11967 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
11968 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
11969 /* got data while shutting down */
11970 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
11971 *error = ECONNRESET;
11974 sctp_alloc_a_strmoq(stcb, sp);
11976 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11981 sp->sender_all_done = 0;
11982 sp->sinfo_flags = srcv->sinfo_flags;
11983 sp->timetolive = srcv->sinfo_timetolive;
11984 sp->ppid = srcv->sinfo_ppid;
11985 sp->context = srcv->sinfo_context;
11986 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
11988 sp->stream = srcv->sinfo_stream;
11989 sp->length = min(uio->uio_resid, max_send_len);
11990 if ((sp->length == (uint32_t) uio->uio_resid) &&
11991 ((user_marks_eor == 0) ||
11992 (srcv->sinfo_flags & SCTP_EOF) ||
11993 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
11994 sp->msg_is_complete = 1;
11996 sp->msg_is_complete = 0;
11998 sp->sender_all_done = 0;
11999 sp->some_taken = 0;
12000 sp->put_last_out = 0;
12001 resv_in_first = sizeof(struct sctp_data_chunk);
12002 sp->data = sp->tail_mbuf = NULL;
12003 if (sp->length == 0) {
12007 if (srcv->sinfo_keynumber_valid) {
12008 sp->auth_keyid = srcv->sinfo_keynumber;
12010 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12012 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12013 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12014 sp->holds_key_ref = 1;
12016 *error = sctp_copy_one(sp, uio, resv_in_first);
12019 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12022 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12024 atomic_add_int(&sp->net->ref_count, 1);
12028 sctp_set_prsctp_policy(sp);
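/*
 * Socket layer send entry point: extract any SCTP_SNDRCV cmsg from
 * the control mbuf, unmap IPv4-mapped IPv6 addresses, and pass
 * everything on to sctp_lower_sosend().
 */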
12036 sctp_sosend(struct socket *so,
12037 struct sockaddr *addr,
12040 struct mbuf *control,
12045 int error, use_sndinfo = 0;
12046 struct sctp_sndrcvinfo sndrcvninfo;
12047 struct sockaddr *addr_to_use;
12049 #if defined(INET) && defined(INET6)
12050 struct sockaddr_in sin;
12055 /* process cmsg snd/rcv info (maybe an assoc-id) */
12056 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12057 sizeof(sndrcvninfo))) {
12062 addr_to_use = addr;
12063 #if defined(INET) && defined(INET6)
12064 if ((addr) && (addr->sa_family == AF_INET6)) {
12065 struct sockaddr_in6 *sin6;
12067 sin6 = (struct sockaddr_in6 *)addr;
12068 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12069 in6_sin6_2_sin(&sin, sin6);
12070 addr_to_use = (struct sockaddr *)&sin;
12074 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12077 use_sndinfo ? &sndrcvninfo : NULL
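/*
 * The workhorse behind every SCTP send path. Locate or implicitly
 * create the association, enforce the sinfo flags (SCTP_ABORT,
 * SCTP_EOF, SCTP_ADDR_OVER, ...), block until socket buffer space is
 * available on blocking sockets, copy the data onto the per-stream
 * queues, and kick the output engine when appropriate.
 */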
12085 sctp_lower_sosend(struct socket *so,
12086 struct sockaddr *addr,
12088 struct mbuf *i_pak,
12089 struct mbuf *control,
12091 struct sctp_sndrcvinfo *srcv
12096 unsigned int sndlen = 0, max_len;
12098 struct mbuf *top = NULL;
12099 int queue_only = 0, queue_only_for_init = 0;
12100 int free_cnt_applied = 0;
12102 int now_filled = 0;
12103 unsigned int inqueue_bytes = 0;
12104 struct sctp_block_entry be;
12105 struct sctp_inpcb *inp;
12106 struct sctp_tcb *stcb = NULL;
12107 struct timeval now;
12108 struct sctp_nets *net;
12109 struct sctp_association *asoc;
12110 struct sctp_inpcb *t_inp;
12111 int user_marks_eor;
12112 int create_lock_applied = 0;
12113 int nagle_applies = 0;
12114 int some_on_control = 0;
12115 int got_all_of_the_send = 0;
12116 int hold_tcblock = 0;
12117 int non_blocking = 0;
12118 uint32_t local_add_more, local_soresv = 0;
12120 uint16_t sinfo_flags;
12121 sctp_assoc_t sinfo_assoc_id;
12128 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12130 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12133 SCTP_RELEASE_PKT(i_pak);
12137 if ((uio == NULL) && (i_pak == NULL)) {
12138 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12141 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12142 atomic_add_int(&inp->total_sends, 1);
12144 if (uio->uio_resid < 0) {
12145 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12148 sndlen = uio->uio_resid;
12150 top = SCTP_HEADER_TO_CHAIN(i_pak);
12151 sndlen = SCTP_HEADER_LEN(i_pak);
12153 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12156 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12157 (inp->sctp_socket->so_qlimit)) {
12158 /* The listener can NOT send */
12159 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12164 * Pre-screen the address: if one is given, its sin_len
12165 * must be set correctly!
12168 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12170 switch (raddr->sa.sa_family) {
12173 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12174 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12178 port = raddr->sin.sin_port;
12183 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12184 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12188 port = raddr->sin6.sin6_port;
12192 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12193 error = EAFNOSUPPORT;
12200 sinfo_flags = srcv->sinfo_flags;
12201 sinfo_assoc_id = srcv->sinfo_assoc_id;
12202 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12203 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12204 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12208 if (srcv->sinfo_flags)
12209 SCTP_STAT_INCR(sctps_sends_with_flags);
12211 sinfo_flags = inp->def_send.sinfo_flags;
12212 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12214 if (sinfo_flags & SCTP_SENDALL) {
12215 /* it's a sendall */
12216 error = sctp_sendall(inp, uio, top, srcv);
12220 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12221 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12225 /* now we must find the assoc */
12226 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12227 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12228 SCTP_INP_RLOCK(inp);
12229 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12231 SCTP_TCB_LOCK(stcb);
12234 SCTP_INP_RUNLOCK(inp);
12235 } else if (sinfo_assoc_id) {
12236 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
12239 * Since we did not use findep we must
12240 * increment it, and if we don't find a tcb, decrement it.
12243 SCTP_INP_WLOCK(inp);
12244 SCTP_INP_INCR_REF(inp);
12245 SCTP_INP_WUNLOCK(inp);
12246 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12247 if (stcb == NULL) {
12248 SCTP_INP_WLOCK(inp);
12249 SCTP_INP_DECR_REF(inp);
12250 SCTP_INP_WUNLOCK(inp);
12255 if ((stcb == NULL) && (addr)) {
12256 /* Possible implicit send? */
12257 SCTP_ASOC_CREATE_LOCK(inp);
12258 create_lock_applied = 1;
12259 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12260 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12261 /* Should I really unlock? */
12262 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12267 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12268 (addr->sa_family == AF_INET6)) {
12269 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12273 SCTP_INP_WLOCK(inp);
12274 SCTP_INP_INCR_REF(inp);
12275 SCTP_INP_WUNLOCK(inp);
12276 /* With the lock applied look again */
12277 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12278 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12279 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12281 if (stcb == NULL) {
12282 SCTP_INP_WLOCK(inp);
12283 SCTP_INP_DECR_REF(inp);
12284 SCTP_INP_WUNLOCK(inp);
12291 if (t_inp != inp) {
12292 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12297 if (stcb == NULL) {
12298 if (addr == NULL) {
12299 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12303 /* We must go ahead and start the INIT process */
12306 if ((sinfo_flags & SCTP_ABORT) ||
12307 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12309 * User asks to abort a non-existent assoc,
12310 * or EOF a non-existent assoc with no data
12312 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12316 /* get an asoc/stcb struct */
12317 vrf_id = inp->def_vrf_id;
12319 if (create_lock_applied == 0) {
12320 panic("Error, should hold create lock and I don't?");
12323 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12326 if (stcb == NULL) {
12327 /* Error is set up for us in the call */
12330 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12331 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12333 * Set the connected flag so we can queue data.
12336 soisconnecting(so);
12339 if (create_lock_applied) {
12340 SCTP_ASOC_CREATE_UNLOCK(inp);
12341 create_lock_applied = 0;
12343 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12346 * Turn on the queue-only flag to prevent data from being sent.
12350 asoc = &stcb->asoc;
12351 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12352 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12354 /* initialize authentication params for the assoc */
12355 sctp_initialize_auth_params(inp, stcb);
12358 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12359 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
12365 /* out with the INIT */
12366 queue_only_for_init = 1;
12368 * we may want to dig in after this call and adjust the MTU
12369 * value. It defaulted to 1500 (constant) but the ro
12370 * structure may now have an update and thus we may need to
12371 * change it BEFORE we append the message.
12375 asoc = &stcb->asoc;
12377 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12378 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12380 net = sctp_findnet(stcb, addr);
12383 if ((net == NULL) ||
12384 ((port != 0) && (port != stcb->rport))) {
12385 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12390 if (stcb->asoc.alternate) {
12391 net = stcb->asoc.alternate;
12393 net = stcb->asoc.primary_destination;
12396 atomic_add_int(&stcb->total_sends, 1);
12397 /* Keep the stcb from being freed under our feet */
12398 atomic_add_int(&asoc->refcnt, 1);
12399 free_cnt_applied = 1;
12401 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12402 if (sndlen > asoc->smallest_mtu) {
12403 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12408 if (SCTP_SO_IS_NBIO(so)
12409 || (flags & MSG_NBIO)
12413 /* would we block? */
12414 if (non_blocking) {
12415 if (hold_tcblock == 0) {
12416 SCTP_TCB_LOCK(stcb);
12419 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12420 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12421 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12422 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12423 if (sndlen > SCTP_SB_LIMIT_SND(so))
12426 error = EWOULDBLOCK;
12429 stcb->asoc.sb_send_resv += sndlen;
12430 SCTP_TCB_UNLOCK(stcb);
12433 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12435 local_soresv = sndlen;
12436 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12437 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12438 error = ECONNRESET;
12441 if (create_lock_applied) {
12442 SCTP_ASOC_CREATE_UNLOCK(inp);
12443 create_lock_applied = 0;
12445 if (asoc->stream_reset_outstanding) {
12447 * Can't queue any data while stream reset is underway.
12449 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
12453 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12454 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12457 /* we are now done with all control */
12459 sctp_m_freem(control);
12462 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12463 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12464 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12465 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12466 if (srcv->sinfo_flags & SCTP_ABORT) {
12469 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12470 error = ECONNRESET;
12474 /* Ok, we will attempt a msgsnd :> */
12476 p->td_ru.ru_msgsnd++;
12478 /* Are we aborting? */
12479 if (srcv->sinfo_flags & SCTP_ABORT) {
12481 int tot_demand, tot_out = 0, max_out;
12483 SCTP_STAT_INCR(sctps_sends_with_abort);
12484 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12485 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12486 /* It has to be up before we abort */
12487 /* how big is the user initiated abort? */
12488 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12492 if (hold_tcblock) {
12493 SCTP_TCB_UNLOCK(stcb);
12497 struct mbuf *cntm = NULL;
12499 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
12501 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12502 tot_out += SCTP_BUF_LEN(cntm);
12506 /* Must fit in an MTU */
12508 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12509 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12511 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12515 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
12518 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12522 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12523 max_out -= sizeof(struct sctp_abort_msg);
12524 if (tot_out > max_out) {
12528 struct sctp_paramhdr *ph;
12530 /* now move forward the data pointer */
12531 ph = mtod(mm, struct sctp_paramhdr *);
12532 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12533 ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out);
12535 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12537 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12540 * Here, if we can't get the user's data we
12541 * still abort; we just don't get to
12542 * send the user's note :-0
12549 SCTP_BUF_NEXT(mm) = top;
12553 if (hold_tcblock == 0) {
12554 SCTP_TCB_LOCK(stcb);
12556 atomic_add_int(&stcb->asoc.refcnt, -1);
12557 free_cnt_applied = 0;
12558 /* release this lock, otherwise we hang on ourselves */
12559 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
12560 /* now relock the stcb so everything is sane */
12564 * In this case top is already chained to mm; avoid a double
12565 * free, since we free it below if top != NULL and the driver
12566 * would free it after sending the packet out
12573 /* Calculate the maximum we can send */
12574 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12575 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12576 if (non_blocking) {
12577 /* we already checked for non-blocking above. */
12580 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12585 if (hold_tcblock) {
12586 SCTP_TCB_UNLOCK(stcb);
12589 /* Is the stream no. valid? */
12590 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12591 /* Invalid stream number */
12592 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12596 if (asoc->strmout == NULL) {
12597 /* huh? software error */
12598 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12602 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
12603 if ((user_marks_eor == 0) &&
12604 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
12605 /* It will NEVER fit */
12606 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12610 if ((uio == NULL) && user_marks_eor) {
12612 * We do not support eeor mode for
12613 * sending with mbuf chains (like sendfile).
12615 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12619 if (user_marks_eor) {
12620 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
12623 * For non-eeor the whole message must fit in
12624 * the socket send buffer.
12626 local_add_more = sndlen;
12629 if (non_blocking) {
12630 goto skip_preblock;
12632 if (((max_len <= local_add_more) &&
12633 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
12635 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12636 /* No room right now! */
12637 SOCKBUF_LOCK(&so->so_snd);
12638 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12639 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
12640 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12641 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
12642 (unsigned int)SCTP_SB_LIMIT_SND(so),
12645 stcb->asoc.stream_queue_cnt,
12646 stcb->asoc.chunks_on_out_queue,
12647 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
12648 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12649 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
12652 stcb->block_entry = &be;
12653 error = sbwait(&so->so_snd);
12654 stcb->block_entry = NULL;
12655 if (error || so->so_error || be.error) {
12658 error = so->so_error;
12663 SOCKBUF_UNLOCK(&so->so_snd);
12666 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12667 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
12668 asoc, stcb->asoc.total_output_queue_size);
12670 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12673 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12675 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12676 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12680 SOCKBUF_UNLOCK(&so->so_snd);
12683 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12687 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
12688 * case. NOTE: uio will be NULL when top/mbuf is passed.
12691 if (srcv->sinfo_flags & SCTP_EOF) {
12692 got_all_of_the_send = 1;
12695 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12701 struct sctp_stream_queue_pending *sp;
12702 struct sctp_stream_out *strm;
12705 SCTP_TCB_SEND_LOCK(stcb);
12706 if ((asoc->stream_locked) &&
12707 (asoc->stream_locked_on != srcv->sinfo_stream)) {
12708 SCTP_TCB_SEND_UNLOCK(stcb);
12709 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12713 SCTP_TCB_SEND_UNLOCK(stcb);
12715 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
12716 if (strm->last_msg_incomplete == 0) {
12718 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
12719 if ((sp == NULL) || (error)) {
12722 SCTP_TCB_SEND_LOCK(stcb);
12723 if (sp->msg_is_complete) {
12724 strm->last_msg_incomplete = 0;
12725 asoc->stream_locked = 0;
12728 * Just got locked to this guy in case of an interrupt.
12731 strm->last_msg_incomplete = 1;
12732 asoc->stream_locked = 1;
12733 asoc->stream_locked_on = srcv->sinfo_stream;
12734 sp->sender_all_done = 0;
12736 sctp_snd_sb_alloc(stcb, sp->length);
12737 atomic_add_int(&asoc->stream_queue_cnt, 1);
12738 if (srcv->sinfo_flags & SCTP_UNORDERED) {
12739 SCTP_STAT_INCR(sctps_sends_with_unord);
12741 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
12742 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
12743 SCTP_TCB_SEND_UNLOCK(stcb);
12745 SCTP_TCB_SEND_LOCK(stcb);
12746 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
12747 SCTP_TCB_SEND_UNLOCK(stcb);
12749 /* ???? Huh ??? last msg is gone */
12751 panic("Warning: Last msg marked incomplete, yet nothing left?");
12753 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
12754 strm->last_msg_incomplete = 0;
        while (uio->uio_resid > 0) {
            /* How much room do we have? */
            struct mbuf *new_tail, *mm;

            if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
                max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
            else
                max_len = 0;
            if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
                (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
                (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
                sndout = 0;
                new_tail = NULL;
                if (hold_tcblock) {
                    SCTP_TCB_UNLOCK(stcb);
                    hold_tcblock = 0;
                }
                mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
                if ((mm == NULL) || error) {
                    if (mm) {
                        sctp_m_freem(mm);
                    }
                    goto out;
                }
                /* Update the mbuf and count */
                SCTP_TCB_SEND_LOCK(stcb);
                if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
                    /*
                     * We need to get out; the peer probably
                     * aborted.
                     */
                    sctp_m_freem(mm);
                    if (stcb->asoc.state & SCTP_STATE_WAS_ABORTED) {
                        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
                        error = ECONNRESET;
                    }
                    SCTP_TCB_SEND_UNLOCK(stcb);
                    goto out;
                }
                if (sp->tail_mbuf) {
                    /* tack it to the end */
                    SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
                    sp->tail_mbuf = new_tail;
                } else {
                    /* A stolen mbuf */
                    sp->data = mm;
                    sp->tail_mbuf = new_tail;
                }
                sctp_snd_sb_alloc(stcb, sndout);
                atomic_add_int(&sp->length, sndout);
                /* Did we reach EOR? */
                if ((uio->uio_resid == 0) &&
                    ((user_marks_eor == 0) ||
                    (srcv->sinfo_flags & SCTP_EOF) ||
                    (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
                    sp->msg_is_complete = 1;
                } else {
                    sp->msg_is_complete = 0;
                }
                SCTP_TCB_SEND_UNLOCK(stcb);
            }
            if (uio->uio_resid == 0) {
                /* got it all? */
                continue;
            }
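            /*
             * PR-SCTP: before blocking, try to reclaim send-buffer space
             * by pruning chunks whose lifetime policy allows them to be
             * dropped.
             */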
            if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
                /*
                 * This is ugly but we must assure locking
                 * order.
                 */
                if (hold_tcblock == 0) {
                    SCTP_TCB_LOCK(stcb);
                    hold_tcblock = 1;
                }
                sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
                inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
                if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
                    max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
                else
                    max_len = 0;
                if (max_len > 0) {
                    continue;
                }
                SCTP_TCB_UNLOCK(stcb);
                hold_tcblock = 0;
            }
            /* wait for space now */
            if (non_blocking) {
                /* Non-blocking io in place out */
                goto skip_out_eof;
            }
            /* What about the INIT, send it maybe */
            if (queue_only_for_init) {
                if (hold_tcblock == 0) {
                    SCTP_TCB_LOCK(stcb);
                    hold_tcblock = 1;
                }
                if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
                    /* a collision took us forward? */
                    queue_only = 0;
                } else {
                    sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
                    SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
                    queue_only = 1;
                }
            }
            if ((net->flight_size > net->cwnd) &&
                (asoc->sctp_cmt_on_off == 0)) {
                SCTP_STAT_INCR(sctps_send_cwnd_avoid);
                queue_only = 1;
            } else if (asoc->ifp_had_enobuf) {
                SCTP_STAT_INCR(sctps_ifnomemqueued);
                if (net->flight_size > (2 * net->mtu)) {
                    queue_only = 1;
                }
                asoc->ifp_had_enobuf = 0;
            }
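            /*
             * un_sent is the queued-but-unsent payload plus one data-chunk
             * header per queued message; it feeds the Nagle test below.
             */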
            un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
                (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
            if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
                (stcb->asoc.total_flight > 0) &&
                (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
                (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
                /*-
                 * Ok, Nagle is set on and we have data outstanding.
                 * Don't send anything and let SACKs drive out the
                 * data unless we have a "full" segment to send.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
                    sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
                }
                SCTP_STAT_INCR(sctps_naglequeued);
                nagle_applies = 1;
            } else {
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
                    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
                        sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
                }
                SCTP_STAT_INCR(sctps_naglesent);
                nagle_applies = 0;
            }
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
                    nagle_applies, un_sent);
                sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
                    stcb->asoc.total_flight,
                    stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
            }
            if (queue_only_for_init)
                queue_only_for_init = 0;
            if ((queue_only == 0) && (nagle_applies == 0)) {
                /*-
                 * Need to start chunk output before blocking. Note
                 * that if a lock is already applied, then input via
                 * the net is happening and output need not be
                 * started here.
                 */
                if (hold_tcblock == 0) {
                    if (SCTP_TCB_TRYLOCK(stcb)) {
                        hold_tcblock = 1;
                        sctp_chunk_output(inp, stcb,
                            SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
                    }
                } else {
                    sctp_chunk_output(inp, stcb,
                        SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
                }
                if (hold_tcblock == 1) {
                    SCTP_TCB_UNLOCK(stcb);
                    hold_tcblock = 0;
                }
            }
            SOCKBUF_LOCK(&so->so_snd);
            /*-
             * This is a bit strange, but should work. The
             * total_output_queue_size is locked and protected by the
             * TCB_LOCK, which was just released. There is a race
             * between releasing it above and getting the socket lock,
             * where SACKs can come in but SB_WAIT has not yet been set
             * on the so_snd buffer to get the wakeup. After the lock
             * is applied, sack processing will also need to lock
             * so->so_snd to do the actual sowwakeup(). So once the
             * socket buffer lock is held, rechecking the size
             * guarantees sleeping safely with the wakeup flag in
             * place.
             */
            if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
                min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                    sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
                        asoc, uio->uio_resid);
                }
                be.error = 0;
                stcb->block_entry = &be;
                error = sbwait(&so->so_snd);
                stcb->block_entry = NULL;
                if (error || so->so_error || be.error) {
                    if (error == 0) {
                        if (so->so_error)
                            error = so->so_error;
                        if (be.error)
                            error = be.error;
                    }
                    SOCKBUF_UNLOCK(&so->so_snd);
                    goto out_unlocked;
                }
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                    sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
                        asoc, stcb->asoc.total_output_queue_size);
                }
            }
            SOCKBUF_UNLOCK(&so->so_snd);
            if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
                goto out_unlocked;
            }
        }
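        /*
         * The copy loop is done; record whether this message still needs
         * more data so the stream stays locked to it.
         */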
        SCTP_TCB_SEND_LOCK(stcb);
        if (sp) {
            if (sp->msg_is_complete == 0) {
                strm->last_msg_incomplete = 1;
                asoc->stream_locked = 1;
                asoc->stream_locked_on = srcv->sinfo_stream;
            } else {
                sp->sender_all_done = 1;
                strm->last_msg_incomplete = 0;
                asoc->stream_locked = 0;
            }
        } else {
            SCTP_PRINTF("Huh no sp TSNH?\n");
            strm->last_msg_incomplete = 0;
            asoc->stream_locked = 0;
        }
        SCTP_TCB_SEND_UNLOCK(stcb);
        if (uio->uio_resid == 0) {
            got_all_of_the_send = 1;
        }
    } else {
        /* We send in a 0, since we do NOT have any locks */
        error = sctp_msg_append(stcb, net, top, srcv, 0);
        top = NULL;
        if (srcv->sinfo_flags & SCTP_EOF) {
            /*
             * This should only happen for Panda, for the mbuf
             * send case, which does NOT yet support EEOR mode.
             * Thus, we can just set this flag to do the proper
             * EOF handling.
             */
            got_all_of_the_send = 1;
        }
    }
    if (error) {
        goto out;
    }
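    /*
     * SCTP_EOF: if every byte of the message was queued, start a graceful
     * shutdown now, or mark SHUTDOWN_PENDING until the queues drain.
     */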
dataless_eof:
    /* EOF thing ? */
    if ((srcv->sinfo_flags & SCTP_EOF) &&
        (got_all_of_the_send == 1)) {
        int cnt;

        SCTP_STAT_INCR(sctps_sends_with_eof);
        error = 0;
        if (hold_tcblock == 0) {
            SCTP_TCB_LOCK(stcb);
            hold_tcblock = 1;
        }
        cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
        if (TAILQ_EMPTY(&asoc->send_queue) &&
            TAILQ_EMPTY(&asoc->sent_queue) &&
            (cnt == 0)) {
            if (asoc->locked_on_sending) {
                goto abort_anyway;
            }
            /* there is nothing queued to send, so I'm done... */
            if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
                struct sctp_nets *netp;

                /* only send SHUTDOWN the first time through */
                if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
                    SCTP_STAT_DECR_GAUGE32(sctps_currestab);
                }
                SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
                SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
                sctp_stop_timers_for_shutdown(stcb);
                if (stcb->asoc.alternate) {
                    netp = stcb->asoc.alternate;
                } else {
                    netp = stcb->asoc.primary_destination;
                }
                sctp_send_shutdown(stcb, netp);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
                    netp);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
                    asoc->primary_destination);
            }
        } else {
            /*-
             * We still got (or just got) data to send, so set
             * SHUTDOWN_PENDING.
             */
            /*-
             * XXX the sockets draft says that SCTP_EOF should be
             * sent with no data. Currently, we will allow user
             * data to be sent first and move to SHUTDOWN-PENDING.
             */
            if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
                if (hold_tcblock == 0) {
                    SCTP_TCB_LOCK(stcb);
                    hold_tcblock = 1;
                }
                if (asoc->locked_on_sending) {
                    /* Locked to send out the data */
                    struct sctp_stream_queue_pending *sp;

                    sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
                    if (sp) {
                        if ((sp->length == 0) && (sp->msg_is_complete == 0))
                            asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
                    }
                }
                asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
                if (TAILQ_EMPTY(&asoc->send_queue) &&
                    TAILQ_EMPTY(&asoc->sent_queue) &&
                    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
            abort_anyway:
                    if (free_cnt_applied) {
                        atomic_add_int(&stcb->asoc.refcnt, -1);
                        free_cnt_applied = 0;
                    }
                    sctp_abort_an_association(stcb->sctp_ep, stcb,
                        NULL, SCTP_SO_LOCKED);
                    /*
                     * now relock the stcb so everything
                     * is sane
                     */
                    hold_tcblock = 0;
                    stcb = NULL;
                    goto out;
                }
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
                    asoc->primary_destination);
                sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
            }
        }
    }
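    /* Non-blocking sends jump here directly, bypassing the EOF handling. */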
skip_out_eof:
    if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
        some_on_control = 1;
    }
    if (queue_only_for_init) {
        if (hold_tcblock == 0) {
            SCTP_TCB_LOCK(stcb);
            hold_tcblock = 1;
        }
        if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
            /* a collision took us forward? */
            queue_only = 0;
        } else {
            sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
            SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
            queue_only = 1;
        }
    }
    if ((net->flight_size > net->cwnd) &&
        (stcb->asoc.sctp_cmt_on_off == 0)) {
        SCTP_STAT_INCR(sctps_send_cwnd_avoid);
        queue_only = 1;
    } else if (asoc->ifp_had_enobuf) {
        SCTP_STAT_INCR(sctps_ifnomemqueued);
        if (net->flight_size > (2 * net->mtu)) {
            queue_only = 1;
        }
        asoc->ifp_had_enobuf = 0;
    }
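    /*
     * Recompute the Nagle decision now that all data is queued; this
     * mirrors the per-iteration check in the copy loop above.
     */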
    un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
        (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
    if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
        (stcb->asoc.total_flight > 0) &&
        (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
        (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
        /*-
         * Ok, Nagle is set on and we have data outstanding.
         * Don't send anything and let SACKs drive out the
         * data unless we have a "full" segment to send.
         */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
            sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
        }
        SCTP_STAT_INCR(sctps_naglequeued);
        nagle_applies = 1;
    } else {
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
            if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
                sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
        }
        SCTP_STAT_INCR(sctps_naglesent);
        nagle_applies = 0;
    }
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
        sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
            nagle_applies, un_sent);
        sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
            stcb->asoc.total_flight,
            stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
    }
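    /*
     * Last chance to push data: a normal send if the peer advertises
     * window, a zero-window probe if nothing is in flight, or a
     * control-only pass when just control chunks are pending.
     */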
    if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
        /* we can attempt to send too. */
        if (hold_tcblock == 0) {
            /*
             * If there is activity recv'ing sacks no need to
             * send
             */
            if (SCTP_TCB_TRYLOCK(stcb)) {
                sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
                hold_tcblock = 1;
            }
        } else {
            sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
        }
    } else if ((queue_only == 0) &&
        (stcb->asoc.peers_rwnd == 0) &&
        (stcb->asoc.total_flight == 0)) {
        /* We get to have a probe outstanding */
        if (hold_tcblock == 0) {
            hold_tcblock = 1;
            SCTP_TCB_LOCK(stcb);
        }
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
    } else if (some_on_control) {
        int num_out, reason, frag_point;

        /* Here we do control only */
        if (hold_tcblock == 0) {
            hold_tcblock = 1;
            SCTP_TCB_LOCK(stcb);
        }
        frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
        (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
            &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
        queue_only, stcb->asoc.peers_rwnd, un_sent,
        stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
        stcb->asoc.total_output_queue_size, error);
out:
out_unlocked:
    if (local_soresv && stcb) {
        atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
    }
    if (create_lock_applied) {
        SCTP_ASOC_CREATE_UNLOCK(inp);
    }
    if ((stcb) && hold_tcblock) {
        SCTP_TCB_UNLOCK(stcb);
    }
    if (stcb && free_cnt_applied) {
        atomic_add_int(&stcb->asoc.refcnt, -1);
    }
#ifdef INVARIANTS
    if (stcb) {
        if (mtx_owned(&stcb->tcb_mtx)) {
            panic("Leaving with tcb mtx owned?");
        }
        if (mtx_owned(&stcb->tcb_send_mtx)) {
            panic("Leaving with tcb send mtx owned?");
        }
    }
    if (inp) {
        sctp_validate_no_locks(inp);
    } else {
        SCTP_PRINTF("Warning - inp is NULL so can't validate locks\n");
    }
#endif
    if (top) {
        sctp_m_freem(top);
    }
    if (control) {
        sctp_m_freem(control);
    }
    return (error);
}
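/*
 * SCTP AUTH (RFC 4895): the chunk built below carries only the chunk
 * header and the negotiated HMAC identifier; the key id and digest are
 * computed when the packet is finally transmitted.
 */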
/*
 * generate an AUTHentication chunk, if required
 */
static struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
    struct mbuf *m_auth;
    struct sctp_auth_chunk *auth;
    int chunk_len;
    struct mbuf *cn;

    if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
        (stcb == NULL))
        return (m);

    /* sysctl disabled auth? */
    if (SCTP_BASE_SYSCTL(sctp_auth_disable))
        return (m);
    /* peer doesn't do auth... */
    if (!stcb->asoc.peer_supports_auth) {
        return (m);
    }
    /* does the requested chunk require auth? */
    if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
        return (m);
    }
    m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
    if (m_auth == NULL) {
        /* no mbufs */
        return (m);
    }
    /* reserve some space if this will be the first mbuf */
    if (m == NULL)
        SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
    /* fill in the AUTH chunk details */
    auth = mtod(m_auth, struct sctp_auth_chunk *);
    bzero(auth, sizeof(*auth));
    auth->ch.chunk_type = SCTP_AUTHENTICATION;
    auth->ch.chunk_flags = 0;
    chunk_len = sizeof(*auth) +
        sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
    auth->ch.chunk_length = htons(chunk_len);
    auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
    /* key id and hmac digest will be computed and filled in upon send */
    /* save the offset where the auth was inserted into the chain */
    *offset = 0;
    for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
        *offset += SCTP_BUF_LEN(cn);
    }
    /* update length and return pointer to the auth chunk */
    SCTP_BUF_LEN(m_auth) = chunk_len;
    m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
    if (auth_ret != NULL) {
        *auth_ret = auth;
    }
    return (m);
}
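/*
 * Verify that an IPv6 source address is covered by an on-link prefix
 * whose advertising router matches the route's next hop.
 */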
#ifdef INET6
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
{
    struct nd_prefix *pfx = NULL;
    struct nd_pfxrouter *pfxrtr = NULL;
    struct sockaddr_in6 gw6;

    if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
        return (0);

    /* get prefix entry of address */
    LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
        if (pfx->ndpr_stateflags & NDPRF_DETACHED)
            continue;
        if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
            &src6->sin6_addr, &pfx->ndpr_mask))
            break;
    }
    /* no prefix entry in the prefix list */
    if (pfx == NULL) {
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
        return (0);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);

    /* search installed gateway from prefix entry */
    LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
        memset(&gw6, 0, sizeof(struct sockaddr_in6));
        gw6.sin6_family = AF_INET6;
        gw6.sin6_len = sizeof(struct sockaddr_in6);
        memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
            sizeof(struct in6_addr));
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
        if (sctp_cmpaddr((struct sockaddr *)&gw6,
            ro->ro_rt->rt_gateway)) {
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
            return (1);
        }
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
    return (0);
}
#endif
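/*
 * IPv4 counterpart: compare the source and gateway addresses under the
 * interface netmask to confirm both sit on the same subnet.
 */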
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
{
    struct sockaddr_in *sin, *mask;
    struct ifaddr *ifa;
    struct in_addr srcnetaddr, gwnetaddr;

    if (ro == NULL || ro->ro_rt == NULL ||
        sifa->address.sa.sa_family != AF_INET) {
        return (0);
    }
    ifa = (struct ifaddr *)sifa->ifa;
    mask = (struct sockaddr_in *)(ifa->ifa_netmask);
    sin = (struct sockaddr_in *)&sifa->address.sin;
    srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);

    sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
    gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
    if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
        return (1);
    }
    return (0);
}