2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_auth.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_asconf.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_bsd_addr.h>
51 #include <netinet/sctp_input.h>
52 #include <netinet/sctp_crc32.h>
53 #include <netinet/udp.h>
54 #include <netinet/udp_var.h>
55 #include <machine/in_cksum.h>
59 #define SCTP_MAX_GAPS_INARRAY 4
61 uint8_t right_edge; /* mergeable on the right edge */
62 uint8_t left_edge; /* mergeable on the left edge */
65 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
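/*
 * Lookup table used when building SACKs: sack_array is indexed by one
 * byte of the association's TSN mapping array. Each entry precomputes
 * the gap-ack blocks described by the bits set in that byte (at most
 * SCTP_MAX_GAPS_INARRAY of them), plus flags saying whether the lowest
 * bit (right_edge) and highest bit (left_edge) are set, so runs that
 * cross a byte boundary can be merged with the neighbouring byte's
 * blocks. For example, the 0x05 entry below starts {1, 0, 2, 0, ...}:
 * two separate one-bit runs, low-order bit set, high-order bit clear.
 */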
68 struct sack_track sack_array[256] = {
69 {0, 0, 0, 0, /* 0x00 */
76 {1, 0, 1, 0, /* 0x01 */
83 {0, 0, 1, 0, /* 0x02 */
90 {1, 0, 1, 0, /* 0x03 */
97 {0, 0, 1, 0, /* 0x04 */
104 {1, 0, 2, 0, /* 0x05 */
111 {0, 0, 1, 0, /* 0x06 */
118 {1, 0, 1, 0, /* 0x07 */
125 {0, 0, 1, 0, /* 0x08 */
132 {1, 0, 2, 0, /* 0x09 */
139 {0, 0, 2, 0, /* 0x0a */
146 {1, 0, 2, 0, /* 0x0b */
153 {0, 0, 1, 0, /* 0x0c */
160 {1, 0, 2, 0, /* 0x0d */
167 {0, 0, 1, 0, /* 0x0e */
174 {1, 0, 1, 0, /* 0x0f */
181 {0, 0, 1, 0, /* 0x10 */
188 {1, 0, 2, 0, /* 0x11 */
195 {0, 0, 2, 0, /* 0x12 */
202 {1, 0, 2, 0, /* 0x13 */
209 {0, 0, 2, 0, /* 0x14 */
216 {1, 0, 3, 0, /* 0x15 */
223 {0, 0, 2, 0, /* 0x16 */
230 {1, 0, 2, 0, /* 0x17 */
237 {0, 0, 1, 0, /* 0x18 */
244 {1, 0, 2, 0, /* 0x19 */
251 {0, 0, 2, 0, /* 0x1a */
258 {1, 0, 2, 0, /* 0x1b */
265 {0, 0, 1, 0, /* 0x1c */
272 {1, 0, 2, 0, /* 0x1d */
279 {0, 0, 1, 0, /* 0x1e */
286 {1, 0, 1, 0, /* 0x1f */
293 {0, 0, 1, 0, /* 0x20 */
300 {1, 0, 2, 0, /* 0x21 */
307 {0, 0, 2, 0, /* 0x22 */
314 {1, 0, 2, 0, /* 0x23 */
321 {0, 0, 2, 0, /* 0x24 */
328 {1, 0, 3, 0, /* 0x25 */
335 {0, 0, 2, 0, /* 0x26 */
342 {1, 0, 2, 0, /* 0x27 */
349 {0, 0, 2, 0, /* 0x28 */
356 {1, 0, 3, 0, /* 0x29 */
363 {0, 0, 3, 0, /* 0x2a */
370 {1, 0, 3, 0, /* 0x2b */
377 {0, 0, 2, 0, /* 0x2c */
384 {1, 0, 3, 0, /* 0x2d */
391 {0, 0, 2, 0, /* 0x2e */
398 {1, 0, 2, 0, /* 0x2f */
405 {0, 0, 1, 0, /* 0x30 */
412 {1, 0, 2, 0, /* 0x31 */
419 {0, 0, 2, 0, /* 0x32 */
426 {1, 0, 2, 0, /* 0x33 */
433 {0, 0, 2, 0, /* 0x34 */
440 {1, 0, 3, 0, /* 0x35 */
447 {0, 0, 2, 0, /* 0x36 */
454 {1, 0, 2, 0, /* 0x37 */
461 {0, 0, 1, 0, /* 0x38 */
468 {1, 0, 2, 0, /* 0x39 */
475 {0, 0, 2, 0, /* 0x3a */
482 {1, 0, 2, 0, /* 0x3b */
489 {0, 0, 1, 0, /* 0x3c */
496 {1, 0, 2, 0, /* 0x3d */
503 {0, 0, 1, 0, /* 0x3e */
510 {1, 0, 1, 0, /* 0x3f */
517 {0, 0, 1, 0, /* 0x40 */
524 {1, 0, 2, 0, /* 0x41 */
531 {0, 0, 2, 0, /* 0x42 */
538 {1, 0, 2, 0, /* 0x43 */
545 {0, 0, 2, 0, /* 0x44 */
552 {1, 0, 3, 0, /* 0x45 */
559 {0, 0, 2, 0, /* 0x46 */
566 {1, 0, 2, 0, /* 0x47 */
573 {0, 0, 2, 0, /* 0x48 */
580 {1, 0, 3, 0, /* 0x49 */
587 {0, 0, 3, 0, /* 0x4a */
594 {1, 0, 3, 0, /* 0x4b */
601 {0, 0, 2, 0, /* 0x4c */
608 {1, 0, 3, 0, /* 0x4d */
615 {0, 0, 2, 0, /* 0x4e */
622 {1, 0, 2, 0, /* 0x4f */
629 {0, 0, 2, 0, /* 0x50 */
636 {1, 0, 3, 0, /* 0x51 */
643 {0, 0, 3, 0, /* 0x52 */
650 {1, 0, 3, 0, /* 0x53 */
657 {0, 0, 3, 0, /* 0x54 */
664 {1, 0, 4, 0, /* 0x55 */
671 {0, 0, 3, 0, /* 0x56 */
678 {1, 0, 3, 0, /* 0x57 */
685 {0, 0, 2, 0, /* 0x58 */
692 {1, 0, 3, 0, /* 0x59 */
699 {0, 0, 3, 0, /* 0x5a */
706 {1, 0, 3, 0, /* 0x5b */
713 {0, 0, 2, 0, /* 0x5c */
720 {1, 0, 3, 0, /* 0x5d */
727 {0, 0, 2, 0, /* 0x5e */
734 {1, 0, 2, 0, /* 0x5f */
741 {0, 0, 1, 0, /* 0x60 */
748 {1, 0, 2, 0, /* 0x61 */
755 {0, 0, 2, 0, /* 0x62 */
762 {1, 0, 2, 0, /* 0x63 */
769 {0, 0, 2, 0, /* 0x64 */
776 {1, 0, 3, 0, /* 0x65 */
783 {0, 0, 2, 0, /* 0x66 */
790 {1, 0, 2, 0, /* 0x67 */
797 {0, 0, 2, 0, /* 0x68 */
804 {1, 0, 3, 0, /* 0x69 */
811 {0, 0, 3, 0, /* 0x6a */
818 {1, 0, 3, 0, /* 0x6b */
825 {0, 0, 2, 0, /* 0x6c */
832 {1, 0, 3, 0, /* 0x6d */
839 {0, 0, 2, 0, /* 0x6e */
846 {1, 0, 2, 0, /* 0x6f */
853 {0, 0, 1, 0, /* 0x70 */
860 {1, 0, 2, 0, /* 0x71 */
867 {0, 0, 2, 0, /* 0x72 */
874 {1, 0, 2, 0, /* 0x73 */
881 {0, 0, 2, 0, /* 0x74 */
888 {1, 0, 3, 0, /* 0x75 */
895 {0, 0, 2, 0, /* 0x76 */
902 {1, 0, 2, 0, /* 0x77 */
909 {0, 0, 1, 0, /* 0x78 */
916 {1, 0, 2, 0, /* 0x79 */
923 {0, 0, 2, 0, /* 0x7a */
930 {1, 0, 2, 0, /* 0x7b */
937 {0, 0, 1, 0, /* 0x7c */
944 {1, 0, 2, 0, /* 0x7d */
951 {0, 0, 1, 0, /* 0x7e */
958 {1, 0, 1, 0, /* 0x7f */
965 {0, 1, 1, 0, /* 0x80 */
972 {1, 1, 2, 0, /* 0x81 */
979 {0, 1, 2, 0, /* 0x82 */
986 {1, 1, 2, 0, /* 0x83 */
993 {0, 1, 2, 0, /* 0x84 */
1000 {1, 1, 3, 0, /* 0x85 */
1007 {0, 1, 2, 0, /* 0x86 */
1014 {1, 1, 2, 0, /* 0x87 */
1021 {0, 1, 2, 0, /* 0x88 */
1028 {1, 1, 3, 0, /* 0x89 */
1035 {0, 1, 3, 0, /* 0x8a */
1042 {1, 1, 3, 0, /* 0x8b */
1049 {0, 1, 2, 0, /* 0x8c */
1056 {1, 1, 3, 0, /* 0x8d */
1063 {0, 1, 2, 0, /* 0x8e */
1070 {1, 1, 2, 0, /* 0x8f */
1077 {0, 1, 2, 0, /* 0x90 */
1084 {1, 1, 3, 0, /* 0x91 */
1091 {0, 1, 3, 0, /* 0x92 */
1098 {1, 1, 3, 0, /* 0x93 */
1105 {0, 1, 3, 0, /* 0x94 */
1112 {1, 1, 4, 0, /* 0x95 */
1119 {0, 1, 3, 0, /* 0x96 */
1126 {1, 1, 3, 0, /* 0x97 */
1133 {0, 1, 2, 0, /* 0x98 */
1140 {1, 1, 3, 0, /* 0x99 */
1147 {0, 1, 3, 0, /* 0x9a */
1154 {1, 1, 3, 0, /* 0x9b */
1161 {0, 1, 2, 0, /* 0x9c */
1168 {1, 1, 3, 0, /* 0x9d */
1175 {0, 1, 2, 0, /* 0x9e */
1182 {1, 1, 2, 0, /* 0x9f */
1189 {0, 1, 2, 0, /* 0xa0 */
1196 {1, 1, 3, 0, /* 0xa1 */
1203 {0, 1, 3, 0, /* 0xa2 */
1210 {1, 1, 3, 0, /* 0xa3 */
1217 {0, 1, 3, 0, /* 0xa4 */
1224 {1, 1, 4, 0, /* 0xa5 */
1231 {0, 1, 3, 0, /* 0xa6 */
1238 {1, 1, 3, 0, /* 0xa7 */
1245 {0, 1, 3, 0, /* 0xa8 */
1252 {1, 1, 4, 0, /* 0xa9 */
1259 {0, 1, 4, 0, /* 0xaa */
1266 {1, 1, 4, 0, /* 0xab */
1273 {0, 1, 3, 0, /* 0xac */
1280 {1, 1, 4, 0, /* 0xad */
1287 {0, 1, 3, 0, /* 0xae */
1294 {1, 1, 3, 0, /* 0xaf */
1301 {0, 1, 2, 0, /* 0xb0 */
1308 {1, 1, 3, 0, /* 0xb1 */
1315 {0, 1, 3, 0, /* 0xb2 */
1322 {1, 1, 3, 0, /* 0xb3 */
1329 {0, 1, 3, 0, /* 0xb4 */
1336 {1, 1, 4, 0, /* 0xb5 */
1343 {0, 1, 3, 0, /* 0xb6 */
1350 {1, 1, 3, 0, /* 0xb7 */
1357 {0, 1, 2, 0, /* 0xb8 */
1364 {1, 1, 3, 0, /* 0xb9 */
1371 {0, 1, 3, 0, /* 0xba */
1378 {1, 1, 3, 0, /* 0xbb */
1385 {0, 1, 2, 0, /* 0xbc */
1392 {1, 1, 3, 0, /* 0xbd */
1399 {0, 1, 2, 0, /* 0xbe */
1406 {1, 1, 2, 0, /* 0xbf */
1413 {0, 1, 1, 0, /* 0xc0 */
1420 {1, 1, 2, 0, /* 0xc1 */
1427 {0, 1, 2, 0, /* 0xc2 */
1434 {1, 1, 2, 0, /* 0xc3 */
1441 {0, 1, 2, 0, /* 0xc4 */
1448 {1, 1, 3, 0, /* 0xc5 */
1455 {0, 1, 2, 0, /* 0xc6 */
1462 {1, 1, 2, 0, /* 0xc7 */
1469 {0, 1, 2, 0, /* 0xc8 */
1476 {1, 1, 3, 0, /* 0xc9 */
1483 {0, 1, 3, 0, /* 0xca */
1490 {1, 1, 3, 0, /* 0xcb */
1497 {0, 1, 2, 0, /* 0xcc */
1504 {1, 1, 3, 0, /* 0xcd */
1511 {0, 1, 2, 0, /* 0xce */
1518 {1, 1, 2, 0, /* 0xcf */
1525 {0, 1, 2, 0, /* 0xd0 */
1532 {1, 1, 3, 0, /* 0xd1 */
1539 {0, 1, 3, 0, /* 0xd2 */
1546 {1, 1, 3, 0, /* 0xd3 */
1553 {0, 1, 3, 0, /* 0xd4 */
1560 {1, 1, 4, 0, /* 0xd5 */
1567 {0, 1, 3, 0, /* 0xd6 */
1574 {1, 1, 3, 0, /* 0xd7 */
1581 {0, 1, 2, 0, /* 0xd8 */
1588 {1, 1, 3, 0, /* 0xd9 */
1595 {0, 1, 3, 0, /* 0xda */
1602 {1, 1, 3, 0, /* 0xdb */
1609 {0, 1, 2, 0, /* 0xdc */
1616 {1, 1, 3, 0, /* 0xdd */
1623 {0, 1, 2, 0, /* 0xde */
1630 {1, 1, 2, 0, /* 0xdf */
1637 {0, 1, 1, 0, /* 0xe0 */
1644 {1, 1, 2, 0, /* 0xe1 */
1651 {0, 1, 2, 0, /* 0xe2 */
1658 {1, 1, 2, 0, /* 0xe3 */
1665 {0, 1, 2, 0, /* 0xe4 */
1672 {1, 1, 3, 0, /* 0xe5 */
1679 {0, 1, 2, 0, /* 0xe6 */
1686 {1, 1, 2, 0, /* 0xe7 */
1693 {0, 1, 2, 0, /* 0xe8 */
1700 {1, 1, 3, 0, /* 0xe9 */
1707 {0, 1, 3, 0, /* 0xea */
1714 {1, 1, 3, 0, /* 0xeb */
1721 {0, 1, 2, 0, /* 0xec */
1728 {1, 1, 3, 0, /* 0xed */
1735 {0, 1, 2, 0, /* 0xee */
1742 {1, 1, 2, 0, /* 0xef */
1749 {0, 1, 1, 0, /* 0xf0 */
1756 {1, 1, 2, 0, /* 0xf1 */
1763 {0, 1, 2, 0, /* 0xf2 */
1770 {1, 1, 2, 0, /* 0xf3 */
1777 {0, 1, 2, 0, /* 0xf4 */
1784 {1, 1, 3, 0, /* 0xf5 */
1791 {0, 1, 2, 0, /* 0xf6 */
1798 {1, 1, 2, 0, /* 0xf7 */
1805 {0, 1, 1, 0, /* 0xf8 */
1812 {1, 1, 2, 0, /* 0xf9 */
1819 {0, 1, 2, 0, /* 0xfa */
1826 {1, 1, 2, 0, /* 0xfb */
1833 {0, 1, 1, 0, /* 0xfc */
1840 {1, 1, 2, 0, /* 0xfd */
1847 {0, 1, 1, 0, /* 0xfe */
1854 {1, 1, 1, 0, /* 0xff */
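/*
 * Scope helper: returns non-zero iff 'ifa' is usable under the supplied
 * scoping rules, filtering out loopback, unspecified, private IPv4,
 * link-local/site-local IPv6 and unusable (deprecated) addresses as the
 * individual scope flags dictate.
 */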
1865 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1866 struct sctp_scoping *scope,
1869 if ((scope->loopback_scope == 0) &&
1870 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1872 * skip loopback if not in scope
1876 switch (ifa->address.sa.sa_family) {
1879 if (scope->ipv4_addr_legal) {
1880 struct sockaddr_in *sin;
1882 sin = (struct sockaddr_in *)&ifa->address.sin;
1883 if (sin->sin_addr.s_addr == 0) {
1884 /* not in scope, unspecified */
1887 if ((scope->ipv4_local_scope == 0) &&
1888 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1889 /* private address not in scope */
1899 if (scope->ipv6_addr_legal) {
1900 struct sockaddr_in6 *sin6;
1903 * Must update the flags, bummer, which means any
1904 * IFA locks must now be applied HERE <->
1907 sctp_gather_internal_ifa_flags(ifa);
1909 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1912 /* ok to use deprecated addresses? */
1913 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1914 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1915 /* skip unspecified addresses */
1918 if ( /* (local_scope == 0) && */
1919 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1922 if ((scope->site_scope == 0) &&
1923 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
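/*
 * Append an IPv4 or IPv6 address parameter describing 'ifa' to the mbuf
 * chain 'm' (used while building INIT/INIT-ACK chunks). If the last
 * mbuf lacks trailing space a new mbuf is allocated and linked in, and
 * *len (when supplied) is advanced by the parameter length.
 */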
1937 static struct mbuf *
1938 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t * len)
1940 struct sctp_paramhdr *parmh;
1944 switch (ifa->address.sa.sa_family) {
1947 plen = (uint16_t) sizeof(struct sctp_ipv4addr_param);
1952 plen = (uint16_t) sizeof(struct sctp_ipv6addr_param);
1958 if (M_TRAILINGSPACE(m) >= plen) {
1959 /* easy side we just drop it on the end */
1960 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1963 /* Need more space */
1965 while (SCTP_BUF_NEXT(mret) != NULL) {
1966 mret = SCTP_BUF_NEXT(mret);
1968 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
1969 if (SCTP_BUF_NEXT(mret) == NULL) {
1970 /* We are hosed, can't add more addresses */
1973 mret = SCTP_BUF_NEXT(mret);
1974 parmh = mtod(mret, struct sctp_paramhdr *);
1976 /* now add the parameter */
1977 switch (ifa->address.sa.sa_family) {
1981 struct sctp_ipv4addr_param *ipv4p;
1982 struct sockaddr_in *sin;
1984 sin = (struct sockaddr_in *)&ifa->address.sin;
1985 ipv4p = (struct sctp_ipv4addr_param *)parmh;
1986 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
1987 parmh->param_length = htons(plen);
1988 ipv4p->addr = sin->sin_addr.s_addr;
1989 SCTP_BUF_LEN(mret) += plen;
1996 struct sctp_ipv6addr_param *ipv6p;
1997 struct sockaddr_in6 *sin6;
1999 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
2000 ipv6p = (struct sctp_ipv6addr_param *)parmh;
2001 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
2002 parmh->param_length = htons(plen);
2003 memcpy(ipv6p->addr, &sin6->sin6_addr,
2004 sizeof(ipv6p->addr));
2005 /* clear embedded scope in the address */
2006 in6_clearscope((struct in6_addr *)ipv6p->addr);
2007 SCTP_BUF_LEN(mret) += plen;
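/*
 * Add the local addresses to be listed in an INIT or INIT-ACK to the
 * chunk in 'm_at'. For a bound-all endpoint the VRF's interface and
 * address lists are walked subject to 'scope'; for a bound-specific
 * endpoint the endpoint's own address list is used, and addresses are
 * only listed when more than one is bound (see the NAT note below).
 * Any pending padding is flushed before each address is appended.
 */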
2022 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2023 struct sctp_scoping *scope,
2024 struct mbuf *m_at, int cnt_inits_to,
2025 uint16_t * padding_len, uint16_t * chunk_len)
2027 struct sctp_vrf *vrf = NULL;
2028 int cnt, limit_out = 0, total_count;
2031 vrf_id = inp->def_vrf_id;
2032 SCTP_IPI_ADDR_RLOCK();
2033 vrf = sctp_find_vrf(vrf_id);
2035 SCTP_IPI_ADDR_RUNLOCK();
2038 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2039 struct sctp_ifa *sctp_ifap;
2040 struct sctp_ifn *sctp_ifnp;
2043 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2045 cnt = SCTP_ADDRESS_LIMIT;
2048 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2049 if ((scope->loopback_scope == 0) &&
2050 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2052 * Skip loopback devices if loopback_scope
2057 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2058 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2061 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2065 if (cnt > SCTP_ADDRESS_LIMIT) {
2069 if (cnt > SCTP_ADDRESS_LIMIT) {
2076 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2078 if ((scope->loopback_scope == 0) &&
2079 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2081 * Skip loopback devices if
2082 * loopback_scope not set
2086 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2087 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2090 if (sctp_is_address_in_scope(sctp_ifap,
2094 if ((chunk_len != NULL) &&
2095 (padding_len != NULL) &&
2096 (*padding_len > 0)) {
2097 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2098 SCTP_BUF_LEN(m_at) += *padding_len;
2099 *chunk_len += *padding_len;
2102 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2113 if (total_count > SCTP_ADDRESS_LIMIT) {
2114 /* No more addresses */
2122 struct sctp_laddr *laddr;
2125 /* First, how many? */
2126 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2127 if (laddr->ifa == NULL) {
2130 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2132 * Address being deleted by the system, don't
2136 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2138 * Address being deleted on this ep don't
2143 if (sctp_is_address_in_scope(laddr->ifa,
2150 * To get through a NAT we only list addresses if we have
2151 * more than one. That way if you just bind a single address
2152 * we let the source of the init dictate our address.
2156 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2157 if (laddr->ifa == NULL) {
2160 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2163 if (sctp_is_address_in_scope(laddr->ifa,
2167 if ((chunk_len != NULL) &&
2168 (padding_len != NULL) &&
2169 (*padding_len > 0)) {
2170 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2171 SCTP_BUF_LEN(m_at) += *padding_len;
2172 *chunk_len += *padding_len;
2175 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2177 if (cnt >= SCTP_ADDRESS_LIMIT) {
2183 SCTP_IPI_ADDR_RUNLOCK();
2187 static struct sctp_ifa *
2188 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2189 uint8_t dest_is_loop,
2190 uint8_t dest_is_priv,
2193 uint8_t dest_is_global = 0;
2195 /* dest_is_priv is true if destination is a private address */
2196 /* dest_is_loop is true if destination is a loopback address */
2199 * Here we determine if it's a preferred address. A preferred address
2200 * means it is the same scope or higher scope than the destination.
2201 * L = loopback, P = private, G = global
2202 * -----------------------------------------
2203 * src | dest | result
2204 * ----------------------------------------
2206 * -----------------------------------------
2207 * P | L | yes-v4 no-v6
2208 * -----------------------------------------
2209 * G | L | yes-v4 no-v6
2210 * -----------------------------------------
2212 * -----------------------------------------
2214 * -----------------------------------------
2216 * -----------------------------------------
2218 * -----------------------------------------
2220 * -----------------------------------------
2222 * -----------------------------------------
2225 if (ifa->address.sa.sa_family != fam) {
2226 /* forget mis-matched family */
2229 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2232 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2233 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2234 /* Ok the address may be ok */
2236 if (fam == AF_INET6) {
2237 /* ok to use deprecated addresses? No, let's not! */
2238 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2239 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2242 if (ifa->src_is_priv && !ifa->src_is_loop) {
2244 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2248 if (ifa->src_is_glob) {
2250 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2257 * Now that we know what is what, implement our table. This could in
2258 * theory be done slicker (it used to be), but this is
2259 * straightforward and easier to validate :-)
2261 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2262 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2263 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2264 dest_is_loop, dest_is_priv, dest_is_global);
2266 if ((ifa->src_is_loop) && (dest_is_priv)) {
2267 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2270 if ((ifa->src_is_glob) && (dest_is_priv)) {
2271 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2274 if ((ifa->src_is_loop) && (dest_is_global)) {
2275 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2278 if ((ifa->src_is_priv) && (dest_is_global)) {
2279 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2282 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2283 /* it's a preferred address */
2287 static struct sctp_ifa *
2288 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2289 uint8_t dest_is_loop,
2290 uint8_t dest_is_priv,
2293 uint8_t dest_is_global = 0;
2296 * Here we determine if it's an acceptable address. An acceptable
2297 * address means it is the same scope or higher scope, but we can
2298 * allow for NAT, which means it's OK to have a global dest and a
2301 * L = loopback, P = private, G = global
2302 * -----------------------------------------
2303 * src | dest | result
2304 * -----------------------------------------
2306 * -----------------------------------------
2307 * P | L | yes-v4 no-v6
2308 * -----------------------------------------
2310 * -----------------------------------------
2312 * -----------------------------------------
2314 * -----------------------------------------
2315 * G | P | yes - May not work
2316 * -----------------------------------------
2318 * -----------------------------------------
2319 * P | G | yes - May not work
2320 * -----------------------------------------
2322 * -----------------------------------------
2325 if (ifa->address.sa.sa_family != fam) {
2326 /* forget non-matching family */
2327 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2328 ifa->address.sa.sa_family, fam);
2331 /* Ok the address may be ok */
2332 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2333 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2334 dest_is_loop, dest_is_priv);
2335 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2339 if (fam == AF_INET6) {
2340 /* ok to use deprecated addresses? */
2341 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2344 if (ifa->src_is_priv) {
2345 /* Special case, linklocal to loop */
2352 * Now that we know what is what, implement our table. This could in
2353 * theory be done slicker (it used to be), but this is
2354 * straightforward and easier to validate :-)
2356 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2359 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2362 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2365 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2368 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2369 /* it's an acceptable address */
2374 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2376 struct sctp_laddr *laddr;
2379 /* There are no restrictions, no TCB :-) */
2382 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2383 if (laddr->ifa == NULL) {
2384 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2388 if (laddr->ifa == ifa) {
2389 /* Yes it is on the list */
2398 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2400 struct sctp_laddr *laddr;
2404 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2405 if (laddr->ifa == NULL) {
2406 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2410 if ((laddr->ifa == ifa) && laddr->action == 0)
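/*
 * Source selection for a bound-specific endpoint with no association:
 * first try a preferred, endpoint-bound address on the interface the
 * route goes out of; failing that, rotate through the endpoint's bound
 * addresses (inp->next_addr_touse), looking first for a preferred and
 * then for an acceptable address.
 */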
2419 static struct sctp_ifa *
2420 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2423 int non_asoc_addr_ok,
2424 uint8_t dest_is_priv,
2425 uint8_t dest_is_loop,
2428 struct sctp_laddr *laddr, *starting_point;
2431 struct sctp_ifn *sctp_ifn;
2432 struct sctp_ifa *sctp_ifa, *sifa;
2433 struct sctp_vrf *vrf;
2436 vrf = sctp_find_vrf(vrf_id);
2440 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2441 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2442 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2444 * first question, is the ifn we will emit on in our list? If so, we
2445 * want such an address. Note that we first looked for a preferred
2449 /* is a preferred one on the interface we route out? */
2450 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2451 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2452 (non_asoc_addr_ok == 0))
2454 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2459 if (sctp_is_addr_in_ep(inp, sifa)) {
2460 atomic_add_int(&sifa->refcount, 1);
2466 * ok, now we need to find one on the list of the addresses. We
2467 * can't get one on the emitting interface, so let's first find a
2468 * preferred one. If not that, an acceptable one; otherwise... we
2471 starting_point = inp->next_addr_touse;
2473 if (inp->next_addr_touse == NULL) {
2474 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2477 for (laddr = inp->next_addr_touse; laddr;
2478 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2479 if (laddr->ifa == NULL) {
2480 /* address has been removed */
2483 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2484 /* address is being deleted */
2487 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2491 atomic_add_int(&sifa->refcount, 1);
2494 if (resettotop == 0) {
2495 inp->next_addr_touse = NULL;
2498 inp->next_addr_touse = starting_point;
2501 if (inp->next_addr_touse == NULL) {
2502 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2505 /* ok, what about an acceptable address in the inp */
2506 for (laddr = inp->next_addr_touse; laddr;
2507 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2508 if (laddr->ifa == NULL) {
2509 /* address has been removed */
2512 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2513 /* address is being deleted */
2516 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2520 atomic_add_int(&sifa->refcount, 1);
2523 if (resettotop == 0) {
2524 inp->next_addr_touse = NULL;
2525 goto once_again_too;
2528 * no address bound can be a source for the destination we are in
2536 static struct sctp_ifa *
2537 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2538 struct sctp_tcb *stcb,
2541 uint8_t dest_is_priv,
2542 uint8_t dest_is_loop,
2543 int non_asoc_addr_ok,
2546 struct sctp_laddr *laddr, *starting_point;
2548 struct sctp_ifn *sctp_ifn;
2549 struct sctp_ifa *sctp_ifa, *sifa;
2550 uint8_t start_at_beginning = 0;
2551 struct sctp_vrf *vrf;
2555 * first question, is the ifn we will emit on in our list? If so, we
2558 vrf = sctp_find_vrf(vrf_id);
2562 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2563 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2564 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2567 * first question, is the ifn we will emit on in our list? If so,
2568 * we want that one. First we look for a preferred. Second, we go
2569 * for an acceptable.
2572 /* first try for a preferred address on the ep */
2573 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2574 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2576 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2577 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2580 if (((non_asoc_addr_ok == 0) &&
2581 (sctp_is_addr_restricted(stcb, sifa))) ||
2582 (non_asoc_addr_ok &&
2583 (sctp_is_addr_restricted(stcb, sifa)) &&
2584 (!sctp_is_addr_pending(stcb, sifa)))) {
2585 /* on the no-no list */
2588 atomic_add_int(&sifa->refcount, 1);
2592 /* next try for an acceptable address on the ep */
2593 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2594 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2596 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2597 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2600 if (((non_asoc_addr_ok == 0) &&
2601 (sctp_is_addr_restricted(stcb, sifa))) ||
2602 (non_asoc_addr_ok &&
2603 (sctp_is_addr_restricted(stcb, sifa)) &&
2604 (!sctp_is_addr_pending(stcb, sifa)))) {
2605 /* on the no-no list */
2608 atomic_add_int(&sifa->refcount, 1);
2615 * if we can't find one like that then we must look at all addresses
2616 * bound, picking first a preferred one and secondly an acceptable one.
2618 starting_point = stcb->asoc.last_used_address;
2620 if (stcb->asoc.last_used_address == NULL) {
2621 start_at_beginning = 1;
2622 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2624 /* search beginning with the last used address */
2625 for (laddr = stcb->asoc.last_used_address; laddr;
2626 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2627 if (laddr->ifa == NULL) {
2628 /* address has been removed */
2631 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2632 /* address is being deleted */
2635 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2638 if (((non_asoc_addr_ok == 0) &&
2639 (sctp_is_addr_restricted(stcb, sifa))) ||
2640 (non_asoc_addr_ok &&
2641 (sctp_is_addr_restricted(stcb, sifa)) &&
2642 (!sctp_is_addr_pending(stcb, sifa)))) {
2643 /* on the no-no list */
2646 stcb->asoc.last_used_address = laddr;
2647 atomic_add_int(&sifa->refcount, 1);
2650 if (start_at_beginning == 0) {
2651 stcb->asoc.last_used_address = NULL;
2652 goto sctp_from_the_top;
2654 /* now try for any higher scope than the destination */
2655 stcb->asoc.last_used_address = starting_point;
2656 start_at_beginning = 0;
2658 if (stcb->asoc.last_used_address == NULL) {
2659 start_at_beginning = 1;
2660 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2662 /* search beginning with the last used address */
2663 for (laddr = stcb->asoc.last_used_address; laddr;
2664 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2665 if (laddr->ifa == NULL) {
2666 /* address has been removed */
2669 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2670 /* address is being deleted */
2673 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2677 if (((non_asoc_addr_ok == 0) &&
2678 (sctp_is_addr_restricted(stcb, sifa))) ||
2679 (non_asoc_addr_ok &&
2680 (sctp_is_addr_restricted(stcb, sifa)) &&
2681 (!sctp_is_addr_pending(stcb, sifa)))) {
2682 /* on the no-no list */
2685 stcb->asoc.last_used_address = laddr;
2686 atomic_add_int(&sifa->refcount, 1);
2689 if (start_at_beginning == 0) {
2690 stcb->asoc.last_used_address = NULL;
2691 goto sctp_from_the_top2;
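/*
 * Walk the interface's address list and return the addr_wanted'th
 * preferred, eligible address, skipping deferred, restricted and, for
 * IPv6, link-local candidates whose scope zone does not match the
 * destination. Returns NULL if fewer eligible addresses exist.
 */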
2696 static struct sctp_ifa *
2697 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2698 struct sctp_tcb *stcb,
2699 int non_asoc_addr_ok,
2700 uint8_t dest_is_loop,
2701 uint8_t dest_is_priv,
2707 struct sctp_ifa *ifa, *sifa;
2708 int num_eligible_addr = 0;
2711 struct sockaddr_in6 sin6, lsa6;
2713 if (fam == AF_INET6) {
2714 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2715 (void)sa6_recoverscope(&sin6);
2718 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2719 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2720 (non_asoc_addr_ok == 0))
2722 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2727 if (fam == AF_INET6 &&
2729 sifa->src_is_loop && sifa->src_is_priv) {
2731 * don't allow fe80::1 to be a src on loop ::1, we
2732 * don't list it to the peer so we will get an
2737 if (fam == AF_INET6 &&
2738 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2739 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2741 * link-local <-> link-local must belong to the same
2744 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2745 (void)sa6_recoverscope(&lsa6);
2746 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2753 * Check if the IPv6 address matches the next-hop. In the
2754 * mobile case, an old IPv6 address may not be deleted from the
2755 * interface. Then the interface has both the previous and new
2756 * addresses. We should use the one corresponding to the
2757 * next-hop. (by micchie)
2760 if (stcb && fam == AF_INET6 &&
2761 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2762 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2769 /* Avoid topologically incorrect IPv4 address */
2770 if (stcb && fam == AF_INET &&
2771 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2772 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2778 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2781 if (((non_asoc_addr_ok == 0) &&
2782 (sctp_is_addr_restricted(stcb, sifa))) ||
2783 (non_asoc_addr_ok &&
2784 (sctp_is_addr_restricted(stcb, sifa)) &&
2785 (!sctp_is_addr_pending(stcb, sifa)))) {
2787 * It is restricted for some reason..
2788 * probably not yet added.
2793 if (num_eligible_addr >= addr_wanted) {
2796 num_eligible_addr++;
2803 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2804 struct sctp_tcb *stcb,
2805 int non_asoc_addr_ok,
2806 uint8_t dest_is_loop,
2807 uint8_t dest_is_priv,
2810 struct sctp_ifa *ifa, *sifa;
2811 int num_eligible_addr = 0;
2813 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2814 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2815 (non_asoc_addr_ok == 0)) {
2818 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2824 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2827 if (((non_asoc_addr_ok == 0) &&
2828 (sctp_is_addr_restricted(stcb, sifa))) ||
2829 (non_asoc_addr_ok &&
2830 (sctp_is_addr_restricted(stcb, sifa)) &&
2831 (!sctp_is_addr_pending(stcb, sifa)))) {
2833 * It is restricted for some reason..
2834 * probably not yet added.
2839 num_eligible_addr++;
2841 return (num_eligible_addr);
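/*
 * Source selection for a bound-all endpoint. Plan A: take the
 * cur_addr_num'th preferred address on the interface the route leaves
 * by. Plan B: a preferred address on any other interface. Plan C: an
 * acceptable address on the emit interface. Plan D: an acceptable
 * address on any interface. As a last resort the search is retried
 * with private (ipv4_local_scope) addresses allowed.
 */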
2844 static struct sctp_ifa *
2845 sctp_choose_boundall(struct sctp_tcb *stcb,
2846 struct sctp_nets *net,
2849 uint8_t dest_is_priv,
2850 uint8_t dest_is_loop,
2851 int non_asoc_addr_ok,
2854 int cur_addr_num = 0, num_preferred = 0;
2856 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2857 struct sctp_ifa *sctp_ifa, *sifa;
2859 struct sctp_vrf *vrf;
2867 * For boundall we can use any address in the association.
2868 * If non_asoc_addr_ok is set we can use any address (at least in
2869 * theory). So we look for preferred addresses first. If we find one,
2870 * we use it. Otherwise we next try to get an address on the
2871 * interface, which we should be able to do (unless non_asoc_addr_ok
2872 * is false and we are routed out that way). In these cases where we
2873 * can't use the address of the interface we go through all the
2874 * ifn's looking for an address we can use and fill that in. Punting
2875 * means we send back address 0, which will probably cause problems
2876 * actually since then IP will fill in the address of the route ifn,
2877 * which means we probably already rejected it.. i.e. here comes an
2880 vrf = sctp_find_vrf(vrf_id);
2884 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2885 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2886 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2887 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2888 if (sctp_ifn == NULL) {
2889 /* ?? We don't have this guy ?? */
2890 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2891 goto bound_all_plan_b;
2893 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
2894 ifn_index, sctp_ifn->ifn_name);
2897 cur_addr_num = net->indx_of_eligible_next_to_use;
2899 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
2904 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
2905 num_preferred, sctp_ifn->ifn_name);
2906 if (num_preferred == 0) {
2908 * no eligible addresses, we must use some other interface
2909 * address if we can find one.
2911 goto bound_all_plan_b;
2914 * Ok, we have num_eligible_addr set with how many we can use; this
2915 * may vary from call to call due to addresses being deprecated
2918 if (cur_addr_num >= num_preferred) {
2922 * select the nth address from the list (where cur_addr_num is the
2923 * nth) and 0 is the first one, 1 is the second one etc...
2925 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
2927 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2928 dest_is_priv, cur_addr_num, fam, ro);
2930 /* if sctp_ifa is NULL, something changed; fall back to plan b. */
2932 atomic_add_int(&sctp_ifa->refcount, 1);
2934 /* save off the index of the next one we will want */
2935 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
2940 * plan_b: Look at all interfaces and find a preferred address. If
2941 * no preferred address is found, fall through to plan_c.
2944 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
2945 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
2946 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
2947 sctp_ifn->ifn_name);
2948 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
2949 /* wrong base scope */
2950 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
2953 if ((sctp_ifn == looked_at) && looked_at) {
2954 /* already looked at this guy */
2955 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
2958 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
2959 dest_is_loop, dest_is_priv, fam);
2960 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2961 "Found ifn:%p %d preferred source addresses\n",
2962 ifn, num_preferred);
2963 if (num_preferred == 0) {
2964 /* None on this interface. */
2965 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefered -- skipping to next\n");
2968 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2969 "num preferred:%d on interface:%p cur_addr_num:%d\n",
2970 num_preferred, (void *)sctp_ifn, cur_addr_num);
2973 * Ok, we have num_eligible_addr set with how many we can
2974 * use; this may vary from call to call due to addresses
2975 * being deprecated etc..
2977 if (cur_addr_num >= num_preferred) {
2980 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2981 dest_is_priv, cur_addr_num, fam, ro);
2985 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
2986 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
2988 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
2989 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
2990 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
2991 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
2993 atomic_add_int(&sifa->refcount, 1);
2997 again_with_private_addresses_allowed:
2999 /* plan_c: do we have an acceptable address on the emit interface */
3001 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3002 if (emit_ifn == NULL) {
3003 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3006 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3007 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3008 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3009 (non_asoc_addr_ok == 0)) {
3010 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3013 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3016 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3020 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3021 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3025 if (((non_asoc_addr_ok == 0) &&
3026 (sctp_is_addr_restricted(stcb, sifa))) ||
3027 (non_asoc_addr_ok &&
3028 (sctp_is_addr_restricted(stcb, sifa)) &&
3029 (!sctp_is_addr_pending(stcb, sifa)))) {
3031 * It is restricted for some reason..
3032 * probably not yet added.
3034 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its resticted\n");
3039 SCTP_PRINTF("Stcb is null - no print\n");
3041 atomic_add_int(&sifa->refcount, 1);
3046 * plan_d: We are in trouble. No preferred address on the emit
3047 * interface, and not even a preferred address on any interface. Go
3048 * out and see if we can find an acceptable address somewhere
3049 * amongst all interfaces.
3051 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3052 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3053 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3054 /* wrong base scope */
3057 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3058 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3059 (non_asoc_addr_ok == 0))
3061 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3067 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3071 if (((non_asoc_addr_ok == 0) &&
3072 (sctp_is_addr_restricted(stcb, sifa))) ||
3073 (non_asoc_addr_ok &&
3074 (sctp_is_addr_restricted(stcb, sifa)) &&
3075 (!sctp_is_addr_pending(stcb, sifa)))) {
3077 * It is restricted for some
3078 * reason.. probably not yet added.
3088 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3089 stcb->asoc.scope.ipv4_local_scope = 1;
3091 goto again_with_private_addresses_allowed;
3092 } else if (retried == 1) {
3093 stcb->asoc.scope.ipv4_local_scope = 0;
3100 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3101 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3102 /* wrong base scope */
3105 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3106 struct sctp_ifa *tmp_sifa;
3108 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3109 (non_asoc_addr_ok == 0))
3111 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3114 if (tmp_sifa == NULL) {
3117 if (tmp_sifa == sifa) {
3121 if (sctp_is_address_in_scope(tmp_sifa,
3122 &stcb->asoc.scope, 0) == 0) {
3125 if (((non_asoc_addr_ok == 0) &&
3126 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3127 (non_asoc_addr_ok &&
3128 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3129 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3139 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3140 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3141 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3146 atomic_add_int(&sifa->refcount, 1);
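/*
 * Top-level source address selection: allocate the cached route if
 * needed, classify the destination's scope (loopback, private, global)
 * and dispatch to the bound-all or bound-specific selector above. The
 * returned sctp_ifa, if any, already has its refcount bumped.
 */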
3154 /* tcb may be NULL */
3156 sctp_source_address_selection(struct sctp_inpcb *inp,
3157 struct sctp_tcb *stcb,
3159 struct sctp_nets *net,
3160 int non_asoc_addr_ok, uint32_t vrf_id)
3162 struct sctp_ifa *answer;
3163 uint8_t dest_is_priv, dest_is_loop;
3167 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3171 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3176 * Rules: - Find the route if needed, cache if I can. - Look at
3177 * interface address in route. Is it in the bound list? If so we
3178 * have the best source. - If not we must rotate amongst the
3183 * Do we need to pay attention to scope? We can have a private address
3184 * or a global address we are sourcing or sending to. So if we draw
3186 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3188 * ------------------------------------------
3189 * source * dest * result
3190 * -----------------------------------------
3191 * <a> Private * Global * NAT
3192 * -----------------------------------------
3193 * <b> Private * Private * No problem
3194 * -----------------------------------------
3195 * <c> Global * Private * Huh, How will this work?
3196 * -----------------------------------------
3197 * <d> Global * Global * No Problem
3198 *------------------------------------------
3199 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3201 *------------------------------------------
3202 * source * dest * result
3203 * -----------------------------------------
3204 * <a> Linklocal * Global *
3205 * -----------------------------------------
3206 * <b> Linklocal * Linklocal * No problem
3207 * -----------------------------------------
3208 * <c> Global * Linklocal * Huh, How will this work?
3209 * -----------------------------------------
3210 * <d> Global * Global * No Problem
3211 *------------------------------------------
3212 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3214 * And then we add to that what happens if there are multiple addresses
3215 * assigned to an interface. Remember the ifa on an ifn is a linked
3216 * list of addresses. So one interface can have more than one IP
3217 * address. What happens if we have both a private and a global
3218 * address? Do we then use the context of the destination to sort out
3219 * which one is best? And what about NATs? Sending P->G may get you a NAT
3220 * translation, or should you select the G that's on the interface in
3225 * - count the number of addresses on the interface.
3226 * - if it is one, no problem except case <c>.
3227 * For <a> we will assume a NAT out there.
3228 * - if there are more than one, then we need to worry about scope P
3229 * or G. We should prefer G -> G and P -> P if possible.
3230 * Then as a secondary fall back to mixed types G->P being a last
3232 * - The above all works for bound all, but for bound-specific we need to
3233 * use the same concept but instead only consider the bound
3234 * addresses. If the bound set is NOT assigned to the interface then
3235 * we must use rotation amongst the bound addresses..
3237 if (ro->ro_rt == NULL) {
3239 * Need a route to cache.
3241 SCTP_RTALLOC(ro, vrf_id);
3243 if (ro->ro_rt == NULL) {
3246 fam = ro->ro_dst.sa_family;
3247 dest_is_priv = dest_is_loop = 0;
3248 /* Setup our scopes for the destination */
3252 /* Scope based on outbound address */
3253 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3256 /* mark it as local */
3257 net->addr_is_local = 1;
3259 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3266 /* Scope based on outbound address */
3267 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3268 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3270 * If the address is a loopback address, which
3271 * consists of "::1" OR "fe80::1%lo0", we are
3272 * loopback scope. But we don't use dest_is_priv
3273 * (link local addresses).
3277 /* mark it as local */
3278 net->addr_is_local = 1;
3280 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3286 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3287 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3288 SCTP_IPI_ADDR_RLOCK();
3289 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3293 answer = sctp_choose_boundall(stcb, net, ro, vrf_id,
3294 dest_is_priv, dest_is_loop,
3295 non_asoc_addr_ok, fam);
3296 SCTP_IPI_ADDR_RUNLOCK();
3303 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3304 vrf_id, dest_is_priv,
3306 non_asoc_addr_ok, fam);
3308 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3313 SCTP_IPI_ADDR_RUNLOCK();
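/*
 * Scan the control mbuf chain for a cmsg of type 'c_type' at level
 * IPPROTO_SCTP and copy its payload (cpsize bytes) into 'data'. For the
 * compatibility type SCTP_SNDRCV, SCTP_SNDINFO/SCTP_PRINFO/SCTP_AUTHINFO
 * cmsgs are translated into a struct sctp_sndrcvinfo. Returns non-zero
 * only when a matching, complete cmsg was found and copied out.
 */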
3318 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3321 int tlen, at, found;
3322 struct sctp_sndinfo sndinfo;
3323 struct sctp_prinfo prinfo;
3324 struct sctp_authinfo authinfo;
3326 tlen = SCTP_BUF_LEN(control);
3330 * Independent of how many mbufs, find the c_type inside the control
3331 * structure and copy out the data.
3334 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3335 /* There is not enough room for one more. */
3338 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3339 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3340 /* We don't have a complete CMSG header. */
3343 if (((int)cmh.cmsg_len + at) > tlen) {
3344 /* We don't have the complete CMSG. */
3347 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3348 ((c_type == cmh.cmsg_type) ||
3349 ((c_type == SCTP_SNDRCV) &&
3350 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3351 (cmh.cmsg_type == SCTP_PRINFO) ||
3352 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3353 if (c_type == cmh.cmsg_type) {
3354 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
3357 /* It is exactly what we want. Copy it out. */
3358 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data);
3361 struct sctp_sndrcvinfo *sndrcvinfo;
3363 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3365 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3368 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3370 switch (cmh.cmsg_type) {
3372 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
3375 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3376 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3377 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3378 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3379 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3380 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3383 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
3386 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3387 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3388 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3391 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
3394 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3395 sndrcvinfo->sinfo_keynumber_valid = 1;
3396 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3404 at += CMSG_ALIGN(cmh.cmsg_len);
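/*
 * Apply cmsgs supplied with a send that sets up a new association:
 * SCTP_INIT adjusts the stream counts and timers on the new stcb, and
 * SCTP_DSTADDRV4/SCTP_DSTADDRV6 add further remote addresses. On
 * malformed or unusable input *error is set and the scan aborts.
 */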
3410 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3414 struct sctp_initmsg initmsg;
3417 struct sockaddr_in sin;
3421 struct sockaddr_in6 sin6;
3425 tlen = SCTP_BUF_LEN(control);
3428 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3429 /* There is not enough room for one more. */
3433 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3434 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3435 /* We don't have a complete CMSG header. */
3439 if (((int)cmh.cmsg_len + at) > tlen) {
3440 /* We don't have the complete CMSG. */
3444 if (cmh.cmsg_level == IPPROTO_SCTP) {
3445 switch (cmh.cmsg_type) {
3447 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
3451 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3452 if (initmsg.sinit_max_attempts)
3453 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3454 if (initmsg.sinit_num_ostreams)
3455 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3456 if (initmsg.sinit_max_instreams)
3457 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3458 if (initmsg.sinit_max_init_timeo)
3459 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3460 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3461 struct sctp_stream_out *tmp_str;
3464 /* Default is NOT correct */
3465 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3466 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3467 SCTP_TCB_UNLOCK(stcb);
3468 SCTP_MALLOC(tmp_str,
3469 struct sctp_stream_out *,
3470 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3472 SCTP_TCB_LOCK(stcb);
3473 if (tmp_str != NULL) {
3474 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3475 stcb->asoc.strmout = tmp_str;
3476 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3478 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3480 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3481 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3482 stcb->asoc.strmout[i].chunks_on_queues = 0;
3483 stcb->asoc.strmout[i].next_sequence_send = 0;
3484 stcb->asoc.strmout[i].stream_no = i;
3485 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3486 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
3491 case SCTP_DSTADDRV4:
3492 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3496 memset(&sin, 0, sizeof(struct sockaddr_in));
3497 sin.sin_family = AF_INET;
3498 sin.sin_len = sizeof(struct sockaddr_in);
3499 sin.sin_port = stcb->rport;
3500 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3501 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3502 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3503 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3507 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3508 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3515 case SCTP_DSTADDRV6:
3516 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3520 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3521 sin6.sin6_family = AF_INET6;
3522 sin6.sin6_len = sizeof(struct sockaddr_in6);
3523 sin6.sin6_port = stcb->rport;
3524 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3525 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3526 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3531 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3532 in6_sin6_2_sin(&sin, &sin6);
3533 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3534 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3535 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3539 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3540 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3546 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
3547 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3557 at += CMSG_ALIGN(cmh.cmsg_len);
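/*
 * Look for SCTP_DSTADDRV4/SCTP_DSTADDRV6 cmsgs in the control chain and
 * use the addresses found (with the given port) to locate an existing
 * association via sctp_findassociation_ep_addr().
 */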
3562 static struct sctp_tcb *
3563 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3565 struct mbuf *control,
3566 struct sctp_nets **net_p,
3571 struct sctp_tcb *stcb;
3572 struct sockaddr *addr;
3575 struct sockaddr_in sin;
3579 struct sockaddr_in6 sin6;
3583 tlen = SCTP_BUF_LEN(control);
3586 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3587 /* There is not enough room for one more. */
3591 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3592 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3593 /* We don't have a complete CMSG header. */
3597 if (((int)cmh.cmsg_len + at) > tlen) {
3598 /* We don't have the complete CMSG. */
3602 if (cmh.cmsg_level == IPPROTO_SCTP) {
3603 switch (cmh.cmsg_type) {
3605 case SCTP_DSTADDRV4:
3606 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3610 memset(&sin, 0, sizeof(struct sockaddr_in));
3611 sin.sin_family = AF_INET;
3612 sin.sin_len = sizeof(struct sockaddr_in);
3613 sin.sin_port = port;
3614 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3615 addr = (struct sockaddr *)&sin;
3619 case SCTP_DSTADDRV6:
3620 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3624 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3625 sin6.sin6_family = AF_INET6;
3626 sin6.sin6_len = sizeof(struct sockaddr_in6);
3627 sin6.sin6_port = port;
3628 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3630 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3631 in6_sin6_2_sin(&sin, &sin6);
3632 addr = (struct sockaddr *)&sin;
3635 addr = (struct sockaddr *)&sin6;
3643 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3649 at += CMSG_ALIGN(cmh.cmsg_len);
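/*
 * Build the STATE-COOKIE parameter: a parameter header plus the state
 * cookie 'stc_in', followed by copies of the peer's INIT and our
 * INIT-ACK, with space for the HMAC signature reserved (and zeroed) at
 * the end. *signature is pointed at that area so the caller can fill in
 * the real digest, and the parameter length is set to the total size.
 */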
3654 static struct mbuf *
3655 sctp_add_cookie(struct mbuf *init, int init_offset,
3656 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
3658 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3659 struct sctp_state_cookie *stc;
3660 struct sctp_paramhdr *ph;
3666 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3667 sizeof(struct sctp_paramhdr)), 0,
3668 M_NOWAIT, 1, MT_DATA);
3672 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3673 if (copy_init == NULL) {
3677 #ifdef SCTP_MBUF_LOGGING
3678 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3681 for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
3682 if (SCTP_BUF_IS_EXTENDED(mat)) {
3683 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3688 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3690 if (copy_initack == NULL) {
3692 sctp_m_freem(copy_init);
3695 #ifdef SCTP_MBUF_LOGGING
3696 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3699 for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
3700 if (SCTP_BUF_IS_EXTENDED(mat)) {
3701 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3706 /* easy side we just drop it on the end */
3707 ph = mtod(mret, struct sctp_paramhdr *);
3708 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3709 sizeof(struct sctp_paramhdr);
3710 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3711 sizeof(struct sctp_paramhdr));
3712 ph->param_type = htons(SCTP_STATE_COOKIE);
3713 ph->param_length = 0; /* fill in at the end */
3714 /* Fill in the stc cookie data */
3715 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3717 /* tack the INIT and then the INIT-ACK onto the chain */
3719 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3720 cookie_sz += SCTP_BUF_LEN(m_at);
3721 if (SCTP_BUF_NEXT(m_at) == NULL) {
3722 SCTP_BUF_NEXT(m_at) = copy_init;
3726 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3727 cookie_sz += SCTP_BUF_LEN(m_at);
3728 if (SCTP_BUF_NEXT(m_at) == NULL) {
3729 SCTP_BUF_NEXT(m_at) = copy_initack;
3733 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3734 cookie_sz += SCTP_BUF_LEN(m_at);
3735 if (SCTP_BUF_NEXT(m_at) == NULL) {
3739 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
3741 /* no space, so free the entire chain */
3745 SCTP_BUF_LEN(sig) = 0;
3746 SCTP_BUF_NEXT(m_at) = sig;
3748 foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
3749 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3751 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3752 cookie_sz += SCTP_SIGNATURE_SIZE;
3753 ph->param_length = htons(cookie_sz);
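/*
 * ECN helper: returns SCTP_ECT0_BIT to be OR'ed into the IPv4 TOS /
 * IPv6 traffic class when ECN is allowed on the association, else 0.
 */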
3759 sctp_get_ect(struct sctp_tcb *stcb)
3761 if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
3762 return (SCTP_ECT0_BIT);
3768 #if defined(INET) || defined(INET6)
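/*
 * Called when no usable source address or route exists for 'net':
 * notify the ULP that the destination is unreachable, clear its
 * REACHABLE/PF state and, if it was the primary destination, switch
 * the association to an alternate net and drop the cached source
 * address so it will be re-selected.
 */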
3770 sctp_handle_no_route(struct sctp_tcb *stcb,
3771 struct sctp_nets *net,
3774 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3777 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3778 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3779 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3780 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3781 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
3782 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3786 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3787 net->dest_state &= ~SCTP_ADDR_PF;
3791 if (net == stcb->asoc.primary_destination) {
3792 /* need a new primary */
3793 struct sctp_nets *alt;
3795 alt = sctp_find_alternate_net(stcb, net, 0);
3797 if (stcb->asoc.alternate) {
3798 sctp_free_remote_addr(stcb->asoc.alternate);
3800 stcb->asoc.alternate = alt;
3801 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3802 if (net->ro._s_addr) {
3803 sctp_free_ifa(net->ro._s_addr);
3804 net->ro._s_addr = NULL;
3806 net->src_addr_selected = 0;
3816 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3817 struct sctp_tcb *stcb, /* may be NULL */
3818 struct sctp_nets *net,
3819 struct sockaddr *to,
3821 uint32_t auth_offset,
3822 struct sctp_auth_chunk *auth,
3823 uint16_t auth_keyid,
3824 int nofragment_flag,
3831 union sctp_sockstore *over_addr,
3832 uint8_t use_mflowid, uint32_t mflowid,
3833 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3834 int so_locked SCTP_UNUSED
3839 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3842 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3843 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3844 * - fill in the HMAC digest of any AUTH chunk in the packet.
3845 * - calculate and fill in the SCTP checksum.
3846 * - prepend an IP address header.
3847 * - if boundall use INADDR_ANY.
3848 * - if boundspecific do source address selection.
3849 * - set fragmentation option for IPv4.
3850 * - On return from IP output, check/adjust mtu size of output
3851 * interface and smallest_mtu size as well.
3853 /* Will need ifdefs around this */
3855 struct sctphdr *sctphdr;
3860 #if defined(INET) || defined(INET6)
3862 sctp_route_t *ro = NULL;
3863 struct udphdr *udp = NULL;
3868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3869 struct socket *so = NULL;
3873 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3874 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
3879 vrf_id = stcb->asoc.vrf_id;
3881 vrf_id = inp->def_vrf_id;
3884 /* fill in the HMAC digest for any AUTH chunk in the packet */
3885 if ((auth != NULL) && (stcb != NULL)) {
3886 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
3889 tos_value = net->dscp;
3891 tos_value = stcb->asoc.default_dscp;
3893 tos_value = inp->sctp_ep.default_dscp;
3896 switch (to->sa_family) {
3900 struct ip *ip = NULL;
3901 sctp_route_t iproute;
3904 len = sizeof(struct ip) + sizeof(struct sctphdr);
3906 len += sizeof(struct udphdr);
3908 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
3911 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3914 SCTP_ALIGN_TO_END(newm, len);
3915 SCTP_BUF_LEN(newm) = len;
3916 SCTP_BUF_NEXT(newm) = m;
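/*
 * Rough sketch of what just happened: a fresh mbuf was allocated to
 * hold only the network headers (IP + SCTP, plus UDP when
 * encapsulating), SCTP_ALIGN_TO_END placed that header area toward the
 * end of the buffer (presumably to leave leading space for lower-layer
 * headers to be prepended later), and the original chunk chain was
 * linked behind it so the headers can be written in place below.
 */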
3920 if (net->flowidset == 0) {
3921 panic("Flow ID not set");
3924 m->m_pkthdr.flowid = net->flowid;
3925 m->m_flags |= M_FLOWID;
3927 if (use_mflowid != 0) {
3928 m->m_pkthdr.flowid = mflowid;
3929 m->m_flags |= M_FLOWID;
3932 packet_length = sctp_calculate_len(m);
3933 ip = mtod(m, struct ip *);
3934 ip->ip_v = IPVERSION;
3935 ip->ip_hl = (sizeof(struct ip) >> 2);
3936 if (tos_value == 0) {
3938 * This means, in particular, that it is not set
3939 * at the SCTP layer, so use the value from the IP layer.
3942 tos_value = inp->ip_inp.inp.inp_ip_tos;
3946 tos_value |= sctp_get_ect(stcb);
3948 if ((nofragment_flag) && (port == 0)) {
3949 ip->ip_off = htons(IP_DF);
3951 ip->ip_off = htons(0);
3953 /* FreeBSD has a function for ip_id's */
3954 ip->ip_id = ip_newid();
3956 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
3957 ip->ip_len = htons(packet_length);
3958 ip->ip_tos = tos_value;
3960 ip->ip_p = IPPROTO_UDP;
3962 ip->ip_p = IPPROTO_SCTP;
3967 memset(&iproute, 0, sizeof(iproute));
3968 memcpy(&ro->ro_dst, to, to->sa_len);
3970 ro = (sctp_route_t *) & net->ro;
3972 /* Now the address selection part */
3973 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
3975 /* call the routine to select the src address */
3976 if (net && out_of_asoc_ok == 0) {
3977 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
3978 sctp_free_ifa(net->ro._s_addr);
3979 net->ro._s_addr = NULL;
3980 net->src_addr_selected = 0;
3986 if (net->src_addr_selected == 0) {
3987 /* Cache the source address */
3988 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
3991 net->src_addr_selected = 1;
3993 if (net->ro._s_addr == NULL) {
3994 /* No route to host */
3995 net->src_addr_selected = 0;
3996 sctp_handle_no_route(stcb, net, so_locked);
3997 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
3999 return (EHOSTUNREACH);
4001 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4003 if (over_addr == NULL) {
4004 struct sctp_ifa *_lsrc;
4006 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4010 if (_lsrc == NULL) {
4011 sctp_handle_no_route(stcb, net, so_locked);
4012 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4014 return (EHOSTUNREACH);
4016 ip->ip_src = _lsrc->address.sin.sin_addr;
4017 sctp_free_ifa(_lsrc);
4019 ip->ip_src = over_addr->sin.sin_addr;
4020 SCTP_RTALLOC(ro, vrf_id);
4024 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4025 sctp_handle_no_route(stcb, net, so_locked);
4026 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4028 return (EHOSTUNREACH);
4030 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4031 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4032 udp->uh_dport = port;
4033 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
4035 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
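/*
 * UDP encapsulation (RFC 6951): when a tunneling port is in use the
 * packet goes out as IP | UDP | SCTP, with the UDP source port taken
 * from the sctp_udp_tunneling_port sysctl and the destination port
 * from the peer.  The pseudo-header sum seeded above is completed via
 * SCTP_ENABLE_UDP_CSUM() further down.
 */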
4039 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4041 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4044 sctphdr->src_port = src_port;
4045 sctphdr->dest_port = dest_port;
4046 sctphdr->v_tag = v_tag;
4047 sctphdr->checksum = 0;
4050 * If source address selection fails and we find no
4051 * route then the ip_output should fail as well with
4052 * a NO_ROUTE_TO_HOST type error. We probably should
4053 * catch that somewhere and abort the association
4054 * right away (assuming this is an INIT being sent).
4056 if (ro->ro_rt == NULL) {
4058 * src addr selection failed to find a route
4059 * (or valid source addr), so we can't get
4060 * there from here (yet)!
4062 sctp_handle_no_route(stcb, net, so_locked);
4063 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4065 return (EHOSTUNREACH);
4067 if (ro != &iproute) {
4068 memcpy(&iproute, ro, sizeof(*ro));
4070 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4071 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4072 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4073 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
4074 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4077 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4078 /* failed to prepend data, give up */
4079 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4083 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4085 #if defined(SCTP_WITH_NO_CSUM)
4086 SCTP_STAT_INCR(sctps_sendnocrc);
4088 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4089 SCTP_STAT_INCR(sctps_sendswcrc);
4092 SCTP_ENABLE_UDP_CSUM(o_pak);
4095 #if defined(SCTP_WITH_NO_CSUM)
4096 SCTP_STAT_INCR(sctps_sendnocrc);
4098 m->m_pkthdr.csum_flags = CSUM_SCTP;
4099 m->m_pkthdr.csum_data = 0;
4100 SCTP_STAT_INCR(sctps_sendhwcrc);
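/*
 * Checksum strategy: for UDP-encapsulated packets the CRC32c is
 * computed in software (offload does not apply to the encapsulated
 * case), while for plain SCTP the CSUM_SCTP flag is set so the CRC can
 * be offloaded to the NIC, or completed later by the stack, on the way
 * out.
 */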
4103 #ifdef SCTP_PACKET_LOGGING
4104 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4105 sctp_packet_log(o_pak);
4107 /* send it out. table id is taken from stcb */
4108 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4109 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4110 so = SCTP_INP_SO(inp);
4111 SCTP_SOCKET_UNLOCK(so, 0);
4114 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4115 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4116 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4117 atomic_add_int(&stcb->asoc.refcnt, 1);
4118 SCTP_TCB_UNLOCK(stcb);
4119 SCTP_SOCKET_LOCK(so, 0);
4120 SCTP_TCB_LOCK(stcb);
4121 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4124 SCTP_STAT_INCR(sctps_sendpackets);
4125 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4127 SCTP_STAT_INCR(sctps_senderrors);
4129 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4131 /* free temporary routes */
4135 * PMTU check versus smallest asoc MTU goes here.
4138 if ((ro->ro_rt != NULL) &&
4139 (net->ro._s_addr)) {
4142 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4144 mtu -= sizeof(struct udphdr);
4146 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4147 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4150 } else if (ro->ro_rt == NULL) {
4151 /* route was freed */
4152 if (net->ro._s_addr &&
4153 net->src_addr_selected) {
4154 sctp_free_ifa(net->ro._s_addr);
4155 net->ro._s_addr = NULL;
4157 net->src_addr_selected = 0;
4166 uint32_t flowlabel, flowinfo;
4167 struct ip6_hdr *ip6h;
4168 struct route_in6 ip6route;
4170 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4172 struct sockaddr_in6 lsa6_storage;
4174 u_short prev_port = 0;
4178 flowlabel = net->flowlabel;
4180 flowlabel = stcb->asoc.default_flowlabel;
4182 flowlabel = inp->sctp_ep.default_flowlabel;
4184 if (flowlabel == 0) {
4186 * This means, in particular, that it is not set
4187 * at the SCTP layer, so use the value from the IP layer.
4190 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4192 flowlabel &= 0x000fffff;
4193 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4195 len += sizeof(struct udphdr);
4197 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4200 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4203 SCTP_ALIGN_TO_END(newm, len);
4204 SCTP_BUF_LEN(newm) = len;
4205 SCTP_BUF_NEXT(newm) = m;
4209 if (net->flowidset == 0) {
4210 panic("Flow ID not set");
4213 m->m_pkthdr.flowid = net->flowid;
4214 m->m_flags |= M_FLOWID;
4216 if (use_mflowid != 0) {
4217 m->m_pkthdr.flowid = mflowid;
4218 m->m_flags |= M_FLOWID;
4221 packet_length = sctp_calculate_len(m);
4223 ip6h = mtod(m, struct ip6_hdr *);
4224 /* protect *sin6 from overwrite */
4225 sin6 = (struct sockaddr_in6 *)to;
4229 /* KAME hack: embed scopeid */
4230 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4231 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4235 memset(&ip6route, 0, sizeof(ip6route));
4236 ro = (sctp_route_t *) & ip6route;
4237 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4239 ro = (sctp_route_t *) & net->ro;
4242 * We assume here that inp_flow is in host byte
4243 * order within the TCB!
4245 if (tos_value == 0) {
4247 * This means, in particular, that it is not set
4248 * at the SCTP layer, so use the value from the IP layer.
4251 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4255 tos_value |= sctp_get_ect(stcb);
4259 flowinfo |= tos_value;
4261 flowinfo |= flowlabel;
4262 ip6h->ip6_flow = htonl(flowinfo);
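/*
 * The flow word built above packs the IP version and traffic class
 * (including any ECT marking) into the upper bits and the 20-bit flow
 * label into the low 20 bits; flowlabel was masked with 0x000fffff
 * earlier for exactly that reason.
 */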
4264 ip6h->ip6_nxt = IPPROTO_UDP;
4266 ip6h->ip6_nxt = IPPROTO_SCTP;
4268 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4269 ip6h->ip6_dst = sin6->sin6_addr;
4272 * Add SRC address selection here: we can only reuse
4273 * to a limited degree the kame src-addr-sel, since
4274 * we can try their selection but it may not be bound.
4277 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4278 lsa6_tmp.sin6_family = AF_INET6;
4279 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4281 if (net && out_of_asoc_ok == 0) {
4282 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4283 sctp_free_ifa(net->ro._s_addr);
4284 net->ro._s_addr = NULL;
4285 net->src_addr_selected = 0;
4291 if (net->src_addr_selected == 0) {
4292 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4293 /* KAME hack: embed scopeid */
4294 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4295 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4298 /* Cache the source address */
4299 net->ro._s_addr = sctp_source_address_selection(inp,
4305 (void)sa6_recoverscope(sin6);
4306 net->src_addr_selected = 1;
4308 if (net->ro._s_addr == NULL) {
4309 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4310 net->src_addr_selected = 0;
4311 sctp_handle_no_route(stcb, net, so_locked);
4312 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4314 return (EHOSTUNREACH);
4316 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4318 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4319 /* KAME hack: embed scopeid */
4320 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4321 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4324 if (over_addr == NULL) {
4325 struct sctp_ifa *_lsrc;
4327 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4331 if (_lsrc == NULL) {
4332 sctp_handle_no_route(stcb, net, so_locked);
4333 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4335 return (EHOSTUNREACH);
4337 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4338 sctp_free_ifa(_lsrc);
4340 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4341 SCTP_RTALLOC(ro, vrf_id);
4343 (void)sa6_recoverscope(sin6);
4345 lsa6->sin6_port = inp->sctp_lport;
4347 if (ro->ro_rt == NULL) {
4349 * src addr selection failed to find a route
4350 * (or valid source addr), so we can't get there from here (yet)!
4353 sctp_handle_no_route(stcb, net, so_locked);
4354 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4356 return (EHOSTUNREACH);
4359 * XXX: sa6 may not have a valid sin6_scope_id in
4360 * the non-SCOPEDROUTING case.
4362 bzero(&lsa6_storage, sizeof(lsa6_storage));
4363 lsa6_storage.sin6_family = AF_INET6;
4364 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4365 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4366 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4367 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4372 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4373 lsa6_storage.sin6_port = inp->sctp_lport;
4374 lsa6 = &lsa6_storage;
4375 ip6h->ip6_src = lsa6->sin6_addr;
4378 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4379 sctp_handle_no_route(stcb, net, so_locked);
4380 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4382 return (EHOSTUNREACH);
4384 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4385 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4386 udp->uh_dport = port;
4387 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4389 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4391 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4394 sctphdr->src_port = src_port;
4395 sctphdr->dest_port = dest_port;
4396 sctphdr->v_tag = v_tag;
4397 sctphdr->checksum = 0;
4400 * We set the hop limit now since there is a good
4401 * chance that our ro pointer is now filled
4403 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4404 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4407 /* Copy to be sure something bad is not happening */
4408 sin6->sin6_addr = ip6h->ip6_dst;
4409 lsa6->sin6_addr = ip6h->ip6_src;
4412 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4413 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4414 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4415 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4416 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4418 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4420 * preserve the port and scope for link local sends
4423 prev_scope = sin6->sin6_scope_id;
4424 prev_port = sin6->sin6_port;
4426 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4427 /* failed to prepend data, give up */
4429 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4432 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4434 #if defined(SCTP_WITH_NO_CSUM)
4435 SCTP_STAT_INCR(sctps_sendnocrc);
4437 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4438 SCTP_STAT_INCR(sctps_sendswcrc);
4440 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4441 udp->uh_sum = 0xffff;
4444 #if defined(SCTP_WITH_NO_CSUM)
4445 SCTP_STAT_INCR(sctps_sendnocrc);
4447 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4448 m->m_pkthdr.csum_data = 0;
4449 SCTP_STAT_INCR(sctps_sendhwcrc);
4452 /* send it out. table id is taken from stcb */
4453 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4454 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4455 so = SCTP_INP_SO(inp);
4456 SCTP_SOCKET_UNLOCK(so, 0);
4459 #ifdef SCTP_PACKET_LOGGING
4460 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4461 sctp_packet_log(o_pak);
4463 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4464 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4465 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4466 atomic_add_int(&stcb->asoc.refcnt, 1);
4467 SCTP_TCB_UNLOCK(stcb);
4468 SCTP_SOCKET_LOCK(so, 0);
4469 SCTP_TCB_LOCK(stcb);
4470 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4474 /* for link local this must be done */
4475 sin6->sin6_scope_id = prev_scope;
4476 sin6->sin6_port = prev_port;
4478 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4479 SCTP_STAT_INCR(sctps_sendpackets);
4480 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4482 SCTP_STAT_INCR(sctps_senderrors);
4485 /* Now if we had a temp route free it */
4489 * PMTU check versus smallest asoc MTU goes here.
4492 if (ro->ro_rt == NULL) {
4493 /* Route was freed */
4494 if (net->ro._s_addr &&
4495 net->src_addr_selected) {
4496 sctp_free_ifa(net->ro._s_addr);
4497 net->ro._s_addr = NULL;
4499 net->src_addr_selected = 0;
4501 if ((ro->ro_rt != NULL) &&
4502 (net->ro._s_addr)) {
4505 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4507 (stcb->asoc.smallest_mtu > mtu)) {
4508 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4511 net->mtu -= sizeof(struct udphdr);
4515 if (ND_IFINFO(ifp)->linkmtu &&
4516 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4517 sctp_mtu_size_reset(inp,
4519 ND_IFINFO(ifp)->linkmtu);
4527 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4528 ((struct sockaddr *)to)->sa_family);
4530 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4537 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4538 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4544 struct sctp_nets *net;
4545 struct sctp_init_chunk *init;
4546 struct sctp_supported_addr_param *sup_addr;
4547 struct sctp_adaptation_layer_indication *ali;
4548 struct sctp_supported_chunk_types_param *pr_supported;
4549 struct sctp_paramhdr *ph;
4550 int cnt_inits_to = 0;
4552 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4554 /* INITs always go to the primary (and usually ONLY) address */
4555 net = stcb->asoc.primary_destination;
4557 net = TAILQ_FIRST(&stcb->asoc.nets);
4562 /* we confirm any address we send an INIT to */
4563 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4564 (void)sctp_set_primary_addr(stcb, NULL, net);
4566 /* we confirm any address we send an INIT to */
4567 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4569 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4571 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4573 * special hook, if we are sending to link local it will not
4574 * show up in our private address count.
4576 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4580 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4581 /* This case should not happen */
4582 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4585 /* start the INIT timer */
4586 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4588 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
4590 /* No memory, INIT timer will re-attempt. */
4591 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4594 chunk_len = (uint16_t) sizeof(struct sctp_init_chunk);
4597 * assume peer supports asconf in order to be able to queue local
4598 * address changes while an INIT is in flight and before the assoc is established.
4601 stcb->asoc.peer_supports_asconf = 1;
4602 /* Now lets put the chunk header in place */
4603 init = mtod(m, struct sctp_init_chunk *);
4604 /* now the chunk header */
4605 init->ch.chunk_type = SCTP_INITIATION;
4606 init->ch.chunk_flags = 0;
4607 /* fill in later from mbuf we build */
4608 init->ch.chunk_length = 0;
4609 /* place in my tag */
4610 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4611 /* set up some of the credits. */
4612 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4613 SCTP_MINIMAL_RWND));
4614 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4615 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4616 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
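/*
 * That completes the fixed portion of the INIT chunk; everything from
 * here on is optional parameters.  chunk_len tracks the running
 * length, and ch.chunk_length is back-filled below once all
 * parameters, including the address list, have been appended.
 */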
4618 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4621 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4622 if (stcb->asoc.scope.ipv4_addr_legal) {
4623 parameter_len += (uint16_t) sizeof(uint16_t);
4625 if (stcb->asoc.scope.ipv6_addr_legal) {
4626 parameter_len += (uint16_t) sizeof(uint16_t);
4628 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4629 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4630 sup_addr->ph.param_length = htons(parameter_len);
4632 if (stcb->asoc.scope.ipv4_addr_legal) {
4633 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4635 if (stcb->asoc.scope.ipv6_addr_legal) {
4636 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4638 padding_len = 4 - 2 * i;
4639 chunk_len += parameter_len;
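/*
 * Each parameter appended below follows the same pattern: if the
 * previous parameter left the chunk short of a 4-byte boundary,
 * padding_len bytes of zeros are written first (SCTP parameters are
 * padded to 32-bit boundaries), then the new parameter is added and
 * chunk_len/padding_len are updated for the next round.  For example,
 * a 6-byte parameter is followed by 2 bytes of zeros before the next
 * parameter starts.
 */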
4641 /* Adaptation layer indication parameter */
4642 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4643 if (padding_len > 0) {
4644 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4645 chunk_len += padding_len;
4648 parameter_len = (uint16_t) sizeof(struct sctp_adaptation_layer_indication);
4649 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4650 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4651 ali->ph.param_length = htons(parameter_len);
4652 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4653 chunk_len += parameter_len;
4655 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4656 /* Add NAT friendly parameter. */
4657 if (padding_len > 0) {
4658 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4659 chunk_len += padding_len;
4662 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4663 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4664 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4665 ph->param_length = htons(parameter_len);
4666 chunk_len += parameter_len;
4668 /* now any cookie time extensions */
4669 if (stcb->asoc.cookie_preserve_req) {
4670 struct sctp_cookie_perserve_param *cookie_preserve;
4672 if (padding_len > 0) {
4673 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4674 chunk_len += padding_len;
4677 parameter_len = (uint16_t) sizeof(struct sctp_cookie_perserve_param);
4678 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4679 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4680 cookie_preserve->ph.param_length = htons(parameter_len);
4681 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4682 stcb->asoc.cookie_preserve_req = 0;
4683 chunk_len += parameter_len;
4686 if (stcb->asoc.ecn_allowed == 1) {
4687 if (padding_len > 0) {
4688 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4689 chunk_len += padding_len;
4692 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4693 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4694 ph->param_type = htons(SCTP_ECN_CAPABLE);
4695 ph->param_length = htons(parameter_len);
4696 chunk_len += parameter_len;
4698 /* And now tell the peer we do support PR-SCTP. */
4699 if (padding_len > 0) {
4700 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4701 chunk_len += padding_len;
4704 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4705 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4706 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4707 ph->param_length = htons(parameter_len);
4708 chunk_len += parameter_len;
4710 /* And now tell the peer we do all the extensions */
4711 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4712 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4714 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4715 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4716 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4717 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4718 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4719 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4720 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4722 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
4723 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4725 parameter_len = (uint16_t) sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4726 pr_supported->ph.param_length = htons(parameter_len);
4727 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4728 chunk_len += parameter_len;
4730 /* add authentication parameters */
4731 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4732 /* attach RANDOM parameter, if available */
4733 if (stcb->asoc.authinfo.random != NULL) {
4734 struct sctp_auth_random *randp;
4736 if (padding_len > 0) {
4737 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4738 chunk_len += padding_len;
4741 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4742 parameter_len = (uint16_t) sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4743 /* random key already contains the header */
4744 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4745 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4746 chunk_len += parameter_len;
4748 /* add HMAC_ALGO parameter */
4749 if ((stcb->asoc.local_hmacs != NULL) &&
4750 (stcb->asoc.local_hmacs->num_algo > 0)) {
4751 struct sctp_auth_hmac_algo *hmacs;
4753 if (padding_len > 0) {
4754 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4755 chunk_len += padding_len;
4758 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4759 parameter_len = (uint16_t) (sizeof(struct sctp_auth_hmac_algo) +
4760 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4761 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4762 hmacs->ph.param_length = htons(parameter_len);
4763 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *) hmacs->hmac_ids);
4764 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4765 chunk_len += parameter_len;
4767 /* add CHUNKS parameter */
4768 if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) {
4769 struct sctp_auth_chunk_list *chunks;
4771 if (padding_len > 0) {
4772 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4773 chunk_len += padding_len;
4776 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4777 parameter_len = (uint16_t) (sizeof(struct sctp_auth_chunk_list) +
4778 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
4779 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4780 chunks->ph.param_length = htons(parameter_len);
4781 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
4782 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4783 chunk_len += parameter_len;
4786 SCTP_BUF_LEN(m) = chunk_len;
4788 /* now the addresses */
4790 * To optimize this we could put the scoping stuff into a structure
4791 * and remove the individual uint8's from the assoc structure. Then
4792 * we could just sifa in the address within the stcb. But for now
4793 * this is a quick hack to get the address stuff teased apart.
4795 sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, m, cnt_inits_to, &padding_len, &chunk_len);
4797 init->ch.chunk_length = htons(chunk_len);
4798 if (padding_len > 0) {
4799 struct mbuf *m_at, *mp_last;
4802 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4803 if (SCTP_BUF_NEXT(m_at) == NULL)
4806 if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) {
4811 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4812 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4813 (struct sockaddr *)&net->ro._l_addr,
4814 m, 0, NULL, 0, 0, 0, 0,
4815 inp->sctp_lport, stcb->rport, htonl(0),
4819 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4820 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4821 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4825 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4826 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4829 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
4830 * pointing to the beginning of the parameters (i.e. iphlen +
4831 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4832 * end of the mbuf, verifying that all parameters are known.
4834 * For unknown parameters build and return an mbuf with
4835 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4836 * processing this chunk, stop and set *abort_processing to 1.
4838 * By having param_offset pre-set to where parameters begin, it is
4839 * hoped that this routine may be reused in the future by new features.
4842 struct sctp_paramhdr *phdr, params;
4844 struct mbuf *mat, *op_err;
4845 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4846 int at, limit, pad_needed;
4847 uint16_t ptype, plen, padded_size;
4850 *abort_processing = 0;
4853 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4856 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4857 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4858 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4859 ptype = ntohs(phdr->param_type);
4860 plen = ntohs(phdr->param_length);
4861 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4862 /* whacked parameter */
4863 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4866 limit -= SCTP_SIZE32(plen);
4868 * All parameters for all chunks that we know/understand are
4869 * listed here. We process them elsewhere and take the
4870 * appropriate stop actions per the upper bits. However this
4871 * is the generic routine processors can call to get back
4872 * an operr to either incorporate (init-ack) or send.
4874 padded_size = SCTP_SIZE32(plen);
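/*
 * For reference, the two high-order bits of the parameter type encode
 * what to do with an unrecognized parameter (RFC 4960, section 3.2.1):
 * 0x8000 set means "skip and continue", clear means "stop processing
 * further parameters"; 0x4000 set means "report it in an Unrecognized
 * Parameter cause".  The default case below keys off exactly these
 * bits.
 */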
4876 /* Params with variable size */
4877 case SCTP_HEARTBEAT_INFO:
4878 case SCTP_STATE_COOKIE:
4879 case SCTP_UNRECOG_PARAM:
4880 case SCTP_ERROR_CAUSE_IND:
4884 /* Params with variable size within a range */
4885 case SCTP_CHUNK_LIST:
4886 case SCTP_SUPPORTED_CHUNK_EXT:
4887 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4888 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4893 case SCTP_SUPPORTED_ADDRTYPE:
4894 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4895 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4901 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4902 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4907 case SCTP_SET_PRIM_ADDR:
4908 case SCTP_DEL_IP_ADDRESS:
4909 case SCTP_ADD_IP_ADDRESS:
4910 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4911 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4912 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4917 /* Params with a fixed size */
4918 case SCTP_IPV4_ADDRESS:
4919 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4920 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4925 case SCTP_IPV6_ADDRESS:
4926 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4927 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
4932 case SCTP_COOKIE_PRESERVE:
4933 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
4934 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
4939 case SCTP_HAS_NAT_SUPPORT:
4942 case SCTP_PRSCTP_SUPPORTED:
4944 if (padded_size != sizeof(struct sctp_paramhdr)) {
4945 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
4950 case SCTP_ECN_CAPABLE:
4951 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
4952 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
4957 case SCTP_ULP_ADAPTATION:
4958 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
4959 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
4964 case SCTP_SUCCESS_REPORT:
4965 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
4966 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
4971 case SCTP_HOSTNAME_ADDRESS:
4973 /* We can NOT handle HOST NAME addresses!! */
4976 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
4977 *abort_processing = 1;
4978 if (op_err == NULL) {
4979 /* Ok need to try to get an mbuf */
4981 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4983 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4986 l_len += sizeof(struct sctp_paramhdr);
4987 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
4989 SCTP_BUF_LEN(op_err) = 0;
4991 * pre-reserve space for ip
4992 * and sctp header and chunk header
4996 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4998 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5000 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5001 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5005 /* If we have space */
5006 struct sctp_paramhdr s;
5009 uint32_t cpthis = 0;
5011 pad_needed = 4 - (err_at % 4);
5012 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5013 err_at += pad_needed;
5015 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5016 s.param_length = htons(sizeof(s) + plen);
5017 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5018 err_at += sizeof(s);
5019 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5021 sctp_m_freem(op_err);
5023 * we are out of memory but
5024 * we still need to have a
5025 * look at what to do (the
5026 * system is in trouble though).
5031 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5038 * we do not recognize the parameter; figure out what we do.
5041 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5042 if ((ptype & 0x4000) == 0x4000) {
5043 /* Report bit is set?? */
5044 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5045 if (op_err == NULL) {
5048 /* Ok need to try to get an mbuf */
5050 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5052 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5055 l_len += sizeof(struct sctp_paramhdr);
5056 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5058 SCTP_BUF_LEN(op_err) = 0;
5060 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5062 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5064 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5065 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5069 /* If we have space */
5070 struct sctp_paramhdr s;
5073 uint32_t cpthis = 0;
5075 pad_needed = 4 - (err_at % 4);
5076 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5077 err_at += pad_needed;
5079 s.param_type = htons(SCTP_UNRECOG_PARAM);
5080 s.param_length = htons(sizeof(s) + plen);
5081 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5082 err_at += sizeof(s);
5083 if (plen > sizeof(tempbuf)) {
5084 plen = sizeof(tempbuf);
5086 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5088 sctp_m_freem(op_err);
5090 * we are out of memory but
5091 * we still need to have a
5092 * look at what to do (the
5093 * system is in trouble though).
5097 goto more_processing;
5099 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5104 if ((ptype & 0x8000) == 0x0000) {
5105 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5108 /* skip this chunk and continue processing */
5109 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5110 at += SCTP_SIZE32(plen);
5115 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5119 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5120 *abort_processing = 1;
5121 if ((op_err == NULL) && phdr) {
5125 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5127 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5129 l_len += (2 * sizeof(struct sctp_paramhdr));
5130 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5132 SCTP_BUF_LEN(op_err) = 0;
5134 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5136 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5138 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5139 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5142 if ((op_err) && phdr) {
5143 struct sctp_paramhdr s;
5146 uint32_t cpthis = 0;
5148 pad_needed = 4 - (err_at % 4);
5149 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5150 err_at += pad_needed;
5152 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5153 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5154 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5155 err_at += sizeof(s);
5156 /* Only copy back the p-hdr that caused the issue */
5157 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5163 sctp_are_there_new_addresses(struct sctp_association *asoc,
5164 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5167 * Given an INIT packet, look through the packet to verify that there
5168 * are NO new addresses. As we go through the parameters, add reports
5169 * of any un-understood parameters that require an error. Also we
5170 * must return (1) to drop the packet if we see an un-understood
5171 * parameter that tells us to drop the chunk.
5173 struct sockaddr *sa_touse;
5174 struct sockaddr *sa;
5175 struct sctp_paramhdr *phdr, params;
5176 uint16_t ptype, plen;
5178 struct sctp_nets *net;
5181 struct sockaddr_in sin4, *sa4;
5185 struct sockaddr_in6 sin6, *sa6;
5190 memset(&sin4, 0, sizeof(sin4));
5191 sin4.sin_family = AF_INET;
5192 sin4.sin_len = sizeof(sin4);
5195 memset(&sin6, 0, sizeof(sin6));
5196 sin6.sin6_family = AF_INET6;
5197 sin6.sin6_len = sizeof(sin6);
5199 /* First, what about the src address of the pkt? */
5201 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5202 sa = (struct sockaddr *)&net->ro._l_addr;
5203 if (sa->sa_family == src->sa_family) {
5205 if (sa->sa_family == AF_INET) {
5206 struct sockaddr_in *src4;
5208 sa4 = (struct sockaddr_in *)sa;
5209 src4 = (struct sockaddr_in *)src;
5210 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5217 if (sa->sa_family == AF_INET6) {
5218 struct sockaddr_in6 *src6;
5220 sa6 = (struct sockaddr_in6 *)sa;
5221 src6 = (struct sockaddr_in6 *)src;
5222 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5231 /* New address added! no need to look further. */
5234 /* Ok so far, let's munge through the rest of the packet */
5235 offset += sizeof(struct sctp_init_chunk);
5236 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5239 ptype = ntohs(phdr->param_type);
5240 plen = ntohs(phdr->param_length);
5243 case SCTP_IPV4_ADDRESS:
5245 struct sctp_ipv4addr_param *p4, p4_buf;
5247 phdr = sctp_get_next_param(in_initpkt, offset,
5248 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5249 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5253 p4 = (struct sctp_ipv4addr_param *)phdr;
5254 sin4.sin_addr.s_addr = p4->addr;
5255 sa_touse = (struct sockaddr *)&sin4;
5260 case SCTP_IPV6_ADDRESS:
5262 struct sctp_ipv6addr_param *p6, p6_buf;
5264 phdr = sctp_get_next_param(in_initpkt, offset,
5265 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5266 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5270 p6 = (struct sctp_ipv6addr_param *)phdr;
5271 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5273 sa_touse = (struct sockaddr *)&sin6;
5282 /* ok, sa_touse points to one to check */
5284 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5285 sa = (struct sockaddr *)&net->ro._l_addr;
5286 if (sa->sa_family != sa_touse->sa_family) {
5290 if (sa->sa_family == AF_INET) {
5291 sa4 = (struct sockaddr_in *)sa;
5292 if (sa4->sin_addr.s_addr ==
5293 sin4.sin_addr.s_addr) {
5300 if (sa->sa_family == AF_INET6) {
5301 sa6 = (struct sockaddr_in6 *)sa;
5302 if (SCTP6_ARE_ADDR_EQUAL(
5311 /* New addr added! no need to look further */
5315 offset += SCTP_SIZE32(plen);
5316 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5322 * Given an mbuf chain that was sent into us containing an INIT, build an
5323 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has done
5324 * a pullup to include the IPv6/IPv4 header, SCTP header and initial part of the INIT
5325 * message (i.e. the struct sctp_init_msg).
5328 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5329 struct mbuf *init_pkt, int iphlen, int offset,
5330 struct sockaddr *src, struct sockaddr *dst,
5331 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5332 uint8_t use_mflowid, uint32_t mflowid,
5333 uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5335 struct sctp_association *asoc;
5336 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
5337 struct sctp_init_ack_chunk *initack;
5338 struct sctp_adaptation_layer_indication *ali;
5339 struct sctp_ecn_supported_param *ecn;
5340 struct sctp_prsctp_supported_param *prsctp;
5341 struct sctp_supported_chunk_types_param *pr_supported;
5342 union sctp_sockstore *over_addr;
5345 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5346 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5347 struct sockaddr_in *sin;
5351 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5352 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5353 struct sockaddr_in6 *sin6;
5356 struct sockaddr *to;
5357 struct sctp_state_cookie stc;
5358 struct sctp_nets *net = NULL;
5359 uint8_t *signature = NULL;
5360 int cnt_inits_to = 0;
5361 uint16_t his_limit, i_want;
5362 int abort_flag, padval;
5365 int nat_friendly = 0;
5374 if ((asoc != NULL) &&
5375 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5376 (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) {
5377 /* new addresses, out of here in non-cookie-wait states */
5379 * Send an ABORT; we don't add the new address error clause,
5380 * though we even set the T bit and copy in the 0 tag. This
5381 * looks no different than if no listener was present.
5383 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, NULL,
5384 use_mflowid, mflowid,
5389 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5390 (offset + sizeof(struct sctp_init_chunk)),
5391 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5394 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5395 init_chk->init.initiate_tag, op_err,
5396 use_mflowid, mflowid,
5400 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5402 /* No memory, INIT timer will re-attempt. */
5404 sctp_m_freem(op_err);
5407 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5410 * We might not overwrite the identification[] completely and on
5411 * some platforms time_entered will contain some padding. Therefore
5412 * zero out the cookie to avoid putting uninitialized memory on the wire.
5415 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5417 /* the time I built cookie */
5418 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
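/*
 * The state cookie (stc) being filled in here carries everything
 * needed to rebuild the association when the COOKIE-ECHO comes back:
 * creation time and lifespan, both vtags (plus tie-tags for restart
 * cases), the ports, address and scope information, and later the
 * peer's initiate tag.  It travels inside the INIT-ACK and is only
 * trusted because it is signed before transmission.
 */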
5420 /* populate any tie tags */
5422 /* unlock before tag selections */
5423 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5424 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5425 stc.cookie_life = asoc->cookie_life;
5426 net = asoc->primary_destination;
5428 stc.tie_tag_my_vtag = 0;
5429 stc.tie_tag_peer_vtag = 0;
5430 /* life I will award this cookie */
5431 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5434 /* copy in the ports for later check */
5435 stc.myport = sh->dest_port;
5436 stc.peerport = sh->src_port;
5439 * If we wanted to honor cookie life extensions, we would add to
5440 * stc.cookie_life. For now we should NOT honor any extension.
5442 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5443 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5444 stc.ipv6_addr_legal = 1;
5445 if (SCTP_IPV6_V6ONLY(inp)) {
5446 stc.ipv4_addr_legal = 0;
5448 stc.ipv4_addr_legal = 1;
5451 stc.ipv6_addr_legal = 0;
5452 stc.ipv4_addr_legal = 1;
5454 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5461 switch (dst->sa_family) {
5465 /* lookup address */
5466 stc.address[0] = src4->sin_addr.s_addr;
5470 stc.addr_type = SCTP_IPV4_ADDRESS;
5471 /* local from address */
5472 stc.laddress[0] = dst4->sin_addr.s_addr;
5473 stc.laddress[1] = 0;
5474 stc.laddress[2] = 0;
5475 stc.laddress[3] = 0;
5476 stc.laddr_type = SCTP_IPV4_ADDRESS;
5477 /* scope_id is only for v6 */
5479 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5480 if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) {
5485 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5486 /* Must use the address in this case */
5487 if (sctp_is_address_on_local_host(src, vrf_id)) {
5488 stc.loopback_scope = 1;
5491 stc.local_scope = 0;
5499 stc.addr_type = SCTP_IPV6_ADDRESS;
5500 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5501 stc.scope_id = in6_getscope(&src6->sin6_addr);
5502 if (sctp_is_address_on_local_host(src, vrf_id)) {
5503 stc.loopback_scope = 1;
5504 stc.local_scope = 0;
5507 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) {
5509 * If the new destination is a
5510 * LINK_LOCAL we must have both
5511 * site and local scope in common. Don't
5512 * set local scope though, since we
5513 * must depend on the source to be
5514 * added implicitly. We cannot
5515 * assume, just because we share one
5516 * link, that all links are common.
5518 stc.local_scope = 0;
5522 * we start counting for the private
5523 * address stuff at 1 since the
5524 * link local we source from won't
5525 * show up in our scoped count.
5529 * pull out the scope_id from the incoming packet
5532 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) {
5534 * If the new destination is
5535 * SITE_LOCAL then we must have site scope in common.
5540 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5541 stc.laddr_type = SCTP_IPV6_ADDRESS;
5551 /* set the scope per the existing tcb */
5554 struct sctp_nets *lnet;
5558 stc.loopback_scope = asoc->scope.loopback_scope;
5559 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5560 stc.site_scope = asoc->scope.site_scope;
5561 stc.local_scope = asoc->scope.local_scope;
5563 /* Why do we not consider IPv4 LL addresses? */
5564 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5565 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5566 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5568 * if we have a LL address, start
5576 /* use the net pointer */
5577 to = (struct sockaddr *)&net->ro._l_addr;
5578 switch (to->sa_family) {
5581 sin = (struct sockaddr_in *)to;
5582 stc.address[0] = sin->sin_addr.s_addr;
5586 stc.addr_type = SCTP_IPV4_ADDRESS;
5587 if (net->src_addr_selected == 0) {
5589 * strange case here, the INIT should have
5590 * done the selection.
5592 net->ro._s_addr = sctp_source_address_selection(inp,
5593 stcb, (sctp_route_t *) & net->ro,
5595 if (net->ro._s_addr == NULL)
5598 net->src_addr_selected = 1;
5601 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5602 stc.laddress[1] = 0;
5603 stc.laddress[2] = 0;
5604 stc.laddress[3] = 0;
5605 stc.laddr_type = SCTP_IPV4_ADDRESS;
5606 /* scope_id is only for v6 */
5612 sin6 = (struct sockaddr_in6 *)to;
5613 memcpy(&stc.address, &sin6->sin6_addr,
5614 sizeof(struct in6_addr));
5615 stc.addr_type = SCTP_IPV6_ADDRESS;
5616 stc.scope_id = sin6->sin6_scope_id;
5617 if (net->src_addr_selected == 0) {
5619 * strange case here, the INIT should have
5620 * done the selection.
5622 net->ro._s_addr = sctp_source_address_selection(inp,
5623 stcb, (sctp_route_t *) & net->ro,
5625 if (net->ro._s_addr == NULL)
5628 net->src_addr_selected = 1;
5630 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5631 sizeof(struct in6_addr));
5632 stc.laddr_type = SCTP_IPV6_ADDRESS;
5637 /* Now let's put the INIT-ACK chunk header in place */
5638 initack = mtod(m, struct sctp_init_ack_chunk *);
5639 /* Save it off for quick ref */
5640 stc.peers_vtag = init_chk->init.initiate_tag;
5642 memcpy(stc.identification, SCTP_VERSION_STRING,
5643 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5644 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5645 /* now the chunk header */
5646 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5647 initack->ch.chunk_flags = 0;
5648 /* fill in later from mbuf we build */
5649 initack->ch.chunk_length = 0;
5650 /* place in my tag */
5651 if ((asoc != NULL) &&
5652 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5653 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5654 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5655 /* re-use the v-tags and init-seq here */
5656 initack->init.initiate_tag = htonl(asoc->my_vtag);
5657 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5659 uint32_t vtag, itsn;
5661 if (hold_inp_lock) {
5662 SCTP_INP_INCR_REF(inp);
5663 SCTP_INP_RUNLOCK(inp);
5666 atomic_add_int(&asoc->refcnt, 1);
5667 SCTP_TCB_UNLOCK(stcb);
5669 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5670 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5672 * Got a duplicate vtag on some guy behind a
5673 * NAT; make sure we don't use it.
5677 initack->init.initiate_tag = htonl(vtag);
5678 /* get a TSN to use too */
5679 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5680 initack->init.initial_tsn = htonl(itsn);
5681 SCTP_TCB_LOCK(stcb);
5682 atomic_add_int(&asoc->refcnt, -1);
5684 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5685 initack->init.initiate_tag = htonl(vtag);
5686 /* get a TSN to use too */
5687 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5689 if (hold_inp_lock) {
5690 SCTP_INP_RLOCK(inp);
5691 SCTP_INP_DECR_REF(inp);
5694 /* save away my tag into the cookie */
5695 stc.my_vtag = initack->init.initiate_tag;
5697 /* set up some of the credits. */
5698 so = inp->sctp_socket;
5700 /* memory problem */
5704 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5706 /* set what I want */
5707 his_limit = ntohs(init_chk->init.num_inbound_streams);
5708 /* choose what I want */
5710 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5711 i_want = asoc->streamoutcnt;
5713 i_want = inp->sctp_ep.pre_open_stream_count;
5716 i_want = inp->sctp_ep.pre_open_stream_count;
5718 if (his_limit < i_want) {
5719 /* I Want more :< */
5720 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5722 /* I can have what I want :> */
5723 initack->init.num_outbound_streams = htons(i_want);
5725 /* tell him his limit. */
5726 initack->init.num_inbound_streams =
5727 htons(inp->sctp_ep.max_open_streams_intome);
5729 /* adaptation layer indication parameter */
5730 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5731 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5732 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5733 ali->ph.param_length = htons(sizeof(*ali));
5734 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5735 SCTP_BUF_LEN(m) += sizeof(*ali);
5736 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5738 ecn = (struct sctp_ecn_supported_param *)((caddr_t)initack + sizeof(*initack));
5742 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
5743 (inp->sctp_ecn_enable == 1)) {
5744 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5745 ecn->ph.param_length = htons(sizeof(*ecn));
5746 SCTP_BUF_LEN(m) += sizeof(*ecn);
5748 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5751 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5753 /* And now tell the peer we do pr-sctp */
5754 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5755 prsctp->ph.param_length = htons(sizeof(*prsctp));
5756 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5758 /* Add NAT friendly parameter */
5759 struct sctp_paramhdr *ph;
5761 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5762 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5763 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5764 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5766 /* And now tell the peer we do all the extensions */
5767 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5768 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5770 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5771 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5772 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5773 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5774 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5775 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5776 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5777 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5778 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5779 p_len = sizeof(*pr_supported) + num_ext;
5780 pr_supported->ph.param_length = htons(p_len);
5781 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5782 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5784 /* add authentication parameters */
5785 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5786 struct sctp_auth_random *randp;
5787 struct sctp_auth_hmac_algo *hmacs;
5788 struct sctp_auth_chunk_list *chunks;
5789 uint16_t random_len;
5791 /* generate and add RANDOM parameter */
5792 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5793 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5794 randp->ph.param_type = htons(SCTP_RANDOM);
5795 p_len = sizeof(*randp) + random_len;
5796 randp->ph.param_length = htons(p_len);
5797 SCTP_READ_RANDOM(randp->random_data, random_len);
5798 /* zero out any padding required */
5799 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5800 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5802 /* add HMAC_ALGO parameter */
5803 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5804 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5805 (uint8_t *) hmacs->hmac_ids);
5807 p_len += sizeof(*hmacs);
5808 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5809 hmacs->ph.param_length = htons(p_len);
5810 /* zero out any padding required */
5811 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5812 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5814 /* add CHUNKS parameter */
5815 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5816 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5817 chunks->chunk_types);
5819 p_len += sizeof(*chunks);
5820 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5821 chunks->ph.param_length = htons(p_len);
5822 /* zero out any padding required */
5823 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5824 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5828 /* now the addresses */
5830 struct sctp_scoping scp;
5833 * To optimize this we could put the scoping stuff into a
5834 * structure and remove the individual uint8's from the stc
5835 * structure. Then we could just sifa in the address within
5836 * the stc.. but for now this is a quick hack to get the
5837 * address stuff teased apart.
5839 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5840 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5841 scp.loopback_scope = stc.loopback_scope;
5842 scp.ipv4_local_scope = stc.ipv4_scope;
5843 scp.local_scope = stc.local_scope;
5844 scp.site_scope = stc.site_scope;
5845 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL);
5848 /* tack on the operational error if present */
5857 llen += SCTP_BUF_LEN(ol);
5858 ol = SCTP_BUF_NEXT(ol);
5861 /* must add a pad to the param */
5862 uint32_t cpthis = 0;
5865 padlen = 4 - (llen % 4);
5866 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
5868 while (SCTP_BUF_NEXT(m_at) != NULL) {
5869 m_at = SCTP_BUF_NEXT(m_at);
5871 SCTP_BUF_NEXT(m_at) = op_err;
5872 while (SCTP_BUF_NEXT(m_at) != NULL) {
5873 m_at = SCTP_BUF_NEXT(m_at);
5876 /* pre-calculate the size and update pkt header and chunk header */
5878 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5879 p_len += SCTP_BUF_LEN(m_tmp);
5880 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5881 /* m_tmp should now point to last one */
5886 /* Now we must build a cookie */
5887 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
5888 if (m_cookie == NULL) {
5889 /* memory problem */
5893 /* Now append the cookie to the end and update the space/size */
5894 SCTP_BUF_NEXT(m_tmp) = m_cookie;
5896 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5897 p_len += SCTP_BUF_LEN(m_tmp);
5898 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5899 /* m_tmp should now point to last one */
5905 * Place in the size, but we don't include the last pad (if any) in the chunk size.
5908 initack->ch.chunk_length = htons(p_len);
5911 * Time to sign the cookie; we don't sign over the cookie signature
5912 * itself, thus we set trailer.
5914 (void)sctp_hmac_m(SCTP_HMAC,
5915 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
5916 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
5917 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
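/*
 * The HMAC above is computed with the endpoint's current secret key
 * over the cookie contents (skipping the parameter header and the
 * trailing signature space itself) and written into the signature
 * area; when the COOKIE-ECHO returns, the receiver recomputes it to
 * validate that the cookie is one it generated.
 */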
5919 * We sifa 0 here to NOT set IP_DF if it's IPv4; we ignore the return
5920 * here since the timer will drive a retransmission.
5923 if ((padval) && (mp_last)) {
5924 /* see my previous comments on mp_last */
5925 if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
5926 /* Houston we have a problem, no space */
5931 if (stc.loopback_scope) {
5932 over_addr = (union sctp_sockstore *)dst;
5937 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
5939 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
5941 use_mflowid, mflowid,
5942 SCTP_SO_NOT_LOCKED);
5943 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
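/*
 * Note: the INIT-ACK (with its signed cookie) is typically sent without
 * allocating any association state; the peer must echo the cookie back
 * before a TCB is created, which is what protects us from INIT floods.
 */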
5948 sctp_prune_prsctp(struct sctp_tcb *stcb,
5949 struct sctp_association *asoc,
5950 struct sctp_sndrcvinfo *srcv,
5954 struct sctp_tmit_chunk *chk, *nchk;
5956 SCTP_TCB_LOCK_ASSERT(stcb);
5957 if ((asoc->peer_supports_prsctp) &&
5958 (asoc->sent_queue_cnt_removeable > 0)) {
5959 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
5961 * Look for chunks marked with the PR_SCTP flag AND
5962 * the buffer space flag. If the one being sent is
5963 * of equal or greater priority then purge the old one
5964 * and free some space.
5966 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
5968 * This one is PR-SCTP AND buffer space limited.
5971 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5973 * Lower numbers equate to higher
5974 * priority so if the one we are
5975 * looking at has a larger or equal
5976 * priority we want to drop the data
5977 * and NOT retransmit it.
5981 * We release the book_size
5982 * if the mbuf is here
5987 if (chk->sent > SCTP_DATAGRAM_UNSENT)
5991 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5994 freed_spc += ret_spc;
5995 if (freed_spc >= dataout) {
5998 } /* if chunk was present */
5999 } /* if of sufficient priority */
6000 } /* if chunk has enabled */
6001 } /* tailqforeach */
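/*
 * If the sent queue did not free enough space, fall through and apply
 * the same buffer-space/priority test to chunks still waiting on the
 * send queue (not yet transmitted).
 */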
6003 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6004 /* Here we must move to the sent queue and mark */
6005 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6006 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6009 * We release the book_size
6010 * if the mbuf is here
6014 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6017 freed_spc += ret_spc;
6018 if (freed_spc >= dataout) {
6021 } /* end if chk->data */
6022 } /* end if right class */
6023 } /* end if chk pr-sctp */
6024 } /* tailqforeachsafe (chk) */
6025 } /* if enabled in asoc */
6029 sctp_get_frag_point(struct sctp_tcb *stcb,
6030 struct sctp_association *asoc)
6035 * For endpoints that have both v6 and v4 addresses we must reserve
6036 * room for the ipv6 header, for those that are only dealing with V4
6037 * we use a larger frag point.
6039 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6040 ovh = SCTP_MED_OVERHEAD;
6042 ovh = SCTP_MED_V4_OVERHEAD;
6045 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6046 siz = asoc->smallest_mtu - ovh;
6048 siz = (stcb->asoc.sctp_frag_point - ovh);
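/*
 * The usable fragment point is the smaller of the configured
 * sctp_frag_point and the smallest path MTU, minus the IP/SCTP overhead
 * reserved above.
 */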
6050 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6052 /* A data chunk MUST fit in a cluster */
6053 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6056 /* adjust for an AUTH chunk if DATA requires auth */
6057 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6058 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6061 /* make it an even word boundary please */
6068 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6071 * We assume that the user wants PR_SCTP_TTL if the user provides a
6072 * positive lifetime but does not specify any PR_SCTP policy. This
6073 * is a BAD assumption and causes problems at least with the
6074 * U-Vancouver MPI folks. I will change this to be no policy means NO policy.
6077 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6078 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6082 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6083 case CHUNK_FLAGS_PR_SCTP_BUF:
6085 * Time to live is a priority stored in tv_sec when doing
6086 * the buffer drop thing.
6088 sp->ts.tv_sec = sp->timetolive;
6091 case CHUNK_FLAGS_PR_SCTP_TTL:
6095 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6096 tv.tv_sec = sp->timetolive / 1000;
6097 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6099 * TODO sctp_constants.h needs alternative time
6100 * macros when _KERNEL is undefined.
6102 timevaladd(&sp->ts, &tv);
6105 case CHUNK_FLAGS_PR_SCTP_RTX:
6107 * Time to live is the number of retransmissions stored in tv_sec.
6110 sp->ts.tv_sec = sp->timetolive;
6114 SCTPDBG(SCTP_DEBUG_USRREQ1,
6115 "Unknown PR_SCTP policy %u.\n",
6116 PR_SCTP_POLICY(sp->sinfo_flags));
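/*
 * Illustrative sketch (not from this file): a sender picks one of these
 * policies via sinfo_flags, e.g. setting SCTP_PR_SCTP_TTL with
 * sinfo_timetolive = 500 asks for the 500ms expiry computed above, while
 * SCTP_PR_SCTP_RTX treats the same field as a retransmission limit and
 * SCTP_PR_SCTP_BUF treats it as a buffer-drop priority.
 */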
6122 sctp_msg_append(struct sctp_tcb *stcb,
6123 struct sctp_nets *net,
6125 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6129 struct sctp_stream_queue_pending *sp = NULL;
6130 struct sctp_stream_out *strm;
6133 * Given an mbuf chain, put it into the association send queue and
6134 * place it on the wheel
6136 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6137 /* Invalid stream number */
6138 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6142 if ((stcb->asoc.stream_locked) &&
6143 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6144 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6148 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6149 /* Now can we send this? */
6150 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6151 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6152 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6153 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6154 /* got data while shutting down */
6155 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6159 sctp_alloc_a_strmoq(stcb, sp);
6161 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6165 sp->sinfo_flags = srcv->sinfo_flags;
6166 sp->timetolive = srcv->sinfo_timetolive;
6167 sp->ppid = srcv->sinfo_ppid;
6168 sp->context = srcv->sinfo_context;
6169 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6171 atomic_add_int(&sp->net->ref_count, 1);
6175 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6176 sp->stream = srcv->sinfo_stream;
6177 sp->msg_is_complete = 1;
6178 sp->sender_all_done = 1;
6181 sp->tail_mbuf = NULL;
6182 sctp_set_prsctp_policy(sp);
6184 * We could in theory (for sendall) stuff the length in, but we would
6185 * still have to hunt through the chain since we need to setup the tail_mbuf.
6189 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6190 if (SCTP_BUF_NEXT(at) == NULL)
6192 sp->length += SCTP_BUF_LEN(at);
6194 if (srcv->sinfo_keynumber_valid) {
6195 sp->auth_keyid = srcv->sinfo_keynumber;
6197 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6199 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6200 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6201 sp->holds_key_ref = 1;
6203 if (hold_stcb_lock == 0) {
6204 SCTP_TCB_SEND_LOCK(stcb);
6206 sctp_snd_sb_alloc(stcb, sp->length);
6207 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6208 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6209 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6211 if (hold_stcb_lock == 0) {
6212 SCTP_TCB_SEND_UNLOCK(stcb);
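/*
 * At this point the whole user message sits on the stream's outqueue as a
 * single, complete sp entry: it has been charged to the socket send buffer
 * via sctp_snd_sb_alloc() and handed to the stream scheduler; the actual
 * DATA chunks are built later in sctp_move_to_outqueue().
 */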
6222 static struct mbuf *
6223 sctp_copy_mbufchain(struct mbuf *clonechain,
6224 struct mbuf *outchain,
6225 struct mbuf **endofchain,
6228 uint8_t copy_by_ref)
6231 struct mbuf *appendchain;
6235 if (endofchain == NULL) {
6239 sctp_m_freem(outchain);
6242 if (can_take_mbuf) {
6243 appendchain = clonechain;
6246 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6248 /* Its not in a cluster */
6249 if (*endofchain == NULL) {
6250 /* lets get a mbuf cluster */
6251 if (outchain == NULL) {
6252 /* This is the general case */
6254 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6255 if (outchain == NULL) {
6258 SCTP_BUF_LEN(outchain) = 0;
6259 *endofchain = outchain;
6260 /* get the prepend space */
6261 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6264 * We really should not get a NULL
6270 if (SCTP_BUF_NEXT(m) == NULL) {
6274 m = SCTP_BUF_NEXT(m);
6277 if (*endofchain == NULL) {
6279 * huh, TSNH XXX maybe we
6282 sctp_m_freem(outchain);
6286 /* get the new end of length */
6287 len = M_TRAILINGSPACE(*endofchain);
6289 /* how much is left at the end? */
6290 len = M_TRAILINGSPACE(*endofchain);
6292 /* Find the end of the data, for appending */
6293 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6295 /* Now lets copy it out */
6296 if (len >= sizeofcpy) {
6297 /* It all fits, copy it in */
6298 m_copydata(clonechain, 0, sizeofcpy, cp);
6299 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6301 /* fill up the end of the chain */
6303 m_copydata(clonechain, 0, len, cp);
6304 SCTP_BUF_LEN((*endofchain)) += len;
6305 /* now we need another one */
6308 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6313 SCTP_BUF_NEXT((*endofchain)) = m;
6315 cp = mtod((*endofchain), caddr_t);
6316 m_copydata(clonechain, len, sizeofcpy, cp);
6317 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
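/*
 * End of the copy-by-value path: small messages (below the
 * sctp_mbuf_threshold_count heuristic) were appended directly into the
 * tail cluster; anything larger is duplicated with SCTP_M_COPYM() below.
 */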
6321 /* copy the old fashion way */
6322 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6323 #ifdef SCTP_MBUF_LOGGING
6324 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6327 for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
6328 if (SCTP_BUF_IS_EXTENDED(mat)) {
6329 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6336 if (appendchain == NULL) {
6339 sctp_m_freem(outchain);
6343 /* tack on to the end */
6344 if (*endofchain != NULL) {
6345 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6349 if (SCTP_BUF_NEXT(m) == NULL) {
6350 SCTP_BUF_NEXT(m) = appendchain;
6353 m = SCTP_BUF_NEXT(m);
6357 * save off the end and update the end-chain position
6361 if (SCTP_BUF_NEXT(m) == NULL) {
6365 m = SCTP_BUF_NEXT(m);
6369 /* save off the end and update the end-chain position */
6372 if (SCTP_BUF_NEXT(m) == NULL) {
6376 m = SCTP_BUF_NEXT(m);
6378 return (appendchain);
6383 sctp_med_chunk_output(struct sctp_inpcb *inp,
6384 struct sctp_tcb *stcb,
6385 struct sctp_association *asoc,
6388 int control_only, int from_where,
6389 struct timeval *now, int *now_filled, int frag_point, int so_locked
6390 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6396 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6397 uint32_t val SCTP_UNUSED)
6399 struct sctp_copy_all *ca;
6402 int added_control = 0;
6403 int un_sent, do_chunk_output = 1;
6404 struct sctp_association *asoc;
6405 struct sctp_nets *net;
6407 ca = (struct sctp_copy_all *)ptr;
6408 if (ca->m == NULL) {
6411 if (ca->inp != inp) {
6415 if (ca->sndlen > 0) {
6416 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6418 /* can't copy so we are done */
6422 #ifdef SCTP_MBUF_LOGGING
6423 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6426 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6427 if (SCTP_BUF_IS_EXTENDED(mat)) {
6428 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6436 SCTP_TCB_LOCK_ASSERT(stcb);
6437 if (stcb->asoc.alternate) {
6438 net = stcb->asoc.alternate;
6440 net = stcb->asoc.primary_destination;
6442 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6443 /* Abort this assoc with m as the user defined reason */
6445 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6447 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6448 0, M_NOWAIT, 1, MT_DATA);
6449 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6452 struct sctp_paramhdr *ph;
6454 ph = mtod(m, struct sctp_paramhdr *);
6455 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6456 ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen);
6459 * We add one here to keep the assoc from disappearing on us.
6462 atomic_add_int(&stcb->asoc.refcnt, 1);
6463 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6465 * sctp_abort_an_association calls sctp_free_asoc(), but free_asoc
6466 * will NOT free the association since we incremented the
6467 * refcnt. We do this to prevent it being freed and things
6468 * getting tricky, since we could end up (from free_asoc)
6469 * calling inpcb_free, which would take a recursive lock on
6470 * the iterator lock. But as a consequence of that, the
6471 * stcb will be returned to us unlocked; since free_asoc
6472 * returns with either no TCB or the TCB unlocked, we must
6473 * relock so we can unlock in the iterator timer :-0
6475 SCTP_TCB_LOCK(stcb);
6476 atomic_add_int(&stcb->asoc.refcnt, -1);
6477 goto no_chunk_output;
6480 ret = sctp_msg_append(stcb, net, m,
6484 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6485 /* shutdown this assoc */
6488 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
6490 if (TAILQ_EMPTY(&asoc->send_queue) &&
6491 TAILQ_EMPTY(&asoc->sent_queue) &&
6493 if (asoc->locked_on_sending) {
6497 * there is nothing queued to send, so I'm
6500 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6501 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6502 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6504 * only send SHUTDOWN the first time
6507 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6508 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6510 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6511 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6512 sctp_stop_timers_for_shutdown(stcb);
6513 sctp_send_shutdown(stcb, net);
6514 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6516 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6517 asoc->primary_destination);
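/*
 * SHUTDOWN goes out once; the shutdown timer drives retransmission and
 * the guard timer bounds how long we will wait for the peer's
 * SHUTDOWN-ACK before giving up on the association.
 */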
6519 do_chunk_output = 0;
6523 * we still got (or just got) data to send,
6524 * so set SHUTDOWN_PENDING
6527 * XXX sockets draft says that SCTP_EOF
6528 * should be sent with no data. currently,
6529 * we will allow user data to be sent first
6530 * and move to SHUTDOWN-PENDING
6532 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6533 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6534 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6535 if (asoc->locked_on_sending) {
6537 * Locked to send out the
6540 struct sctp_stream_queue_pending *sp;
6542 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6544 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6545 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6548 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6549 if (TAILQ_EMPTY(&asoc->send_queue) &&
6550 TAILQ_EMPTY(&asoc->sent_queue) &&
6551 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6553 atomic_add_int(&stcb->asoc.refcnt, 1);
6554 sctp_abort_an_association(stcb->sctp_ep, stcb,
6555 NULL, SCTP_SO_NOT_LOCKED);
6556 atomic_add_int(&stcb->asoc.refcnt, -1);
6557 goto no_chunk_output;
6559 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6560 asoc->primary_destination);
6566 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6567 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6569 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6570 (stcb->asoc.total_flight > 0) &&
6571 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6572 do_chunk_output = 0;
6574 if (do_chunk_output)
6575 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6576 else if (added_control) {
6577 int num_out = 0, reason = 0, now_filled = 0;
6581 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6582 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6583 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
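/*
 * When only control (e.g. the SHUTDOWN queued above) was added, it is
 * pushed through sctp_med_chunk_output() directly rather than taking the
 * full sctp_chunk_output() path.
 */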
6594 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6596 struct sctp_copy_all *ca;
6598 ca = (struct sctp_copy_all *)ptr;
6600 * Do a notify here? Kacheong suggests that the notify be done at
6601 * the send time.. so you would push up a notification if any send
6602 * failed. Don't know if this is feasible since the only failures we
6603 * have are "memory" related, and if you cannot get an mbuf to send
6604 * the data you surely can't get an mbuf to send up to notify the
6605 * user you can't send the data :->
6608 /* now free everything */
6609 sctp_m_freem(ca->m);
6610 SCTP_FREE(ca, SCTP_M_COPYAL);
6614 #define MC_ALIGN(m, len) do { \
6615 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
6620 static struct mbuf *
6621 sctp_copy_out_all(struct uio *uio, int len)
6623 struct mbuf *ret, *at;
6624 int left, willcpy, cancpy, error;
6626 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
6632 SCTP_BUF_LEN(ret) = 0;
6633 /* save space for the data chunk header */
6634 cancpy = M_TRAILINGSPACE(ret);
6635 willcpy = min(cancpy, left);
6638 /* Align data to the end */
6639 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6645 SCTP_BUF_LEN(at) = willcpy;
6646 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6649 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
6650 if (SCTP_BUF_NEXT(at) == NULL) {
6653 at = SCTP_BUF_NEXT(at);
6654 SCTP_BUF_LEN(at) = 0;
6655 cancpy = M_TRAILINGSPACE(at);
6656 willcpy = min(cancpy, left);
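/*
 * The chain is grown one cluster at a time, blocking (M_WAITOK) since we
 * are in user context; each mbuf is filled from the uio before the next
 * one is allocated.
 */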
6663 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6664 struct sctp_sndrcvinfo *srcv)
6667 struct sctp_copy_all *ca;
6669 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6673 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6676 memset(ca, 0, sizeof(struct sctp_copy_all));
6680 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6683 * take off the sendall flag, it would be bad if we failed to do
6686 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6687 /* get length and mbuf chain */
6689 ca->sndlen = uio->uio_resid;
6690 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6691 if (ca->m == NULL) {
6692 SCTP_FREE(ca, SCTP_M_COPYAL);
6693 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6697 /* Gather the length of the send */
6701 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6702 ca->sndlen += SCTP_BUF_LEN(mat);
6705 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6706 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6707 SCTP_ASOC_ANY_STATE,
6709 sctp_sendall_completes, inp, 1);
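/*
 * The copied message is handed to the PCB iterator: sctp_sendall_iterator()
 * runs once per matching association and sctp_sendall_completes() frees the
 * sctp_copy_all state when the walk finishes.
 */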
6711 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6712 SCTP_FREE(ca, SCTP_M_COPYAL);
6713 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6721 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6723 struct sctp_tmit_chunk *chk, *nchk;
6725 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6726 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6727 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6729 sctp_m_freem(chk->data);
6732 asoc->ctrl_queue_cnt--;
6733 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6739 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6741 struct sctp_association *asoc;
6742 struct sctp_tmit_chunk *chk, *nchk;
6743 struct sctp_asconf_chunk *acp;
6746 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6747 /* find SCTP_ASCONF chunk in queue */
6748 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6750 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6751 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6756 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6758 sctp_m_freem(chk->data);
6761 asoc->ctrl_queue_cnt--;
6762 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
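/*
 * Only ASCONFs whose serial number has already been acknowledged are
 * discarded here; unacknowledged ones stay queued so they can still be
 * (re)transmitted.
 */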
6769 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6770 struct sctp_association *asoc,
6771 struct sctp_tmit_chunk **data_list,
6773 struct sctp_nets *net)
6776 struct sctp_tmit_chunk *tp1;
6778 for (i = 0; i < bundle_at; i++) {
6779 /* off of the send queue */
6780 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6781 asoc->send_queue_cnt--;
6784 * Any chunk NOT 0 you zap the time chunk 0 gets
6785 * zapped or set based on if a RTO measurement is needed.
6788 data_list[i]->do_rtt = 0;
6791 data_list[i]->sent_rcv_time = net->last_sent_time;
6792 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6793 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6794 if (data_list[i]->whoTo == NULL) {
6795 data_list[i]->whoTo = net;
6796 atomic_add_int(&net->ref_count, 1);
6798 /* on to the sent queue */
6799 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6800 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6801 struct sctp_tmit_chunk *tpp;
6803 /* need to move back */
6805 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6807 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6811 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6814 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6816 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6821 /* This does not lower until the cum-ack passes it */
6822 asoc->sent_queue_cnt++;
6823 if ((asoc->peers_rwnd <= 0) &&
6824 (asoc->total_flight == 0) &&
6826 /* Mark the chunk as being a window probe */
6827 SCTP_STAT_INCR(sctps_windowprobed);
6829 #ifdef SCTP_AUDITING_ENABLED
6830 sctp_audit_log(0xC2, 3);
6832 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6833 data_list[i]->snd_count = 1;
6834 data_list[i]->rec.data.chunk_was_revoked = 0;
6835 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6836 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6837 data_list[i]->whoTo->flight_size,
6838 data_list[i]->book_size,
6839 (uintptr_t) data_list[i]->whoTo,
6840 data_list[i]->rec.data.TSN_seq);
6842 sctp_flight_size_increase(data_list[i]);
6843 sctp_total_flight_increase(stcb, data_list[i]);
6844 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6845 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6846 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6848 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6849 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6850 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6851 /* SWS sender side engages */
6852 asoc->peers_rwnd = 0;
6855 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
6856 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
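/*
 * The per-module hook (when the congestion control module provides one)
 * is called once per packet that carried data, so algorithms that account
 * in packets rather than bytes can update their state here.
 */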
6861 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
6862 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6867 struct sctp_tmit_chunk *chk, *nchk;
6869 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6870 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6871 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
6872 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6873 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6874 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
6875 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6876 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6877 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6878 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6879 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
6880 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
6881 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
6882 /* Stray chunks must be cleaned up */
6884 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6886 sctp_m_freem(chk->data);
6889 asoc->ctrl_queue_cnt--;
6890 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
6891 asoc->fwd_tsn_cnt--;
6892 sctp_free_a_chunk(stcb, chk, so_locked);
6893 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
6894 /* special handling, we must look into the param */
6895 if (chk != asoc->str_reset) {
6896 goto clean_up_anyway;
6904 sctp_can_we_split_this(struct sctp_tcb *stcb,
6906 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
6909 * Make a decision on if I should split a msg into multiple parts.
6910 * This is only asked of incomplete messages.
6914 * If we are doing EEOR we need to always send it if it's the
6915 * entire thing, since it might be all the guy is putting in
6918 if (goal_mtu >= length) {
6920 * If we have data outstanding,
6921 * we get another chance when the sack
6922 * arrives to transmit - wait for more data
6924 if (stcb->asoc.total_flight == 0) {
6926 * If nothing is in flight, we zero the
6934 /* You can fill the rest */
6939 * For those strange folk that make the send buffer
6940 * smaller than our fragmentation point, we can't
6941 * get a full msg in so we have to allow splitting.
6943 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
6946 if ((length <= goal_mtu) ||
6947 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
6948 /* Sub-optimal residual, don't split in non-eeor mode. */
6952 * If we reach here length is larger than the goal_mtu. Do we wish
6953 * to split it for the sake of packing the packet more fully?
6955 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
6956 /* Its ok to split it */
6957 return (min(goal_mtu, frag_point));
6959 /* Nope, can't split */
6965 sctp_move_to_outqueue(struct sctp_tcb *stcb,
6966 struct sctp_stream_out *strq,
6968 uint32_t frag_point,
6974 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6979 /* Move from the stream to the send_queue keeping track of the total */
6980 struct sctp_association *asoc;
6981 struct sctp_stream_queue_pending *sp;
6982 struct sctp_tmit_chunk *chk;
6983 struct sctp_data_chunk *dchkh;
6984 uint32_t to_move, length;
6985 uint8_t rcv_flags = 0;
6987 uint8_t send_lock_up = 0;
6989 SCTP_TCB_LOCK_ASSERT(stcb);
6992 /* sa_ignore FREED_MEMORY */
6993 sp = TAILQ_FIRST(&strq->outqueue);
6996 if (send_lock_up == 0) {
6997 SCTP_TCB_SEND_LOCK(stcb);
7000 sp = TAILQ_FIRST(&strq->outqueue);
7004 if (strq->last_msg_incomplete) {
7005 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7007 strq->last_msg_incomplete);
7008 strq->last_msg_incomplete = 0;
7012 SCTP_TCB_SEND_UNLOCK(stcb);
7017 if ((sp->msg_is_complete) && (sp->length == 0)) {
7018 if (sp->sender_all_done) {
7020 * We are doing deferred cleanup. Last time through,
7021 * when we took all the data, the sender_all_done was not set.
7024 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7025 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7026 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7027 sp->sender_all_done,
7029 sp->msg_is_complete,
7033 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7034 SCTP_TCB_SEND_LOCK(stcb);
7037 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7038 TAILQ_REMOVE(&strq->outqueue, sp, next);
7039 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7041 sctp_free_remote_addr(sp->net);
7045 sctp_m_freem(sp->data);
7048 sctp_free_a_strmoq(stcb, sp, so_locked);
7049 /* we can't be locked to it */
7051 stcb->asoc.locked_on_sending = NULL;
7053 SCTP_TCB_SEND_UNLOCK(stcb);
7056 /* back to get the next msg */
7060 * sender just finished this but still holds a
7069 /* is there some to get */
7070 if (sp->length == 0) {
7076 } else if (sp->discard_rest) {
7077 if (send_lock_up == 0) {
7078 SCTP_TCB_SEND_LOCK(stcb);
7081 /* Whack down the size */
7082 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7083 if ((stcb->sctp_socket != NULL) &&
7084 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7085 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7086 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7089 sctp_m_freem(sp->data);
7091 sp->tail_mbuf = NULL;
7101 some_taken = sp->some_taken;
7102 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7103 sp->msg_is_complete = 1;
7106 length = sp->length;
7107 if (sp->msg_is_complete) {
7108 /* The message is complete */
7109 to_move = min(length, frag_point);
7110 if (to_move == length) {
7111 /* All of it fits in the MTU */
7112 if (sp->some_taken) {
7113 rcv_flags |= SCTP_DATA_LAST_FRAG;
7114 sp->put_last_out = 1;
7116 rcv_flags |= SCTP_DATA_NOT_FRAG;
7117 sp->put_last_out = 1;
7120 /* Not all of it fits, we fragment */
7121 if (sp->some_taken == 0) {
7122 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7127 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7130 * We use a snapshot of length in case it
7131 * is expanding during the compare.
7136 if (to_move >= llen) {
7138 if (send_lock_up == 0) {
7140 * We are taking all of an incomplete msg
7141 * thus we need a send lock.
7143 SCTP_TCB_SEND_LOCK(stcb);
7145 if (sp->msg_is_complete) {
7147 * the sender finished the
7154 if (sp->some_taken == 0) {
7155 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7159 /* Nothing to take. */
7160 if (sp->some_taken) {
7169 /* If we reach here, we can copy out a chunk */
7170 sctp_alloc_a_chunk(stcb, chk);
7172 /* No chunk memory */
7178 * Setup for unordered if needed by looking at the user sent info
7181 if (sp->sinfo_flags & SCTP_UNORDERED) {
7182 rcv_flags |= SCTP_DATA_UNORDERED;
7184 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7185 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7186 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7188 /* clear out the chunk before setting up */
7189 memset(chk, 0, sizeof(*chk));
7190 chk->rec.data.rcv_flags = rcv_flags;
7192 if (to_move >= length) {
7193 /* we think we can steal the whole thing */
7194 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7195 SCTP_TCB_SEND_LOCK(stcb);
7198 if (to_move < sp->length) {
7199 /* bail, it changed */
7202 chk->data = sp->data;
7203 chk->last_mbuf = sp->tail_mbuf;
7204 /* register the stealing */
7205 sp->data = sp->tail_mbuf = NULL;
7210 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7211 chk->last_mbuf = NULL;
7212 if (chk->data == NULL) {
7213 sp->some_taken = some_taken;
7214 sctp_free_a_chunk(stcb, chk, so_locked);
7219 #ifdef SCTP_MBUF_LOGGING
7220 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7223 for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
7224 if (SCTP_BUF_IS_EXTENDED(mat)) {
7225 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7230 /* Pull off the data */
7231 m_adj(sp->data, to_move);
7232 /* Now lets work our way down and compact it */
7234 while (m && (SCTP_BUF_LEN(m) == 0)) {
7235 sp->data = SCTP_BUF_NEXT(m);
7236 SCTP_BUF_NEXT(m) = NULL;
7237 if (sp->tail_mbuf == m) {
7239 * Freeing tail? TSNH since
7240 * we supposedly were taking less
7241 * than the sp->length.
7244 panic("Huh, freing tail? - TSNH");
7246 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7247 sp->tail_mbuf = sp->data = NULL;
7256 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7257 chk->copy_by_ref = 1;
7259 chk->copy_by_ref = 0;
7262 * get last_mbuf and counts of mb usage. This is ugly but hopefully
7263 * it's only one mbuf.
7265 if (chk->last_mbuf == NULL) {
7266 chk->last_mbuf = chk->data;
7267 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7268 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7271 if (to_move > length) {
7272 /*- This should not happen either
7273 * since we always lower to_move to the size
7274 * of sp->length if it's larger.
7277 panic("Huh, how can to_move be larger?");
7279 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7283 atomic_subtract_int(&sp->length, to_move);
7285 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7286 /* Not enough room for a chunk header, get some */
7289 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
7292 * we're in trouble here. _PREPEND below will free
7293 * all the data if there is no leading space, so we
7294 * must put the data back and restore.
7296 if (send_lock_up == 0) {
7297 SCTP_TCB_SEND_LOCK(stcb);
7300 if (chk->data == NULL) {
7301 /* unsteal the data */
7302 sp->data = chk->data;
7303 sp->tail_mbuf = chk->last_mbuf;
7307 /* reassemble the data */
7309 sp->data = chk->data;
7310 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7312 sp->some_taken = some_taken;
7313 atomic_add_int(&sp->length, to_move);
7316 sctp_free_a_chunk(stcb, chk, so_locked);
7320 SCTP_BUF_LEN(m) = 0;
7321 SCTP_BUF_NEXT(m) = chk->data;
7323 M_ALIGN(chk->data, 4);
7326 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
7327 if (chk->data == NULL) {
7328 /* HELP, TSNH since we assured it would not above? */
7330 panic("prepend failes HELP?");
7332 SCTP_PRINTF("prepend fails HELP?\n");
7333 sctp_free_a_chunk(stcb, chk, so_locked);
7339 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7340 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7341 chk->book_size_scale = 0;
7342 chk->sent = SCTP_DATAGRAM_UNSENT;
7345 chk->asoc = &stcb->asoc;
7346 chk->pad_inplace = 0;
7347 chk->no_fr_allowed = 0;
7348 chk->rec.data.stream_seq = strq->next_sequence_send;
7349 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7350 strq->next_sequence_send++;
7352 chk->rec.data.stream_number = sp->stream;
7353 chk->rec.data.payloadtype = sp->ppid;
7354 chk->rec.data.context = sp->context;
7355 chk->rec.data.doing_fast_retransmit = 0;
7357 chk->rec.data.timetodrop = sp->ts;
7358 chk->flags = sp->act_flags;
7361 chk->whoTo = sp->net;
7362 atomic_add_int(&chk->whoTo->ref_count, 1);
7366 if (sp->holds_key_ref) {
7367 chk->auth_keyid = sp->auth_keyid;
7368 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7369 chk->holds_key_ref = 1;
7371 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7372 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7373 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7374 (uintptr_t) stcb, sp->length,
7375 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7376 chk->rec.data.TSN_seq);
7378 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7380 * Put the rest of the things in place now. Size was done earlier in
7381 * previous loop prior to padding.
7384 #ifdef SCTP_ASOCLOG_OF_TSNS
7385 SCTP_TCB_LOCK_ASSERT(stcb);
7386 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7387 asoc->tsn_out_at = 0;
7388 asoc->tsn_out_wrapped = 1;
7390 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7391 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7392 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7393 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7394 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7395 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7396 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7397 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7401 dchkh->ch.chunk_type = SCTP_DATA;
7402 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7403 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7404 dchkh->dp.stream_id = htons(strq->stream_no);
7405 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7406 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7407 dchkh->ch.chunk_length = htons(chk->send_size);
7408 /* Now advance the chk->send_size by the actual pad needed. */
7409 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7414 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7415 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7416 chk->pad_inplace = 1;
7418 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7419 /* pad added an mbuf */
7420 chk->last_mbuf = lm;
7422 chk->send_size += pads;
7424 if (PR_SCTP_ENABLED(chk->flags)) {
7425 asoc->pr_sctp_cnt++;
7427 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7428 /* All done pull and kill the message */
7429 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7430 if (sp->put_last_out == 0) {
7431 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7432 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7433 sp->sender_all_done,
7435 sp->msg_is_complete,
7439 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7440 SCTP_TCB_SEND_LOCK(stcb);
7443 TAILQ_REMOVE(&strq->outqueue, sp, next);
7444 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7446 sctp_free_remote_addr(sp->net);
7450 sctp_m_freem(sp->data);
7453 sctp_free_a_strmoq(stcb, sp, so_locked);
7455 /* we can't be locked to it */
7457 stcb->asoc.locked_on_sending = NULL;
7459 /* more to go, we are locked */
7462 asoc->chunks_on_out_queue++;
7463 strq->chunks_on_queues++;
7464 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7465 asoc->send_queue_cnt++;
7468 SCTP_TCB_SEND_UNLOCK(stcb);
7475 sctp_fill_outqueue(struct sctp_tcb *stcb,
7476 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7477 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7482 struct sctp_association *asoc;
7483 struct sctp_stream_out *strq;
7484 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7487 SCTP_TCB_LOCK_ASSERT(stcb);
7489 switch (net->ro._l_addr.sa.sa_family) {
7492 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7497 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7502 goal_mtu = net->mtu;
7505 /* Need an allowance for the data chunk header too */
7506 goal_mtu -= sizeof(struct sctp_data_chunk);
7508 /* must make even word boundary */
7509 goal_mtu &= 0xfffffffc;
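/*
 * Masking with 0xfffffffc drops the low two bits, rounding the per-packet
 * goal down to a multiple of 4 so it lines up with the 32-bit padding
 * applied to every chunk.
 */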
7510 if (asoc->locked_on_sending) {
7511 /* We are stuck on one stream until the message completes. */
7512 strq = asoc->locked_on_sending;
7515 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7518 while ((goal_mtu > 0) && strq) {
7521 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7522 &giveup, eeor_mode, &bail, so_locked);
7524 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7527 asoc->locked_on_sending = strq;
7528 if ((moved_how_much == 0) || (giveup) || bail)
7529 /* no more to move for now */
7532 asoc->locked_on_sending = NULL;
7533 if ((giveup) || bail) {
7536 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7541 total_moved += moved_how_much;
7542 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7543 goal_mtu &= 0xfffffffc;
7548 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7550 if (total_moved == 0) {
7551 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7552 (net == stcb->asoc.primary_destination)) {
7553 /* ran dry for primary network net */
7554 SCTP_STAT_INCR(sctps_primary_randry);
7555 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7556 /* ran dry with CMT on */
7557 SCTP_STAT_INCR(sctps_cmt_randry);
7563 sctp_fix_ecn_echo(struct sctp_association *asoc)
7565 struct sctp_tmit_chunk *chk;
7567 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7568 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7569 chk->sent = SCTP_DATAGRAM_UNSENT;
7575 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7577 struct sctp_association *asoc;
7578 struct sctp_tmit_chunk *chk;
7579 struct sctp_stream_queue_pending *sp;
7586 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7587 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7588 if (sp->net == net) {
7589 sctp_free_remote_addr(sp->net);
7594 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7595 if (chk->whoTo == net) {
7596 sctp_free_remote_addr(chk->whoTo);
7603 sctp_med_chunk_output(struct sctp_inpcb *inp,
7604 struct sctp_tcb *stcb,
7605 struct sctp_association *asoc,
7608 int control_only, int from_where,
7609 struct timeval *now, int *now_filled, int frag_point, int so_locked
7610 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7616 * Ok this is the generic chunk service queue. we must do the
7617 * following: - Service the stream queue that is next, moving any
7618 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7619 * LAST to the out queue in one pass) and assigning TSN's - Check to
7620 * see if the cwnd/rwnd allows any output, if so we go ahead and
7621 * formulate and send the low level chunks, making sure to combine
7622 * any control in the control chunk queue also.
7624 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7625 struct mbuf *outchain, *endoutchain;
7626 struct sctp_tmit_chunk *chk, *nchk;
7628 /* temp arrays for unlinking */
7629 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7630 int no_fragmentflg, error;
7631 unsigned int max_rwnd_per_dest, max_send_per_dest;
7632 int one_chunk, hbflag, skip_data_for_this_net;
7633 int asconf, cookie, no_out_cnt;
7634 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7635 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7637 uint32_t auth_offset = 0;
7638 struct sctp_auth_chunk *auth = NULL;
7639 uint16_t auth_keyid;
7640 int override_ok = 1;
7641 int skip_fill_up = 0;
7642 int data_auth_reqd = 0;
7645 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7651 auth_keyid = stcb->asoc.authinfo.active_keyid;
7653 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7654 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7655 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7660 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7662 * First lets prime the pump. For each destination, if there is room
7663 * in the flight size, attempt to pull an MTU's worth out of the
7664 * stream queues into the general send_queue
7666 #ifdef SCTP_AUDITING_ENABLED
7667 sctp_audit_log(0xC2, 2);
7669 SCTP_TCB_LOCK_ASSERT(stcb);
7671 if ((control_only) || (asoc->stream_reset_outstanding))
7676 /* Nothing possible to send? */
7677 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7678 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7679 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7680 TAILQ_EMPTY(&asoc->send_queue) &&
7681 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
7686 if (asoc->peers_rwnd == 0) {
7687 /* No room in peers rwnd */
7689 if (asoc->total_flight > 0) {
7690 /* we are allowed one chunk in flight */
7694 if (stcb->asoc.ecn_echo_cnt_onq) {
7695 /* Record where a sack goes, if any */
7696 if (no_data_chunks &&
7697 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7698 /* Nothing but ECNe to send - we don't do that */
7699 goto nothing_to_send;
7701 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7702 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7703 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7704 sack_goes_to = chk->whoTo;
7709 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7710 if (stcb->sctp_socket)
7711 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7713 max_send_per_dest = 0;
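/*
 * With CMT buffer splitting, each destination is capped at an even share
 * of the peer's receive window (plus bytes already in flight) and of the
 * local socket send buffer; the checks further down skip a net once it
 * exceeds these caps.
 */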
7714 if (no_data_chunks == 0) {
7715 /* How many non-directed chunks are there? */
7716 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7717 if (chk->whoTo == NULL) {
7719 * We already have non-directed chunks on
7720 * the queue, no need to do a fill-up.
7728 if ((no_data_chunks == 0) &&
7729 (skip_fill_up == 0) &&
7730 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7731 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7733 * This for loop we are in takes in each net, if
7734 * it's got space in cwnd and has data sent to it
7735 * (when CMT is off) then it calls
7736 * sctp_fill_outqueue for the net. This gets data on
7737 * the send queue for that network.
7739 * In sctp_fill_outqueue TSN's are assigned and data is
7740 * copied out of the stream buffers. Note mostly
7741 * copy by reference (we hope).
7743 net->window_probe = 0;
7744 if ((net != stcb->asoc.alternate) &&
7745 ((net->dest_state & SCTP_ADDR_PF) ||
7746 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7747 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7748 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7749 sctp_log_cwnd(stcb, net, 1,
7750 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7754 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7755 (net->flight_size == 0)) {
7756 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7758 if (net->flight_size >= net->cwnd) {
7759 /* skip this network, no room - can't fill */
7760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7761 sctp_log_cwnd(stcb, net, 3,
7762 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7766 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7767 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7769 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7771 /* memory alloc failure */
7777 /* now service each destination and send out what we can for it */
7778 /* Nothing to send? */
7779 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7780 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7781 TAILQ_EMPTY(&asoc->send_queue)) {
7785 if (asoc->sctp_cmt_on_off > 0) {
7786 /* get the last start point */
7787 start_at = asoc->last_net_cmt_send_started;
7788 if (start_at == NULL) {
7789 /* null so to beginning */
7790 start_at = TAILQ_FIRST(&asoc->nets);
7792 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7793 if (start_at == NULL) {
7794 start_at = TAILQ_FIRST(&asoc->nets);
7797 asoc->last_net_cmt_send_started = start_at;
7799 start_at = TAILQ_FIRST(&asoc->nets);
7801 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7802 if (chk->whoTo == NULL) {
7803 if (asoc->alternate) {
7804 chk->whoTo = asoc->alternate;
7806 chk->whoTo = asoc->primary_destination;
7808 atomic_add_int(&chk->whoTo->ref_count, 1);
7811 old_start_at = NULL;
7812 again_one_more_time:
7813 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7814 /* how much can we send? */
7815 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7816 if (old_start_at && (old_start_at == net)) {
7817 /* through list completely. */
7821 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7822 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7823 (net->flight_size >= net->cwnd)) {
7825 * Nothing on control or asconf and flight is full,
7826 * we can skip even in the CMT case.
7831 endoutchain = outchain = NULL;
7834 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7835 skip_data_for_this_net = 1;
7837 skip_data_for_this_net = 0;
7839 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7841 * if we have a route and an ifp check to see if we
7842 * have room to send to this guy
7846 ifp = net->ro.ro_rt->rt_ifp;
7847 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7848 SCTP_STAT_INCR(sctps_ifnomemqueued);
7849 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7850 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7855 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7858 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7863 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7873 if (mtu > asoc->peers_rwnd) {
7874 if (asoc->total_flight > 0) {
7875 /* We have a packet in flight somewhere */
7876 r_mtu = asoc->peers_rwnd;
7878 /* We are always allowed to send one MTU out */
7885 /************************/
7886 /* ASCONF transmission */
7887 /************************/
7888 /* Now first lets go through the asconf queue */
7889 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7890 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
7893 if (chk->whoTo == NULL) {
7894 if (asoc->alternate == NULL) {
7895 if (asoc->primary_destination != net) {
7899 if (asoc->alternate != net) {
7904 if (chk->whoTo != net) {
7908 if (chk->data == NULL) {
7911 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
7912 chk->sent != SCTP_DATAGRAM_RESEND) {
7916 * if no AUTH is yet included and this chunk
7917 * requires it, make sure to account for it. We
7918 * don't apply the size until the AUTH chunk is
7919 * actually added below in case there is no room for
7920 * this chunk. NOTE: we overload the use of "omtu"
7923 if ((auth == NULL) &&
7924 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7925 stcb->asoc.peer_auth_chunks)) {
7926 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7929 /* Here we do NOT factor the r_mtu */
7930 if ((chk->send_size < (int)(mtu - omtu)) ||
7931 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7933 * We probably should glom the mbuf chain
7934 * from the chk->data for control but the
7935 * problem is it becomes yet one more level
7936 * of tracking to do if for some reason
7937 * output fails. Then I have got to
7938 * reconstruct the merged control chain.. el
7939 * yucko.. for now we take the easy way and do the copy.
7943 * Add an AUTH chunk, if chunk requires it
7944 * save the offset into the chain for AUTH
7946 if ((auth == NULL) &&
7947 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7948 stcb->asoc.peer_auth_chunks))) {
7949 outchain = sctp_add_auth_chunk(outchain,
7954 chk->rec.chunk_id.id);
7955 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7957 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
7958 (int)chk->rec.chunk_id.can_take_data,
7959 chk->send_size, chk->copy_by_ref);
7960 if (outchain == NULL) {
7962 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7965 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7966 /* update our MTU size */
7967 if (mtu > (chk->send_size + omtu))
7968 mtu -= (chk->send_size + omtu);
7971 to_out += (chk->send_size + omtu);
7972 /* Do clear IP_DF ? */
7973 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7976 if (chk->rec.chunk_id.can_take_data)
7979 * set hb flag since we can use these for
7985 * should sysctl this: don't bundle data
7986 * with ASCONF since it requires AUTH
7989 chk->sent = SCTP_DATAGRAM_SENT;
7990 if (chk->whoTo == NULL) {
7992 atomic_add_int(&net->ref_count, 1);
7997 * Ok we are out of room but we can
7998 * output without affecting the
7999 * flight size since this little guy
8000 * is a control only packet.
8002 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8004 * do NOT clear the asconf flag as
8005 * it is used to do appropriate
8006 * source address selection.
8008 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8009 (struct sockaddr *)&net->ro._l_addr,
8010 outchain, auth_offset, auth,
8011 stcb->asoc.authinfo.active_keyid,
8012 no_fragmentflg, 0, asconf,
8013 inp->sctp_lport, stcb->rport,
8014 htonl(stcb->asoc.peer_vtag),
8018 if (error == ENOBUFS) {
8019 asoc->ifp_had_enobuf = 1;
8020 SCTP_STAT_INCR(sctps_lowlevelerr);
8022 if (from_where == 0) {
8023 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8025 if (*now_filled == 0) {
8026 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8028 *now = net->last_sent_time;
8030 net->last_sent_time = *now;
8033 /* error, could not output */
8034 if (error == EHOSTUNREACH) {
8040 sctp_move_chunks_from_net(stcb, net);
8045 asoc->ifp_had_enobuf = 0;
8046 if (*now_filled == 0) {
8047 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8049 *now = net->last_sent_time;
8051 net->last_sent_time = *now;
8055 * increase the number we sent, if a
8056 * cookie is sent we don't tell them
8059 outchain = endoutchain = NULL;
8063 *num_out += ctl_cnt;
8064 /* recalc a clean slate and setup */
8065 switch (net->ro._l_addr.sa.sa_family) {
8068 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8073 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8086 /************************/
8087 /* Control transmission */
8088 /************************/
8089 /* Now first lets go through the control queue */
8090 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8091 if ((sack_goes_to) &&
8092 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8093 (chk->whoTo != sack_goes_to)) {
8095 * if we have a sack in queue, and we are
8096 * looking at an ecn echo that is NOT queued
8097 * to where the sack is going..
8099 if (chk->whoTo == net) {
8101 * Don't transmit it to where its
8102 * going (current net)
8105 } else if (sack_goes_to == net) {
8107 * But do transmit it to this
8110 goto skip_net_check;
8113 if (chk->whoTo == NULL) {
8114 if (asoc->alternate == NULL) {
8115 if (asoc->primary_destination != net) {
8119 if (asoc->alternate != net) {
8124 if (chk->whoTo != net) {
8129 if (chk->data == NULL) {
8132 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8134 * It must be unsent. Cookies and ASCONF's
8135 * hang around, but their timers will force them out
8136 * when marked for resend.
8141 * if no AUTH is yet included and this chunk
8142 * requires it, make sure to account for it. We
8143 * don't apply the size until the AUTH chunk is
8144 * actually added below in case there is no room for
8145 * this chunk. NOTE: we overload the use of "omtu"
8148 if ((auth == NULL) &&
8149 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8150 stcb->asoc.peer_auth_chunks)) {
8151 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8154 /* Here we do NOT factor the r_mtu */
8155 if ((chk->send_size <= (int)(mtu - omtu)) ||
8156 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8158 * We probably should glom the mbuf chain
8159 * from the chk->data for control but the
8160 * problem is it becomes yet one more level
8161 * of tracking to do if for some reason
8162 * output fails. Then I have got to
8163 * reconstruct the merged control chain.. el
8164 * yucko.. for now we take the easy way and do the copy.
8168 * Add an AUTH chunk, if chunk requires it
8169 * save the offset into the chain for AUTH
8171 if ((auth == NULL) &&
8172 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8173 stcb->asoc.peer_auth_chunks))) {
8174 outchain = sctp_add_auth_chunk(outchain,
8179 chk->rec.chunk_id.id);
8180 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8182 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8183 (int)chk->rec.chunk_id.can_take_data,
8184 chk->send_size, chk->copy_by_ref);
8185 if (outchain == NULL) {
8187 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8190 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8191 /* update our MTU size */
8192 if (mtu > (chk->send_size + omtu))
8193 mtu -= (chk->send_size + omtu);
8196 to_out += (chk->send_size + omtu);
8197 /* Do clear IP_DF ? */
8198 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8201 if (chk->rec.chunk_id.can_take_data)
8203 /* Mark things to be removed, if needed */
8204 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8205 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8206 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8207 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8208 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8209 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8210 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8211 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8212 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8213 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8214 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8215 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8218 /* remove these chunks at the end */
8219 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8220 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8221 /* turn off the timer */
8222 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8223 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8224 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8230 * Other chunks, since they have
8231 * timers running (i.e. COOKIE) we
8232 * just "trust" that it gets sent or
8236 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8239 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8241 * Increment ecne send count
8242 * here this means we may be
8243 * over-zealous in our
8244 * counting if the send
8245 * fails, but it's the best
8246 * place to do it (we used
8247 * to do it in the queue of
8248 * the chunk, but that did
8249 * not tell how many times
8252 SCTP_STAT_INCR(sctps_sendecne);
8254 chk->sent = SCTP_DATAGRAM_SENT;
8255 if (chk->whoTo == NULL) {
8257 atomic_add_int(&net->ref_count, 1);
8263 * Ok we are out of room but we can
8264 * output without affecting the
8265 * flight size since this little guy
8266 * is a control only packet.
8269 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8271 * do NOT clear the asconf
8272 * flag as it is used to do
8273 * appropriate source
8274 * address selection.
8278 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8281 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8282 (struct sockaddr *)&net->ro._l_addr,
8285 stcb->asoc.authinfo.active_keyid,
8286 no_fragmentflg, 0, asconf,
8287 inp->sctp_lport, stcb->rport,
8288 htonl(stcb->asoc.peer_vtag),
8292 if (error == ENOBUFS) {
8293 asoc->ifp_had_enobuf = 1;
8294 SCTP_STAT_INCR(sctps_lowlevelerr);
8296 if (from_where == 0) {
8297 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8299 /* error, could not output */
8301 if (*now_filled == 0) {
8302 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8304 *now = net->last_sent_time;
8306 net->last_sent_time = *now;
8310 if (error == EHOSTUNREACH) {
8316 sctp_move_chunks_from_net(stcb, net);
8321 asoc->ifp_had_enobuf = 0;
8322 /* Only HB or ASCONF advances time */
8324 if (*now_filled == 0) {
8325 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8327 *now = net->last_sent_time;
8329 net->last_sent_time = *now;
8334 * increase the number we sent, if a
8335 * cookie is sent we don't tell them
8338 outchain = endoutchain = NULL;
8342 *num_out += ctl_cnt;
8343 /* recalc a clean slate and setup */
8344 switch (net->ro._l_addr.sa.sa_family) {
8347 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8352 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8365 /* JRI: if dest is in PF state, do not send data to it */
8366 if ((asoc->sctp_cmt_on_off > 0) &&
8367 (net != stcb->asoc.alternate) &&
8368 (net->dest_state & SCTP_ADDR_PF)) {
8371 if (net->flight_size >= net->cwnd) {
8374 if ((asoc->sctp_cmt_on_off > 0) &&
8375 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8376 (net->flight_size > max_rwnd_per_dest)) {
8380 * We need a specific accounting for the usage of the send
8381 * buffer. We also need to check the number of messages per
8382 * net. For now, this is better than nothing and it is disabled
8385 if ((asoc->sctp_cmt_on_off > 0) &&
8386 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8387 (max_send_per_dest > 0) &&
8388 (net->flight_size > max_send_per_dest)) {
8391 /*********************/
8392 /* Data transmission */
8393 /*********************/
8395 * if AUTH for DATA is required and no AUTH has been added
8396 * yet, account for this in the mtu now... if no data can be
8397 * bundled, this adjustment won't matter anyway since the
8398 * packet will be going out...
8400 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8401 stcb->asoc.peer_auth_chunks);
8402 if (data_auth_reqd && (auth == NULL)) {
8403 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
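/*
 * Reserve the AUTH chunk's length up front so the bundling checks
 * below never overcommit the packet; the AUTH chunk itself is only
 * prepended once the first DATA chunk is actually added.
 */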
8405 /* now lets add any data within the MTU constraints */
8406 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8409 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8410 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8417 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8418 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8428 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8429 (skip_data_for_this_net == 0)) ||
8431 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8432 if (no_data_chunks) {
8433 /* let only control go out */
8437 if (net->flight_size >= net->cwnd) {
8438 /* skip this net, no room for data */
8442 if ((chk->whoTo != NULL) &&
8443 (chk->whoTo != net)) {
8444 /* Don't send the chunk on this net */
8447 if (asoc->sctp_cmt_on_off == 0) {
8448 if ((asoc->alternate) &&
8449 (asoc->alternate != net) &&
8450 (chk->whoTo == NULL)) {
8452 } else if ((net != asoc->primary_destination) &&
8453 (asoc->alternate == NULL) &&
8454 (chk->whoTo == NULL)) {
8458 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8460 * strange, we have a chunk that is
8461 * too big for its destination and
8462 * yet the fragment-ok flag is not set.
8463 * Something went wrong when the
8464 * PMTU changed...we did not mark
8465 * this chunk for some reason?? I
8466 * will fix it here by letting IP
8467 * fragment it for now and printing
8468 * a warning. This really should not
8471 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8472 chk->send_size, mtu);
8473 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
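/*
 * Setting CHUNK_FLAGS_FRAGMENT_OK clears no_fragmentflg below, which
 * in turn lets the low-level output clear IP_DF so IP can fragment
 * the oversized packet instead of dropping it.
 */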
8475 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8476 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8477 struct sctp_data_chunk *dchkh;
8479 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8480 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8482 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8483 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8484 /* ok we will add this one */
8487 * Add an AUTH chunk, if chunk
8488 * requires it, save the offset into
8489 * the chain for AUTH
8491 if (data_auth_reqd) {
8493 outchain = sctp_add_auth_chunk(outchain,
8499 auth_keyid = chk->auth_keyid;
8501 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8502 } else if (override_ok) {
8507 auth_keyid = chk->auth_keyid;
8509 } else if (auth_keyid != chk->auth_keyid) {
8517 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8518 chk->send_size, chk->copy_by_ref);
8519 if (outchain == NULL) {
8520 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8521 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8522 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8525 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8528 /* update our MTU size */
8529 /* Do clear IP_DF ? */
8530 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8533 /* unsigned subtraction of mtu */
8534 if (mtu > chk->send_size)
8535 mtu -= chk->send_size;
8538 /* unsigned subtraction of r_mtu */
8539 if (r_mtu > chk->send_size)
8540 r_mtu -= chk->send_size;
8544 to_out += chk->send_size;
8545 if ((to_out > mx_mtu) && no_fragmentflg) {
8547 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8549 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8553 chk->window_probe = 0;
8554 data_list[bundle_at++] = chk;
8555 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8558 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8559 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8560 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8562 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8564 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8565 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8575 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8577 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8578 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8579 data_list[0]->window_probe = 1;
8580 net->window_probe = 1;
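/*
 * Out of MTU/rwnd space with nothing in flight: the single chunk we
 * bundled becomes a window probe so the peer is nudged into sending
 * an updated a_rwnd.
 */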
8586 * Must be sent in order of the
8587 * TSN's (on a network)
8591 } /* for (chunk gather loop for this net) */
8592 } /* if asoc.state OPEN */
8594 /* Is there something to send for this destination? */
8596 /* We may need to start a control timer or two */
8598 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8601 * do NOT clear the asconf flag as it is
8602 * used to do appropriate source address
8607 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8610 /* must start a send timer if data is being sent */
8611 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8613 * no timer running on this destination
8616 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8618 /* Now send it, if there is anything to send :> */
8619 if ((error = sctp_lowlevel_chunk_output(inp,
8622 (struct sockaddr *)&net->ro._l_addr,
8630 inp->sctp_lport, stcb->rport,
8631 htonl(stcb->asoc.peer_vtag),
8635 /* error, we could not output */
8636 if (error == ENOBUFS) {
8637 SCTP_STAT_INCR(sctps_lowlevelerr);
8638 asoc->ifp_had_enobuf = 1;
8640 if (from_where == 0) {
8641 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8643 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8645 if (*now_filled == 0) {
8646 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8648 *now = net->last_sent_time;
8650 net->last_sent_time = *now;
8654 if (error == EHOSTUNREACH) {
8656 * Destination went unreachable
8659 sctp_move_chunks_from_net(stcb, net);
8663 * I add this line to be paranoid. As far as
8664 * I can tell the continue takes us back to
8665 * the top of the for loop, but just to make sure
8666 * I will reset these again here.
8668 ctl_cnt = bundle_at = 0;
8669 continue; /* This takes us back to the
8670 * for() for the nets. */
8672 asoc->ifp_had_enobuf = 0;
8677 if (bundle_at || hbflag) {
8678 /* For data/asconf and hb set time */
8679 if (*now_filled == 0) {
8680 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8682 *now = net->last_sent_time;
8684 net->last_sent_time = *now;
8688 *num_out += (ctl_cnt + bundle_at);
8691 /* setup for a RTO measurement */
8692 tsns_sent = data_list[0]->rec.data.TSN_seq;
8693 /* fill time if not already filled */
8694 if (*now_filled == 0) {
8695 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8697 *now = asoc->time_last_sent;
8699 asoc->time_last_sent = *now;
8701 if (net->rto_needed) {
8702 data_list[0]->do_rtt = 1;
8703 net->rto_needed = 0;
8705 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8706 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
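/*
 * sctp_clean_up_datalist() moves the just-sent DATA chunks from the
 * send queue to the sent queue and updates the flight-size and peer
 * rwnd accounting for them.
 */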
8712 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8713 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8716 if (old_start_at == NULL) {
8717 old_start_at = start_at;
8718 start_at = TAILQ_FIRST(&asoc->nets);
8720 goto again_one_more_time;
8723 * At the end there should be no non-timed chunks hanging on this
8726 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8727 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8729 if ((*num_out == 0) && (*reason_code == 0)) {
8734 sctp_clean_up_ctl(stcb, asoc, so_locked);
8739 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8742 * Prepend an OPERATION_ERROR chunk header and put it on the end of
8743 * the control chunk queue.
8745 struct sctp_chunkhdr *hdr;
8746 struct sctp_tmit_chunk *chk;
8749 SCTP_TCB_LOCK_ASSERT(stcb);
8750 sctp_alloc_a_chunk(stcb, chk);
8753 sctp_m_freem(op_err);
8756 chk->copy_by_ref = 0;
8757 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
8758 if (op_err == NULL) {
8759 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
8764 while (mat != NULL) {
8765 chk->send_size += SCTP_BUF_LEN(mat);
8766 mat = SCTP_BUF_NEXT(mat);
8768 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8769 chk->rec.chunk_id.can_take_data = 1;
8770 chk->sent = SCTP_DATAGRAM_UNSENT;
8773 chk->asoc = &stcb->asoc;
8776 hdr = mtod(op_err, struct sctp_chunkhdr *);
8777 hdr->chunk_type = SCTP_OPERATION_ERROR;
8778 hdr->chunk_flags = 0;
8779 hdr->chunk_length = htons(chk->send_size);
8780 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8783 chk->asoc->ctrl_queue_cnt++;
8787 sctp_send_cookie_echo(struct mbuf *m,
8789 struct sctp_tcb *stcb,
8790 struct sctp_nets *net)
8793 * pull out the cookie and put it at the front of the control chunk
8797 struct mbuf *cookie;
8798 struct sctp_paramhdr parm, *phdr;
8799 struct sctp_chunkhdr *hdr;
8800 struct sctp_tmit_chunk *chk;
8801 uint16_t ptype, plen;
8803 /* First find the cookie in the param area */
8805 at = offset + sizeof(struct sctp_init_chunk);
8807 SCTP_TCB_LOCK_ASSERT(stcb);
8809 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8813 ptype = ntohs(phdr->param_type);
8814 plen = ntohs(phdr->param_length);
8815 if (ptype == SCTP_STATE_COOKIE) {
8818 /* found the cookie */
8819 if ((pad = (plen % 4))) {
8822 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
8823 if (cookie == NULL) {
8827 #ifdef SCTP_MBUF_LOGGING
8828 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8831 for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
8832 if (SCTP_BUF_IS_EXTENDED(mat)) {
8833 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8840 at += SCTP_SIZE32(plen);
8842 if (cookie == NULL) {
8843 /* Did not find the cookie */
8846 /* ok, we got the cookie; let's change it into a COOKIE-ECHO chunk */
8848 /* first the change from param to cookie */
8849 hdr = mtod(cookie, struct sctp_chunkhdr *);
8850 hdr->chunk_type = SCTP_COOKIE_ECHO;
8851 hdr->chunk_flags = 0;
8852 /* get the chunk stuff now and place it in the FRONT of the queue */
8853 sctp_alloc_a_chunk(stcb, chk);
8856 sctp_m_freem(cookie);
8859 chk->copy_by_ref = 0;
8860 chk->send_size = plen;
8861 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8862 chk->rec.chunk_id.can_take_data = 0;
8863 chk->sent = SCTP_DATAGRAM_UNSENT;
8865 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8866 chk->asoc = &stcb->asoc;
8869 atomic_add_int(&chk->whoTo->ref_count, 1);
8870 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
8871 chk->asoc->ctrl_queue_cnt++;
8876 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
8880 struct sctp_nets *net)
8883 * take a HB request and make it into a HB ack and send it.
8885 struct mbuf *outchain;
8886 struct sctp_chunkhdr *chdr;
8887 struct sctp_tmit_chunk *chk;
8891 /* must have a net pointer */
8894 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
8895 if (outchain == NULL) {
8896 /* gak out of memory */
8899 #ifdef SCTP_MBUF_LOGGING
8900 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8903 for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
8904 if (SCTP_BUF_IS_EXTENDED(mat)) {
8905 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8910 chdr = mtod(outchain, struct sctp_chunkhdr *);
8911 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
8912 chdr->chunk_flags = 0;
8913 if (chk_length % 4) {
8915 uint32_t cpthis = 0;
8918 padlen = 4 - (chk_length % 4);
8919 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
8921 sctp_alloc_a_chunk(stcb, chk);
8924 sctp_m_freem(outchain);
8927 chk->copy_by_ref = 0;
8928 chk->send_size = chk_length;
8929 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
8930 chk->rec.chunk_id.can_take_data = 1;
8931 chk->sent = SCTP_DATAGRAM_UNSENT;
8934 chk->asoc = &stcb->asoc;
8935 chk->data = outchain;
8937 atomic_add_int(&chk->whoTo->ref_count, 1);
8938 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8939 chk->asoc->ctrl_queue_cnt++;
8943 sctp_send_cookie_ack(struct sctp_tcb *stcb)
8945 /* formulate and queue a cookie-ack back to sender */
8946 struct mbuf *cookie_ack;
8947 struct sctp_chunkhdr *hdr;
8948 struct sctp_tmit_chunk *chk;
8951 SCTP_TCB_LOCK_ASSERT(stcb);
8953 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
8954 if (cookie_ack == NULL) {
8958 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
8959 sctp_alloc_a_chunk(stcb, chk);
8962 sctp_m_freem(cookie_ack);
8965 chk->copy_by_ref = 0;
8966 chk->send_size = sizeof(struct sctp_chunkhdr);
8967 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
8968 chk->rec.chunk_id.can_take_data = 1;
8969 chk->sent = SCTP_DATAGRAM_UNSENT;
8972 chk->asoc = &stcb->asoc;
8973 chk->data = cookie_ack;
8974 if (chk->asoc->last_control_chunk_from != NULL) {
8975 chk->whoTo = chk->asoc->last_control_chunk_from;
8976 atomic_add_int(&chk->whoTo->ref_count, 1);
8980 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
8981 hdr->chunk_type = SCTP_COOKIE_ACK;
8982 hdr->chunk_flags = 0;
8983 hdr->chunk_length = htons(chk->send_size);
8984 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
8985 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8986 chk->asoc->ctrl_queue_cnt++;
8992 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
8994 /* formulate and queue a SHUTDOWN-ACK back to the sender */
8995 struct mbuf *m_shutdown_ack;
8996 struct sctp_shutdown_ack_chunk *ack_cp;
8997 struct sctp_tmit_chunk *chk;
8999 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9000 if (m_shutdown_ack == NULL) {
9004 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9005 sctp_alloc_a_chunk(stcb, chk);
9008 sctp_m_freem(m_shutdown_ack);
9011 chk->copy_by_ref = 0;
9012 chk->send_size = sizeof(struct sctp_chunkhdr);
9013 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9014 chk->rec.chunk_id.can_take_data = 1;
9015 chk->sent = SCTP_DATAGRAM_UNSENT;
9018 chk->asoc = &stcb->asoc;
9019 chk->data = m_shutdown_ack;
9022 atomic_add_int(&chk->whoTo->ref_count, 1);
9024 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9025 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9026 ack_cp->ch.chunk_flags = 0;
9027 ack_cp->ch.chunk_length = htons(chk->send_size);
9028 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9029 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9030 chk->asoc->ctrl_queue_cnt++;
9035 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9037 /* formulate and queue a SHUTDOWN to the sender */
9038 struct mbuf *m_shutdown;
9039 struct sctp_shutdown_chunk *shutdown_cp;
9040 struct sctp_tmit_chunk *chk;
9042 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9043 if (m_shutdown == NULL) {
9047 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9048 sctp_alloc_a_chunk(stcb, chk);
9051 sctp_m_freem(m_shutdown);
9054 chk->copy_by_ref = 0;
9055 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9056 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9057 chk->rec.chunk_id.can_take_data = 1;
9058 chk->sent = SCTP_DATAGRAM_UNSENT;
9061 chk->asoc = &stcb->asoc;
9062 chk->data = m_shutdown;
9065 atomic_add_int(&chk->whoTo->ref_count, 1);
9067 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9068 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9069 shutdown_cp->ch.chunk_flags = 0;
9070 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9071 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9072 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9073 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9074 chk->asoc->ctrl_queue_cnt++;
9079 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9082 * formulate and queue an ASCONF to the peer. ASCONF parameters
9083 * should be queued on the assoc queue.
9085 struct sctp_tmit_chunk *chk;
9086 struct mbuf *m_asconf;
9089 SCTP_TCB_LOCK_ASSERT(stcb);
9091 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9092 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9093 /* can't send a new one if there is one in flight already */
9096 /* compose an ASCONF chunk, maximum length is PMTU */
9097 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9098 if (m_asconf == NULL) {
9101 sctp_alloc_a_chunk(stcb, chk);
9104 sctp_m_freem(m_asconf);
9107 chk->copy_by_ref = 0;
9108 chk->data = m_asconf;
9109 chk->send_size = len;
9110 chk->rec.chunk_id.id = SCTP_ASCONF;
9111 chk->rec.chunk_id.can_take_data = 0;
9112 chk->sent = SCTP_DATAGRAM_UNSENT;
9114 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9115 chk->asoc = &stcb->asoc;
9118 atomic_add_int(&chk->whoTo->ref_count, 1);
9120 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9121 chk->asoc->ctrl_queue_cnt++;
9126 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9129 * formulate and queue an asconf-ack back to the sender. The asconf-ack
9130 * must be stored in the tcb.
9132 struct sctp_tmit_chunk *chk;
9133 struct sctp_asconf_ack *ack, *latest_ack;
9135 struct sctp_nets *net = NULL;
9137 SCTP_TCB_LOCK_ASSERT(stcb);
9138 /* Get the latest ASCONF-ACK */
9139 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9140 if (latest_ack == NULL) {
9143 if (latest_ack->last_sent_to != NULL &&
9144 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9145 /* we're doing a retransmission */
9146 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9149 if (stcb->asoc.last_control_chunk_from == NULL) {
9150 if (stcb->asoc.alternate) {
9151 net = stcb->asoc.alternate;
9153 net = stcb->asoc.primary_destination;
9156 net = stcb->asoc.last_control_chunk_from;
9161 if (stcb->asoc.last_control_chunk_from == NULL) {
9162 if (stcb->asoc.alternate) {
9163 net = stcb->asoc.alternate;
9165 net = stcb->asoc.primary_destination;
9168 net = stcb->asoc.last_control_chunk_from;
9171 latest_ack->last_sent_to = net;
9173 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9174 if (ack->data == NULL) {
9177 /* copy the asconf_ack */
9178 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9179 if (m_ack == NULL) {
9180 /* couldn't copy it */
9183 #ifdef SCTP_MBUF_LOGGING
9184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9187 for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) {
9188 if (SCTP_BUF_IS_EXTENDED(mat)) {
9189 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9195 sctp_alloc_a_chunk(stcb, chk);
9199 sctp_m_freem(m_ack);
9202 chk->copy_by_ref = 0;
9206 atomic_add_int(&chk->whoTo->ref_count, 1);
9211 chk->send_size = ack->len;
9212 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9213 chk->rec.chunk_id.can_take_data = 1;
9214 chk->sent = SCTP_DATAGRAM_UNSENT;
9216 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
9217 chk->asoc = &stcb->asoc;
9219 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9220 chk->asoc->ctrl_queue_cnt++;
9227 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9228 struct sctp_tcb *stcb,
9229 struct sctp_association *asoc,
9230 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9231 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9237 * send out one MTU of retransmission. If fast_retransmit is
9238 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9239 * rwnd. For a Cookie or Asconf in the control chunk queue we
9240 * retransmit them by themselves.
9242 * For data chunks we will pick out the lowest TSNs in the sent_queue
9243 * marked for resend and bundle them all together (up to the MTU of the
9244 * destination). The address to send to should have been
9245 * selected/changed where the retransmission was marked (i.e. in FR
9246 * or t3-timeout routines).
9248 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9249 struct sctp_tmit_chunk *chk, *fwd;
9250 struct mbuf *m, *endofchain;
9251 struct sctp_nets *net = NULL;
9252 uint32_t tsns_sent = 0;
9253 int no_fragmentflg, bundle_at, cnt_thru;
9255 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9256 struct sctp_auth_chunk *auth = NULL;
9257 uint32_t auth_offset = 0;
9258 uint16_t auth_keyid;
9259 int override_ok = 1;
9260 int data_auth_reqd = 0;
9263 SCTP_TCB_LOCK_ASSERT(stcb);
9264 tmr_started = ctl_cnt = bundle_at = error = 0;
9269 endofchain = m = NULL;
9270 auth_keyid = stcb->asoc.authinfo.active_keyid;
9271 #ifdef SCTP_AUDITING_ENABLED
9272 sctp_audit_log(0xC3, 1);
9274 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9275 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9276 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9277 asoc->sent_queue_retran_cnt);
9278 asoc->sent_queue_cnt = 0;
9279 asoc->sent_queue_cnt_removeable = 0;
9280 /* send back 0/0 so we enter normal transmission */
9284 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9285 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9286 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9287 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9288 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9291 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9292 if (chk != asoc->str_reset) {
9294 * not eligible for retran if its
9301 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9305 * Add an AUTH chunk, if chunk requires it save the
9306 * offset into the chain for AUTH
9308 if ((auth == NULL) &&
9309 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9310 stcb->asoc.peer_auth_chunks))) {
9311 m = sctp_add_auth_chunk(m, &endofchain,
9312 &auth, &auth_offset,
9314 chk->rec.chunk_id.id);
9315 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9317 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9323 /* do we have control chunks to retransmit? */
9325 /* Start a timer no matter if we succeed or fail */
9326 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9327 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9328 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9329 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9330 chk->snd_count++; /* update our count */
9331 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9332 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9333 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9334 no_fragmentflg, 0, 0,
9335 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9336 chk->whoTo->port, NULL,
9339 SCTP_STAT_INCR(sctps_lowlevelerr);
9346 * We don't want to mark the net->sent time here since
9347 * we use this for HB and retrans cannot measure RTT
9349 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9351 chk->sent = SCTP_DATAGRAM_SENT;
9352 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9356 /* Clean up the fwd-tsn list */
9357 sctp_clean_up_ctl(stcb, asoc, so_locked);
9362 * Ok, it is just data retransmission we need to do or that and a
9363 * fwd-tsn with it all.
9365 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9366 return (SCTP_RETRAN_DONE);
9368 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9369 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9370 /* not yet open, resend the cookie and that is it */
9373 #ifdef SCTP_AUDITING_ENABLED
9374 sctp_auditing(20, inp, stcb, NULL);
9376 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9377 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9378 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9379 /* No, not sent to this net or not ready for rtx */
9382 if (chk->data == NULL) {
9383 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9384 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9387 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9388 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9389 /* Gak, we have exceeded max unlucky retran, abort! */
9390 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9392 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9393 atomic_add_int(&stcb->asoc.refcnt, 1);
9394 sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
9395 SCTP_TCB_LOCK(stcb);
9396 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9397 return (SCTP_RETRAN_EXIT);
9399 /* pick up the net */
9401 switch (net->ro._l_addr.sa.sa_family) {
9404 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9409 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9418 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9419 /* No room in peers rwnd */
9422 tsn = asoc->last_acked_seq + 1;
9423 if (tsn == chk->rec.data.TSN_seq) {
9425 * we make a special exception for this
9426 * case. The peer has no rwnd but is missing
9427 * the lowest chunk.. which is probably what
9428 * is holding up the rwnd.
9430 goto one_chunk_around;
9435 if (asoc->peers_rwnd < mtu) {
9437 if ((asoc->peers_rwnd == 0) &&
9438 (asoc->total_flight == 0)) {
9439 chk->window_probe = 1;
9440 chk->whoTo->window_probe = 1;
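/*
 * Zero peer rwnd and nothing in flight: retransmit this one chunk as
 * a window probe rather than stalling completely.
 */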
9443 #ifdef SCTP_AUDITING_ENABLED
9444 sctp_audit_log(0xC3, 2);
9448 net->fast_retran_ip = 0;
9449 if (chk->rec.data.doing_fast_retransmit == 0) {
9451 * if no FR is in progress skip destinations that have
9452 * flight_size > cwnd.
9454 if (net->flight_size >= net->cwnd) {
9459 * Mark the destination net to have FR recovery
9463 net->fast_retran_ip = 1;
9467 * if no AUTH is yet included and this chunk requires it,
9468 * make sure to account for it. We don't apply the size
9469 * until the AUTH chunk is actually added below in case
9470 * there is no room for this chunk.
9472 if (data_auth_reqd && (auth == NULL)) {
9473 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9477 if ((chk->send_size <= (mtu - dmtu)) ||
9478 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9479 /* ok we will add this one */
9480 if (data_auth_reqd) {
9482 m = sctp_add_auth_chunk(m,
9488 auth_keyid = chk->auth_keyid;
9490 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9491 } else if (override_ok) {
9492 auth_keyid = chk->auth_keyid;
9494 } else if (chk->auth_keyid != auth_keyid) {
9495 /* different keyid, so done bundling */
9499 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9501 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9504 /* Do clear IP_DF ? */
9505 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9508 /* update our MTU size */
9509 if (mtu > (chk->send_size + dmtu))
9510 mtu -= (chk->send_size + dmtu);
9513 data_list[bundle_at++] = chk;
9514 if (one_chunk && (asoc->total_flight <= 0)) {
9515 SCTP_STAT_INCR(sctps_windowprobed);
9518 if (one_chunk == 0) {
9520 * now are there any more forward from chk to pick
9523 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9524 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9525 /* Nope, not for retran */
9528 if (fwd->whoTo != net) {
9529 /* Nope, not the net in question */
9532 if (data_auth_reqd && (auth == NULL)) {
9533 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9536 if (fwd->send_size <= (mtu - dmtu)) {
9537 if (data_auth_reqd) {
9539 m = sctp_add_auth_chunk(m,
9545 auth_keyid = fwd->auth_keyid;
9547 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9548 } else if (override_ok) {
9549 auth_keyid = fwd->auth_keyid;
9551 } else if (fwd->auth_keyid != auth_keyid) {
9559 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9561 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9564 /* Do clear IP_DF ? */
9565 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9568 /* update our MTU size */
9569 if (mtu > (fwd->send_size + dmtu))
9570 mtu -= (fwd->send_size + dmtu);
9573 data_list[bundle_at++] = fwd;
9574 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9578 /* can't fit so we are done */
9583 /* Is there something to send for this destination? */
9586 * No matter if we fail or succeed we should start a
9587 * timer. A failure is like a lost IP packet :-)
9589 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9591 * no timer running on this destination
9594 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9597 /* Now lets send it, if there is anything to send :> */
9598 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9599 (struct sockaddr *)&net->ro._l_addr, m,
9600 auth_offset, auth, auth_keyid,
9601 no_fragmentflg, 0, 0,
9602 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9606 /* error, we could not output */
9607 SCTP_STAT_INCR(sctps_lowlevelerr);
9615 * We don't want to mark the net->sent time here
9616 * since we use this for HB and retrans cannot
9619 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9621 /* For auto-close */
9623 if (*now_filled == 0) {
9624 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9625 *now = asoc->time_last_sent;
9628 asoc->time_last_sent = *now;
9630 *cnt_out += bundle_at;
9631 #ifdef SCTP_AUDITING_ENABLED
9632 sctp_audit_log(0xC4, bundle_at);
9635 tsns_sent = data_list[0]->rec.data.TSN_seq;
9637 for (i = 0; i < bundle_at; i++) {
9638 SCTP_STAT_INCR(sctps_sendretransdata);
9639 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9641 * When we have revoked data, and we
9642 * retransmit it, then we clear the revoked
9643 * flag since this flag dictates if we
9644 * subtracted from the fs
9646 if (data_list[i]->rec.data.chunk_was_revoked) {
9647 /* Deflate the cwnd */
9648 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9649 data_list[i]->rec.data.chunk_was_revoked = 0;
9651 data_list[i]->snd_count++;
9652 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9653 /* record the time */
9654 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9655 if (data_list[i]->book_size_scale) {
9657 * need to double the book size on
9660 data_list[i]->book_size_scale = 0;
9662 * Since we double the booksize, we
9663 * must also double the output queue
9664 * size, since this get shrunk when
9665 * we free by this amount.
9667 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9668 data_list[i]->book_size *= 2;
9672 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9673 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9674 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9676 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9677 (uint32_t) (data_list[i]->send_size +
9678 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
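/*
 * Each retransmitted chunk is charged against the peer's rwnd again,
 * including the configurable per-chunk overhead (sctp_peer_chunk_oh).
 */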
9680 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9681 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9682 data_list[i]->whoTo->flight_size,
9683 data_list[i]->book_size,
9684 (uintptr_t) data_list[i]->whoTo,
9685 data_list[i]->rec.data.TSN_seq);
9687 sctp_flight_size_increase(data_list[i]);
9688 sctp_total_flight_increase(stcb, data_list[i]);
9689 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9690 /* SWS sender side engages */
9691 asoc->peers_rwnd = 0;
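/*
 * Classic silly-window avoidance: once the remaining peer window drops
 * below the SWS threshold, treat it as zero so we do not dribble tiny
 * packets into it.
 */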
9694 (data_list[i]->rec.data.doing_fast_retransmit)) {
9695 SCTP_STAT_INCR(sctps_sendfastretrans);
9696 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9697 (tmr_started == 0)) {
9699 * ok we just fast-retrans'd
9700 * the lowest TSN, i.e. the
9701 * first on the list. In
9702 * this case we want to give
9703 * some more time to get a
9704 * SACK back without a
9707 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9708 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9709 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9713 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9714 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9716 #ifdef SCTP_AUDITING_ENABLED
9717 sctp_auditing(21, inp, stcb, NULL);
9723 if (asoc->sent_queue_retran_cnt <= 0) {
9724 /* all done we have no more to retran */
9725 asoc->sent_queue_retran_cnt = 0;
9729 /* No more room in rwnd */
9732 /* stop the for loop here. we sent out a packet */
9739 sctp_timer_validation(struct sctp_inpcb *inp,
9740 struct sctp_tcb *stcb,
9741 struct sctp_association *asoc)
9743 struct sctp_nets *net;
9745 /* Validate that a timer is running somewhere */
9746 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9747 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9748 /* Here is a timer */
9752 SCTP_TCB_LOCK_ASSERT(stcb);
9753 /* Gak, we did not have a timer somewhere */
9754 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9755 if (asoc->alternate) {
9756 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9758 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9764 sctp_chunk_output(struct sctp_inpcb *inp,
9765 struct sctp_tcb *stcb,
9768 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9774 * Ok this is the generic chunk service queue. We must do the
9776 * - See if there are retransmits pending, if so we must
9778 * - Service the stream queue that is next, moving any
9779 * message (note I must get a complete message i.e.
9780 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9782 * - Check to see if the cwnd/rwnd allows any output, if so we
9783 * go ahead and formulate and send the low level chunks. Making sure
9784 * to combine any control in the control chunk queue also.
9786 struct sctp_association *asoc;
9787 struct sctp_nets *net;
9788 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
9789 unsigned int burst_cnt = 0;
9793 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9796 unsigned int tot_frs = 0;
9799 /* The Nagle algorithm is only applied when handling a send call. */
9800 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9801 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9809 SCTP_TCB_LOCK_ASSERT(stcb);
9811 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9813 if ((un_sent <= 0) &&
9814 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9815 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9816 (asoc->sent_queue_retran_cnt == 0)) {
9817 /* Nothing to do unless there is something left to be sent */
9821 * Do we have something to send, data or control AND a sack timer
9822 * running, if so piggy-back the sack.
9824 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9825 sctp_send_sack(stcb, so_locked);
9826 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
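/*
 * The SACK is now queued, so the delayed-ack timer can be cancelled;
 * the ack will ride along with whatever this call puts on the wire.
 */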
9828 while (asoc->sent_queue_retran_cnt) {
9830 * Ok, it is retransmission time only, we send out only ONE
9831 * packet with a single call off to the retran code.
9833 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9835 * Special hook for handling cookies discarded
9836 * by the peer that carried data. Send the cookie-ack only
9837 * and then the next call will get the retransmissions.
9839 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9841 &now, &now_filled, frag_point, so_locked);
9843 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9844 /* if it's not from an HB then do it */
9846 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9852 * it's from any other place, we don't allow retran
9853 * output (only control)
9858 /* Can't send anymore */
9860 * now let's push out control by calling med-level
9861 * output once. This assures that we WILL send HBs
9864 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9866 &now, &now_filled, frag_point, so_locked);
9867 #ifdef SCTP_AUDITING_ENABLED
9868 sctp_auditing(8, inp, stcb, NULL);
9870 sctp_timer_validation(inp, stcb, asoc);
9875 * The count was off.. retran is not happening so do
9876 * the normal retransmission.
9878 #ifdef SCTP_AUDITING_ENABLED
9879 sctp_auditing(9, inp, stcb, NULL);
9881 if (ret == SCTP_RETRAN_EXIT) {
9886 if (from_where == SCTP_OUTPUT_FROM_T3) {
9887 /* Only one transmission allowed out of a timeout */
9888 #ifdef SCTP_AUDITING_ENABLED
9889 sctp_auditing(10, inp, stcb, NULL);
9891 /* Push out any control */
9892 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
9893 &now, &now_filled, frag_point, so_locked);
9896 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
9897 /* Hit FR burst limit */
9900 if ((num_out == 0) && (ret == 0)) {
9901 /* No more retrans to send */
9905 #ifdef SCTP_AUDITING_ENABLED
9906 sctp_auditing(12, inp, stcb, NULL);
9908 /* Check for bad destinations, if they exist move chunks around. */
9909 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9910 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
9912 * if possible move things off of this address; we
9913 * still may send below due to the dormant state, but
9914 * we try to find an alternate address to send to
9915 * and if we have one we move all queued data on the
9916 * out wheel to this alternate address.
9918 if (net->ref_count > 1)
9919 sctp_move_chunks_from_net(stcb, net);
9922 * if ((asoc->sat_network) || (net->addr_is_local))
9923 * { burst_limit = asoc->max_burst *
9924 * SCTP_SAT_NETWORK_BURST_INCR; }
9926 if (asoc->max_burst > 0) {
9927 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
9928 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
9930 * JRS - Use the congestion
9931 * control given in the
9932 * congestion control module
9934 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
9935 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9936 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
9938 SCTP_STAT_INCR(sctps_maxburstqueued);
9940 net->fast_retran_ip = 0;
9942 if (net->flight_size == 0) {
9944 * Should be decaying the
9956 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
9957 &reason_code, 0, from_where,
9958 &now, &now_filled, frag_point, so_locked);
9960 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
9961 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9962 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
9964 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9965 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
9966 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
9970 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
9974 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9975 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
9977 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
9982 * When the Nagle algorithm is used, look at how
9983 * much is unsent, then if it's smaller than an MTU
9984 * and we have data in flight we stop, except if we
9985 * are handling a fragmented user message.
9987 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
9988 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
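/*
 * un_sent estimates the bytes still to go on the wire: the queued but
 * not-yet-in-flight payload plus a DATA chunk header for every message
 * still sitting on the stream queues.
 */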
9989 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
9990 (stcb->asoc.total_flight > 0) &&
9991 ((stcb->asoc.locked_on_sending == NULL) ||
9992 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
9996 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
9997 TAILQ_EMPTY(&asoc->send_queue) &&
9998 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
9999 /* Nothing left to send */
10002 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10003 /* Nothing left to send */
10006 } while (num_out &&
10007 ((asoc->max_burst == 0) ||
10008 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10009 (burst_cnt < asoc->max_burst)));
10011 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10012 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10013 SCTP_STAT_INCR(sctps_maxburstqueued);
10014 asoc->burst_limit_applied = 1;
10015 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10016 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10019 asoc->burst_limit_applied = 0;
10022 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10023 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10025 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10029 * Now we need to clean up the control chunk chain if an ECNE is on
10030 * it. It must be marked as UNSENT again so the next call will continue
10031 * to send it until such time that we get a CWR, to remove it.
10033 if (stcb->asoc.ecn_echo_cnt_onq)
10034 sctp_fix_ecn_echo(asoc);
10041 struct sctp_inpcb *inp,
10043 struct sockaddr *addr,
10044 struct mbuf *control,
10049 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10052 if (inp->sctp_socket == NULL) {
10053 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10056 return (sctp_sosend(inp->sctp_socket,
10058 (struct uio *)NULL,
10066 send_forward_tsn(struct sctp_tcb *stcb,
10067 struct sctp_association *asoc)
10069 struct sctp_tmit_chunk *chk;
10070 struct sctp_forward_tsn_chunk *fwdtsn;
10071 uint32_t advance_peer_ack_point;
10073 SCTP_TCB_LOCK_ASSERT(stcb);
10074 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10075 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10076 /* mark it as unsent */
10077 chk->sent = SCTP_DATAGRAM_UNSENT;
10078 chk->snd_count = 0;
10079 /* Do we correct its output location? */
10081 sctp_free_remote_addr(chk->whoTo);
10084 goto sctp_fill_in_rest;
10087 /* Ok if we reach here we must build one */
10088 sctp_alloc_a_chunk(stcb, chk);
10092 asoc->fwd_tsn_cnt++;
10093 chk->copy_by_ref = 0;
10094 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10095 chk->rec.chunk_id.can_take_data = 0;
10098 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10099 if (chk->data == NULL) {
10100 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10103 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10104 chk->sent = SCTP_DATAGRAM_UNSENT;
10105 chk->snd_count = 0;
10106 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10107 asoc->ctrl_queue_cnt++;
10110 * Here we go through and fill out the part that deals with
10111 * stream/seq of the ones we skip.
10113 SCTP_BUF_LEN(chk->data) = 0;
10115 struct sctp_tmit_chunk *at, *tp1, *last;
10116 struct sctp_strseq *strseq;
10117 unsigned int cnt_of_space, i, ovh;
10118 unsigned int space_needed;
10119 unsigned int cnt_of_skipped = 0;
10121 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10122 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10123 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10124 /* no more to look at */
10127 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10128 /* We don't report these */
10133 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10134 (cnt_of_skipped * sizeof(struct sctp_strseq)));
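/*
 * A FORWARD-TSN needs its fixed chunk header plus one stream/sequence
 * pair for every ordered chunk being skipped over.
 */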
10136 cnt_of_space = M_TRAILINGSPACE(chk->data);
10138 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10139 ovh = SCTP_MIN_OVERHEAD;
10141 ovh = SCTP_MIN_V4_OVERHEAD;
10143 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10144 /* trim to a mtu size */
10145 cnt_of_space = asoc->smallest_mtu - ovh;
10147 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10148 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10149 0xff, 0, cnt_of_skipped,
10150 asoc->advanced_peer_ack_point);
10153 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10154 if (cnt_of_space < space_needed) {
10156 * ok we must trim down the chunk by lowering the
10157 * advance peer ack point.
10159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10160 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10161 0xff, 0xff, cnt_of_space,
10164 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10165 cnt_of_skipped /= sizeof(struct sctp_strseq);
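/*
 * Recompute how many skipped stream/sequence entries actually fit once
 * the chunk header has been subtracted from the available space.
 */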
10167 * Go through and find the TSN that will be the one
10170 at = TAILQ_FIRST(&asoc->sent_queue);
10172 for (i = 0; i < cnt_of_skipped; i++) {
10173 tp1 = TAILQ_NEXT(at, sctp_next);
10180 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10181 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10182 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10183 asoc->advanced_peer_ack_point);
10187 * last now points to last one I can report, update
10191 advance_peer_ack_point = last->rec.data.TSN_seq;
10192 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10193 cnt_of_skipped * sizeof(struct sctp_strseq);
10195 chk->send_size = space_needed;
10196 /* Setup the chunk */
10197 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10198 fwdtsn->ch.chunk_length = htons(chk->send_size);
10199 fwdtsn->ch.chunk_flags = 0;
10200 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10201 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10202 SCTP_BUF_LEN(chk->data) = chk->send_size;
10205 * Move pointer to after the fwdtsn and transfer to the
10208 strseq = (struct sctp_strseq *)fwdtsn;
10210 * Now populate the strseq list. This is done blindly
10211 * without pulling out duplicate stream info. This is
10212 * inefficient but won't harm the process since the peer will
10213 * look at these in sequence and will thus release anything.
10214 * It could mean we exceed the PMTU and chop off some that
10215 * we could have included.. but this is unlikely (aka 1432/4
10216 * would mean 300+ stream seq's would have to be reported in
10217 * one FWD-TSN. With a bit of work we can later FIX this to
10218 * optimize and pull out duplicates.. but it does add more
10219 * overhead. So for now... not!
10221 at = TAILQ_FIRST(&asoc->sent_queue);
10222 for (i = 0; i < cnt_of_skipped; i++) {
10223 tp1 = TAILQ_NEXT(at, sctp_next);
10226 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10227 /* We don't report these */
10232 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10233 at->rec.data.fwd_tsn_cnt = 0;
10235 strseq->stream = ntohs(at->rec.data.stream_number);
10236 strseq->sequence = ntohs(at->rec.data.stream_seq);
10245 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10246 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10252 * Queue up a SACK or NR-SACK in the control queue.
10253 * We must first check to see if a SACK or NR-SACK is
10254 * somehow on the control queue.
10255 * If so, we will take and remove the old one.
10257 struct sctp_association *asoc;
10258 struct sctp_tmit_chunk *chk, *a_chk;
10259 struct sctp_sack_chunk *sack;
10260 struct sctp_nr_sack_chunk *nr_sack;
10261 struct sctp_gap_ack_block *gap_descriptor;
10262 struct sack_track *selector;
10267 int limit_reached = 0;
10268 unsigned int i, siz, j;
10269 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10272 uint32_t highest_tsn;
10277 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
10278 (stcb->asoc.peer_supports_nr_sack == 1)) {
10279 type = SCTP_NR_SELECTIVE_ACK;
10281 type = SCTP_SELECTIVE_ACK;
10284 asoc = &stcb->asoc;
10285 SCTP_TCB_LOCK_ASSERT(stcb);
10286 if (asoc->last_data_chunk_from == NULL) {
10287 /* Hmm we never received anything */
10290 sctp_slide_mapping_arrays(stcb);
10291 sctp_set_rwnd(stcb, asoc);
10292 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10293 if (chk->rec.chunk_id.id == type) {
10294 /* Hmm, found a sack already on queue, remove it */
10295 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10296 asoc->ctrl_queue_cnt--;
10299 sctp_m_freem(a_chk->data);
10300 a_chk->data = NULL;
10302 if (a_chk->whoTo) {
10303 sctp_free_remote_addr(a_chk->whoTo);
10304 a_chk->whoTo = NULL;
10309 if (a_chk == NULL) {
10310 sctp_alloc_a_chunk(stcb, a_chk);
10311 if (a_chk == NULL) {
10312 /* No memory so we drop the idea, and set a timer */
10313 if (stcb->asoc.delayed_ack) {
10314 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10315 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10316 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10317 stcb->sctp_ep, stcb, NULL);
10319 stcb->asoc.send_sack = 1;
10323 a_chk->copy_by_ref = 0;
10324 a_chk->rec.chunk_id.id = type;
10325 a_chk->rec.chunk_id.can_take_data = 1;
10327 /* Clear our pkt counts */
10328 asoc->data_pkts_seen = 0;
10330 a_chk->asoc = asoc;
10331 a_chk->snd_count = 0;
10332 a_chk->send_size = 0; /* fill in later */
10333 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10334 a_chk->whoTo = NULL;
10336 if ((asoc->numduptsns) ||
10337 (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
10339 * Ok, we have some duplicates or the destination for the
10340 * sack is unreachable, let's see if we can select an
10341 * alternate to asoc->last_data_chunk_from
10343 if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
10344 (asoc->used_alt_onsack > asoc->numnets)) {
10345 /* We used an alt last time, don't use one this time */
10346 a_chk->whoTo = NULL;
10348 asoc->used_alt_onsack++;
10349 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10351 if (a_chk->whoTo == NULL) {
10352 /* Nope, no alternate */
10353 a_chk->whoTo = asoc->last_data_chunk_from;
10354 asoc->used_alt_onsack = 0;
10358 * No duplicates so we use the last place we received data
10361 asoc->used_alt_onsack = 0;
10362 a_chk->whoTo = asoc->last_data_chunk_from;
10364 if (a_chk->whoTo) {
10365 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10367 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10368 highest_tsn = asoc->highest_tsn_inside_map;
10370 highest_tsn = asoc->highest_tsn_inside_nr_map;
10372 if (highest_tsn == asoc->cumulative_tsn) {
10374 if (type == SCTP_SELECTIVE_ACK) {
10375 space_req = sizeof(struct sctp_sack_chunk);
10377 space_req = sizeof(struct sctp_nr_sack_chunk);
10380 /* gaps get a cluster */
10381 space_req = MCLBYTES;
10383 /* Ok now let's formulate an MBUF with our sack */
10384 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
10385 if ((a_chk->data == NULL) ||
10386 (a_chk->whoTo == NULL)) {
10387 /* rats, no mbuf memory */
10389 /* was a problem with the destination */
10390 sctp_m_freem(a_chk->data);
10391 a_chk->data = NULL;
10393 sctp_free_a_chunk(stcb, a_chk, so_locked);
10394 /* sa_ignore NO_NULL_CHK */
10395 if (stcb->asoc.delayed_ack) {
10396 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10397 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10398 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10399 stcb->sctp_ep, stcb, NULL);
10401 stcb->asoc.send_sack = 1;
10405 /* ok, let's go through and fill it in */
10406 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10407 space = M_TRAILINGSPACE(a_chk->data);
10408 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10409 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10411 limit = mtod(a_chk->data, caddr_t);
10416 if ((asoc->sctp_cmt_on_off > 0) &&
10417 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10419 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
10420 * received, then set high bit to 1, else 0. Reset
10423 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10424 asoc->cmt_dac_pkts_rcvd = 0;
10426 #ifdef SCTP_ASOCLOG_OF_TSNS
10427 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10428 stcb->asoc.cumack_log_atsnt++;
10429 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10430 stcb->asoc.cumack_log_atsnt = 0;
10433 /* reset the reader's interpretation */
10434 stcb->freed_by_sorcv_sincelast = 0;
10436 if (type == SCTP_SELECTIVE_ACK) {
10437 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10439 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10440 if (highest_tsn > asoc->mapping_array_base_tsn) {
10441 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10443 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10447 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10448 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10449 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10450 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10452 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10456 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10459 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10461 if (((type == SCTP_SELECTIVE_ACK) &&
10462 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10463 ((type == SCTP_NR_SELECTIVE_ACK) &&
10464 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10465 /* we have a gap .. maybe */
10466 for (i = 0; i < siz; i++) {
10467 tsn_map = asoc->mapping_array[i];
10468 if (type == SCTP_SELECTIVE_ACK) {
10469 tsn_map |= asoc->nr_mapping_array[i];
10473 * Clear all bits corresponding to TSNs
10474 * smaller or equal to the cumulative TSN.
10476 tsn_map &= (~0 << (1 - offset));
10478 selector = &sack_array[tsn_map];
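/*
 * Each possible byte value of the mapping array indexes a precomputed
 * sack_track entry describing its gap-ack blocks and whether they can
 * merge with the neighbouring bytes' blocks.
 */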
10479 if (mergeable && selector->right_edge) {
10481 * Backup, left and right edges were ok to
10487 if (selector->num_entries == 0)
10490 for (j = 0; j < selector->num_entries; j++) {
10491 if (mergeable && selector->right_edge) {
10493 * do a merge by NOT setting
10499 * no merge, set the left
10503 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10505 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10508 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10514 if (selector->left_edge) {
10518 if (limit_reached) {
10519 /* Reached the limit, stop */
10525 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10526 (limit_reached == 0)) {
10530 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10531 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10533 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10536 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10539 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10541 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10542 /* we have a gap .. maybe */
10543 for (i = 0; i < siz; i++) {
10544 tsn_map = asoc->nr_mapping_array[i];
10547 * Clear all bits corresponding to
10548 * TSNs smaller or equal to the
10551 tsn_map &= (~0 << (1 - offset));
10553 selector = &sack_array[tsn_map];
10554 if (mergeable && selector->right_edge) {
10556 * Backup, left and right edges were
10559 num_nr_gap_blocks--;
10562 if (selector->num_entries == 0)
10565 for (j = 0; j < selector->num_entries; j++) {
10566 if (mergeable && selector->right_edge) {
10568 * do a merge by NOT
10575 * no merge, set the
10579 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10581 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10582 num_nr_gap_blocks++;
10584 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10590 if (selector->left_edge) {
10594 if (limit_reached) {
10595 /* Reached the limit, stop */
10602 /* now we must add any dups we are going to report. */
10603 if ((limit_reached == 0) && (asoc->numduptsns)) {
10604 dup = (uint32_t *) gap_descriptor;
10605 for (i = 0; i < asoc->numduptsns; i++) {
10606 *dup = htonl(asoc->dup_tsns[i]);
10609 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10614 asoc->numduptsns = 0;
10617 * now that the chunk is prepared queue it to the control chunk
10620 if (type == SCTP_SELECTIVE_ACK) {
10621 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
10622 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10623 num_dups * sizeof(int32_t);
10624 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10625 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10626 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10627 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10628 sack->sack.num_dup_tsns = htons(num_dups);
10629 sack->ch.chunk_type = type;
10630 sack->ch.chunk_flags = flags;
10631 sack->ch.chunk_length = htons(a_chk->send_size);
10633 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
10634 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10635 num_dups * sizeof(int32_t);
10636 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10637 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10638 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10639 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10640 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10641 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10642 nr_sack->nr_sack.reserved = 0;
10643 nr_sack->ch.chunk_type = type;
10644 nr_sack->ch.chunk_flags = flags;
10645 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10647 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10648 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10649 asoc->ctrl_queue_cnt++;
10650 asoc->send_sack = 0;
10651 SCTP_STAT_INCR(sctps_sendsacks);
10656 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10657 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10662 struct mbuf *m_abort, *m, *m_last;
10663 struct mbuf *m_out, *m_end = NULL;
10664 struct sctp_abort_chunk *abort;
10665 struct sctp_auth_chunk *auth = NULL;
10666 struct sctp_nets *net;
10668 uint32_t auth_offset = 0;
10669 uint16_t cause_len, chunk_len, padding_len;
10671 SCTP_TCB_LOCK_ASSERT(stcb);
10673 * Add an AUTH chunk, if this chunk requires it, and save the offset into
10674 * the chain for AUTH
10676 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10677 stcb->asoc.peer_auth_chunks)) {
10678 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10679 stcb, SCTP_ABORT_ASSOCIATION);
10680 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10684 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
10685 if (m_abort == NULL) {
10687 sctp_m_freem(m_out);
10690 sctp_m_freem(operr);
10694 /* link in any error */
10695 SCTP_BUF_NEXT(m_abort) = operr;
10698 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10699 cause_len += (uint16_t) SCTP_BUF_LEN(m);
10700 if (SCTP_BUF_NEXT(m) == NULL) {
10704 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10705 chunk_len = (uint16_t) sizeof(struct sctp_abort_chunk) + cause_len;
10706 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
10707 if (m_out == NULL) {
10708 /* NO Auth chunk prepended, so reserve space in front */
10709 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10712 /* Put AUTH chunk at the front of the chain */
10713 SCTP_BUF_NEXT(m_end) = m_abort;
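/* Send the ABORT to the alternate destination if one is set, else to the primary. */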
10715 if (stcb->asoc.alternate) {
10716 net = stcb->asoc.alternate;
10718 net = stcb->asoc.primary_destination;
10720 /* Fill in the ABORT chunk header. */
10721 abort = mtod(m_abort, struct sctp_abort_chunk *);
10722 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10723 if (stcb->asoc.peer_vtag == 0) {
10724 /* This happens iff the assoc is in COOKIE-WAIT state. */
10725 vtag = stcb->asoc.my_vtag;
10726 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
10728 vtag = stcb->asoc.peer_vtag;
10729 abort->ch.chunk_flags = 0;
10731 abort->ch.chunk_length = htons(chunk_len);
10732 /* Add padding, if necessary. */
10733 if (padding_len > 0) {
10734 if ((m_last == NULL) || sctp_add_pad_tombuf(m_last, padding_len)) {
10735 sctp_m_freem(m_out);
10739 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10740 (struct sockaddr *)&net->ro._l_addr,
10741 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10742 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
10743 stcb->asoc.primary_destination->port, NULL,
10746 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10750 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10751 struct sctp_nets *net,
10754 /* formulate and SEND a SHUTDOWN-COMPLETE */
10755 struct mbuf *m_shutdown_comp;
10756 struct sctp_shutdown_complete_chunk *shutdown_complete;
10760 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
10761 if (m_shutdown_comp == NULL) {
10765 if (reflect_vtag) {
10766 flags = SCTP_HAD_NO_TCB;
10767 vtag = stcb->asoc.my_vtag;
10770 vtag = stcb->asoc.peer_vtag;
10772 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10773 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10774 shutdown_complete->ch.chunk_flags = flags;
10775 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10776 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10777 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10778 (struct sockaddr *)&net->ro._l_addr,
10779 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
10780 stcb->sctp_ep->sctp_lport, stcb->rport,
10784 SCTP_SO_NOT_LOCKED);
10785 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
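/*
 * Build and send a single control chunk (e.g. ABORT, OPERATION-ERROR or
 * SHUTDOWN-COMPLETE) in response to a packet for which no TCB exists.
 * The addresses and ports of the received packet are mirrored and the
 * reply is UDP-encapsulated when a UDP port is given.
 */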
10790 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
10791 struct sctphdr *sh, uint32_t vtag,
10792 uint8_t type, struct mbuf *cause,
10793 uint8_t use_mflowid, uint32_t mflowid,
10794 uint32_t vrf_id, uint16_t port)
10796 struct mbuf *o_pak;
10798 struct sctphdr *shout;
10799 struct sctp_chunkhdr *ch;
10800 struct udphdr *udp;
10801 int len, cause_len, padding_len, ret;
10804 struct sockaddr_in *src_sin, *dst_sin;
10809 struct sockaddr_in6 *src_sin6, *dst_sin6;
10810 struct ip6_hdr *ip6;
10814 /* Compute the length of the cause and add final padding. */
10816 if (cause != NULL) {
10817 struct mbuf *m_at, *m_last = NULL;
10819 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
10820 if (SCTP_BUF_NEXT(m_at) == NULL)
10822 cause_len += SCTP_BUF_LEN(m_at);
10824 padding_len = cause_len % 4;
10825 if (padding_len != 0) {
10826 padding_len = 4 - padding_len;
10828 if (padding_len != 0) {
10829 if (sctp_add_pad_tombuf(m_last, padding_len)) {
10830 sctp_m_freem(cause);
10837 /* Get an mbuf for the header. */
10838 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
10839 switch (dst->sa_family) {
10842 len += sizeof(struct ip);
10847 len += sizeof(struct ip6_hdr);
10854 len += sizeof(struct udphdr);
10856 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
10857 if (mout == NULL) {
10859 sctp_m_freem(cause);
10863 SCTP_BUF_RESV_UF(mout, max_linkhdr);
10864 SCTP_BUF_LEN(mout) = len;
10865 SCTP_BUF_NEXT(mout) = cause;
10866 if (use_mflowid != 0) {
10867 mout->m_pkthdr.flowid = mflowid;
10868 mout->m_flags |= M_FLOWID;
10876 switch (dst->sa_family) {
10879 src_sin = (struct sockaddr_in *)src;
10880 dst_sin = (struct sockaddr_in *)dst;
10881 ip = mtod(mout, struct ip *);
10882 ip->ip_v = IPVERSION;
10883 ip->ip_hl = (sizeof(struct ip) >> 2);
10885 ip->ip_id = ip_newid();
10887 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
10889 ip->ip_p = IPPROTO_UDP;
10891 ip->ip_p = IPPROTO_SCTP;
10893 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
10894 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
10896 len = sizeof(struct ip);
10897 shout = (struct sctphdr *)((caddr_t)ip + len);
10902 src_sin6 = (struct sockaddr_in6 *)src;
10903 dst_sin6 = (struct sockaddr_in6 *)dst;
10904 ip6 = mtod(mout, struct ip6_hdr *);
10905 ip6->ip6_flow = htonl(0x60000000);
10906 if (V_ip6_auto_flowlabel) {
10907 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
10909 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
10911 ip6->ip6_nxt = IPPROTO_UDP;
10913 ip6->ip6_nxt = IPPROTO_SCTP;
10915 ip6->ip6_src = dst_sin6->sin6_addr;
10916 ip6->ip6_dst = src_sin6->sin6_addr;
10917 len = sizeof(struct ip6_hdr);
10918 shout = (struct sctphdr *)((caddr_t)ip6 + len);
10923 shout = mtod(mout, struct sctphdr *);
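/*
 * When UDP encapsulation is requested, place a UDP header in front of
 * the SCTP common header.
 */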
10927 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
10928 sctp_m_freem(mout);
10931 udp = (struct udphdr *)shout;
10932 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
10933 udp->uh_dport = port;
10935 udp->uh_ulen = htons(sizeof(struct udphdr) +
10936 sizeof(struct sctphdr) +
10937 sizeof(struct sctp_chunkhdr) +
10938 cause_len + padding_len);
10939 len += sizeof(struct udphdr);
10940 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
10944 shout->src_port = sh->dest_port;
10945 shout->dest_port = sh->src_port;
10946 shout->checksum = 0;
10948 shout->v_tag = htonl(vtag);
10950 shout->v_tag = sh->v_tag;
10952 len += sizeof(struct sctphdr);
10953 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
10954 ch->chunk_type = type;
10956 ch->chunk_flags = 0;
10958 ch->chunk_flags = SCTP_HAD_NO_TCB;
10960 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
10961 len += sizeof(struct sctp_chunkhdr);
10962 len += cause_len + padding_len;
10964 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
10965 sctp_m_freem(mout);
10968 SCTP_ATTACH_CHAIN(o_pak, mout, len);
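/*
 * Finish the per-family length fields, checksum the packet and hand it
 * to the IPv4 or IPv6 output routine.
 */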
10969 switch (dst->sa_family) {
10974 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
10979 ip->ip_len = htons(len);
10981 #if defined(SCTP_WITH_NO_CSUM)
10982 SCTP_STAT_INCR(sctps_sendnocrc);
10984 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
10985 SCTP_STAT_INCR(sctps_sendswcrc);
10988 SCTP_ENABLE_UDP_CSUM(o_pak);
10991 #if defined(SCTP_WITH_NO_CSUM)
10992 SCTP_STAT_INCR(sctps_sendnocrc);
10994 mout->m_pkthdr.csum_flags = CSUM_SCTP;
10995 mout->m_pkthdr.csum_data = 0;
10996 SCTP_STAT_INCR(sctps_sendhwcrc);
10999 #ifdef SCTP_PACKET_LOGGING
11000 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11001 sctp_packet_log(o_pak);
11004 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11009 ip6->ip6_plen = len - sizeof(struct ip6_hdr);
11011 #if defined(SCTP_WITH_NO_CSUM)
11012 SCTP_STAT_INCR(sctps_sendnocrc);
11014 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11015 SCTP_STAT_INCR(sctps_sendswcrc);
11017 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11018 udp->uh_sum = 0xffff;
11021 #if defined(SCTP_WITH_NO_CSUM)
11022 SCTP_STAT_INCR(sctps_sendnocrc);
11024 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11025 mout->m_pkthdr.csum_data = 0;
11026 SCTP_STAT_INCR(sctps_sendhwcrc);
11029 #ifdef SCTP_PACKET_LOGGING
11030 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11031 sctp_packet_log(o_pak);
11034 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11038 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11040 sctp_m_freem(mout);
11041 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11044 SCTP_STAT_INCR(sctps_sendpackets);
11045 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11046 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
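/*
 * Send a SHUTDOWN-COMPLETE for an incoming packet that has no matching
 * association; the received verification tag is reflected and the T bit
 * is set.
 */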
11051 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11052 struct sctphdr *sh,
11053 uint8_t use_mflowid, uint32_t mflowid,
11054 uint32_t vrf_id, uint16_t port)
11056 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11057 use_mflowid, mflowid,
11062 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11063 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11068 struct sctp_tmit_chunk *chk;
11069 struct sctp_heartbeat_chunk *hb;
11070 struct timeval now;
11072 SCTP_TCB_LOCK_ASSERT(stcb);
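/*
 * Build a HEARTBEAT request carrying the current time and the
 * destination address; for unconfirmed addresses two random values are
 * included so the HEARTBEAT-ACK can be validated.
 */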
11076 (void)SCTP_GETTIME_TIMEVAL(&now);
11077 switch (net->ro._l_addr.sa.sa_family) {
11089 sctp_alloc_a_chunk(stcb, chk);
11091 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11094 chk->copy_by_ref = 0;
11095 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11096 chk->rec.chunk_id.can_take_data = 1;
11097 chk->asoc = &stcb->asoc;
11098 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11100 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11101 if (chk->data == NULL) {
11102 sctp_free_a_chunk(stcb, chk, so_locked);
11105 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11106 SCTP_BUF_LEN(chk->data) = chk->send_size;
11107 chk->sent = SCTP_DATAGRAM_UNSENT;
11108 chk->snd_count = 0;
11110 atomic_add_int(&chk->whoTo->ref_count, 1);
11111 /* Now we have a mbuf that we can fill in with the details */
11112 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11113 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11114 /* fill out chunk header */
11115 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11116 hb->ch.chunk_flags = 0;
11117 hb->ch.chunk_length = htons(chk->send_size);
11118 /* Fill out hb parameter */
11119 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11120 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11121 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11122 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
11123 /* Did our user request this one, put it in */
11124 hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
11125 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11126 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11128 * we only take from the entropy pool if the address is not confirmed.
11131 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11132 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11134 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11135 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11137 switch (net->ro._l_addr.sa.sa_family) {
11140 memcpy(hb->heartbeat.hb_info.address,
11141 &net->ro._l_addr.sin.sin_addr,
11142 sizeof(net->ro._l_addr.sin.sin_addr));
11147 memcpy(hb->heartbeat.hb_info.address,
11148 &net->ro._l_addr.sin6.sin6_addr,
11149 sizeof(net->ro._l_addr.sin6.sin6_addr));
11156 net->hb_responded = 0;
11157 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11158 stcb->asoc.ctrl_queue_cnt++;
11159 SCTP_STAT_INCR(sctps_sendheartbeat);
11164 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11167 struct sctp_association *asoc;
11168 struct sctp_ecne_chunk *ecne;
11169 struct sctp_tmit_chunk *chk;
11174 asoc = &stcb->asoc;
11175 SCTP_TCB_LOCK_ASSERT(stcb);
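/*
 * If an ECN-ECHO for this destination is already queued, just update
 * its TSN and packet count instead of queueing a second one.
 */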
11176 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11177 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11178 /* found a previous ECN_ECHO, update it if needed */
11179 uint32_t cnt, ctsn;
11181 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11182 ctsn = ntohl(ecne->tsn);
11183 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11184 ecne->tsn = htonl(high_tsn);
11185 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11187 cnt = ntohl(ecne->num_pkts_since_cwr);
11189 ecne->num_pkts_since_cwr = htonl(cnt);
11193 /* nope, could not find one to update, so we must build one */
11194 sctp_alloc_a_chunk(stcb, chk);
11198 chk->copy_by_ref = 0;
11199 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11200 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11201 chk->rec.chunk_id.can_take_data = 0;
11202 chk->asoc = &stcb->asoc;
11203 chk->send_size = sizeof(struct sctp_ecne_chunk);
11204 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11205 if (chk->data == NULL) {
11206 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11209 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11210 SCTP_BUF_LEN(chk->data) = chk->send_size;
11211 chk->sent = SCTP_DATAGRAM_UNSENT;
11212 chk->snd_count = 0;
11214 atomic_add_int(&chk->whoTo->ref_count, 1);
11216 stcb->asoc.ecn_echo_cnt_onq++;
11217 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11218 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11219 ecne->ch.chunk_flags = 0;
11220 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11221 ecne->tsn = htonl(high_tsn);
11222 ecne->num_pkts_since_cwr = htonl(1);
11223 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11224 asoc->ctrl_queue_cnt++;
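/*
 * Queue a PACKET-DROPPED report to the peer, echoing back as much of
 * the dropped packet as fits within one MTU.
 */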
11228 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11229 struct mbuf *m, int len, int iphlen, int bad_crc)
11231 struct sctp_association *asoc;
11232 struct sctp_pktdrop_chunk *drp;
11233 struct sctp_tmit_chunk *chk;
11239 struct sctp_chunkhdr *ch, chunk_buf;
11240 unsigned int chk_length;
11245 asoc = &stcb->asoc;
11246 SCTP_TCB_LOCK_ASSERT(stcb);
11247 if (asoc->peer_supports_pktdrop == 0) {
11249 * peer must declare support before I send one.
11253 if (stcb->sctp_socket == NULL) {
11256 sctp_alloc_a_chunk(stcb, chk);
11260 chk->copy_by_ref = 0;
11262 chk->send_size = len;
11263 /* Validate that we do not have an ABORT in here. */
11264 offset = iphlen + sizeof(struct sctphdr);
11265 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11266 sizeof(*ch), (uint8_t *) & chunk_buf);
11267 while (ch != NULL) {
11268 chk_length = ntohs(ch->chunk_length);
11269 if (chk_length < sizeof(*ch)) {
11270 /* break to abort land */
11273 switch (ch->chunk_type) {
11274 case SCTP_PACKET_DROPPED:
11275 case SCTP_ABORT_ASSOCIATION:
11276 case SCTP_INITIATION_ACK:
11278 * We don't respond with a PKT-DROP to an ABORT
11279 * or PKT-DROP. We also do not respond to an
11280 * INIT-ACK, because we can't know if the initiation
11281 * tag is correct or not.
11283 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11288 offset += SCTP_SIZE32(chk_length);
11289 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11290 sizeof(*ch), (uint8_t *) & chunk_buf);
11293 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11294 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11296 * only send 1 mtu worth, trim off the excess on the end.
11299 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11302 chk->asoc = &stcb->asoc;
11303 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11304 if (chk->data == NULL) {
11306 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11309 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11310 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11312 sctp_m_freem(chk->data);
11316 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11317 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11318 chk->book_size_scale = 0;
11320 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11321 drp->trunc_len = htons(fullsz);
11323 * Len is already adjusted to size minus overhead above; take
11324 * out the pkt_drop chunk itself from it.
11326 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
11327 len = chk->send_size;
11329 /* no truncation needed */
11330 drp->ch.chunk_flags = 0;
11331 drp->trunc_len = htons(0);
11334 drp->ch.chunk_flags |= SCTP_BADCRC;
11336 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11337 SCTP_BUF_LEN(chk->data) = chk->send_size;
11338 chk->sent = SCTP_DATAGRAM_UNSENT;
11339 chk->snd_count = 0;
11341 /* we should hit here */
11343 atomic_add_int(&chk->whoTo->ref_count, 1);
11347 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11348 chk->rec.chunk_id.can_take_data = 1;
11349 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11350 drp->ch.chunk_length = htons(chk->send_size);
11351 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11355 drp->bottle_bw = htonl(spc);
11356 if (asoc->my_rwnd) {
11357 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11358 asoc->size_on_all_streams +
11359 asoc->my_rwnd_control_len +
11360 stcb->sctp_socket->so_rcv.sb_cc);
11363 * If my rwnd is 0, possibly from mbuf depletion as well as
11364 * space used, tell the peer there is NO space aka onq == bw
11366 drp->current_onq = htonl(spc);
11370 m_copydata(m, iphlen, len, (caddr_t)datap);
11371 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11372 asoc->ctrl_queue_cnt++;
11376 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11378 struct sctp_association *asoc;
11379 struct sctp_cwr_chunk *cwr;
11380 struct sctp_tmit_chunk *chk;
11382 SCTP_TCB_LOCK_ASSERT(stcb);
11386 asoc = &stcb->asoc;
11387 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11388 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11390 * found a previous CWR queued to the same destination;
11391 * update it if needed
11395 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11396 ctsn = ntohl(cwr->tsn);
11397 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11398 cwr->tsn = htonl(high_tsn);
11400 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11401 /* Make sure override is carried */
11402 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11407 sctp_alloc_a_chunk(stcb, chk);
11411 chk->copy_by_ref = 0;
11412 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11413 chk->rec.chunk_id.can_take_data = 1;
11414 chk->asoc = &stcb->asoc;
11415 chk->send_size = sizeof(struct sctp_cwr_chunk);
11416 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11417 if (chk->data == NULL) {
11418 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11421 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11422 SCTP_BUF_LEN(chk->data) = chk->send_size;
11423 chk->sent = SCTP_DATAGRAM_UNSENT;
11424 chk->snd_count = 0;
11426 atomic_add_int(&chk->whoTo->ref_count, 1);
11427 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11428 cwr->ch.chunk_type = SCTP_ECN_CWR;
11429 cwr->ch.chunk_flags = override;
11430 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11431 cwr->tsn = htonl(high_tsn);
11432 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11433 asoc->ctrl_queue_cnt++;
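/*
 * The helpers below each append one stream-reset parameter to the
 * STREAM-RESET chunk in 'chk' and then fix up the chunk length and the
 * book-keeping sizes.
 */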
11437 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11438 int number_entries, uint16_t * list,
11439 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11441 uint16_t len, old_len, i;
11442 struct sctp_stream_reset_out_request *req_out;
11443 struct sctp_chunkhdr *ch;
11445 ch = mtod(chk->data, struct sctp_chunkhdr *);
11446 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11448 /* get to new offset for the param. */
11449 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11450 /* now how long will this param be? */
11451 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11452 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11453 req_out->ph.param_length = htons(len);
11454 req_out->request_seq = htonl(seq);
11455 req_out->response_seq = htonl(resp_seq);
11456 req_out->send_reset_at_tsn = htonl(last_sent);
11457 if (number_entries) {
11458 for (i = 0; i < number_entries; i++) {
11459 req_out->list_of_streams[i] = htons(list[i]);
11462 if (SCTP_SIZE32(len) > len) {
11464 * Need to worry about the pad we may end up adding to the
11465 * end. This is easy since the struct is either aligned to 4
11466 * bytes or 2 bytes off.
11468 req_out->list_of_streams[number_entries] = 0;
11470 /* now fix the chunk length */
11471 ch->chunk_length = htons(len + old_len);
11472 chk->book_size = len + old_len;
11473 chk->book_size_scale = 0;
11474 chk->send_size = SCTP_SIZE32(chk->book_size);
11475 SCTP_BUF_LEN(chk->data) = chk->send_size;
11480 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11481 int number_entries, uint16_t * list,
11484 uint16_t len, old_len, i;
11485 struct sctp_stream_reset_in_request *req_in;
11486 struct sctp_chunkhdr *ch;
11488 ch = mtod(chk->data, struct sctp_chunkhdr *);
11489 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11491 /* get to new offset for the param. */
11492 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11493 /* now how long will this param be? */
11494 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11495 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11496 req_in->ph.param_length = htons(len);
11497 req_in->request_seq = htonl(seq);
11498 if (number_entries) {
11499 for (i = 0; i < number_entries; i++) {
11500 req_in->list_of_streams[i] = htons(list[i]);
11503 if (SCTP_SIZE32(len) > len) {
11505 * Need to worry about the pad we may end up adding to the
11506 * end. This is easy since the struct is either aligned to 4
11507 * bytes or 2 bytes off.
11509 req_in->list_of_streams[number_entries] = 0;
11511 /* now fix the chunk length */
11512 ch->chunk_length = htons(len + old_len);
11513 chk->book_size = len + old_len;
11514 chk->book_size_scale = 0;
11515 chk->send_size = SCTP_SIZE32(chk->book_size);
11516 SCTP_BUF_LEN(chk->data) = chk->send_size;
11521 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11524 uint16_t len, old_len;
11525 struct sctp_stream_reset_tsn_request *req_tsn;
11526 struct sctp_chunkhdr *ch;
11528 ch = mtod(chk->data, struct sctp_chunkhdr *);
11529 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11531 /* get to new offset for the param. */
11532 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11533 /* now how long will this param be? */
11534 len = sizeof(struct sctp_stream_reset_tsn_request);
11535 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11536 req_tsn->ph.param_length = htons(len);
11537 req_tsn->request_seq = htonl(seq);
11539 /* now fix the chunk length */
11540 ch->chunk_length = htons(len + old_len);
11541 chk->send_size = len + old_len;
11542 chk->book_size = SCTP_SIZE32(chk->send_size);
11543 chk->book_size_scale = 0;
11544 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11549 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11550 uint32_t resp_seq, uint32_t result)
11552 uint16_t len, old_len;
11553 struct sctp_stream_reset_response *resp;
11554 struct sctp_chunkhdr *ch;
11556 ch = mtod(chk->data, struct sctp_chunkhdr *);
11557 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11559 /* get to new offset for the param. */
11560 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11561 /* now how long will this param be? */
11562 len = sizeof(struct sctp_stream_reset_response);
11563 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11564 resp->ph.param_length = htons(len);
11565 resp->response_seq = htonl(resp_seq);
11566 resp->result = htonl(result);
11568 /* now fix the chunk length */
11569 ch->chunk_length = htons(len + old_len);
11570 chk->book_size = len + old_len;
11571 chk->book_size_scale = 0;
11572 chk->send_size = SCTP_SIZE32(chk->book_size);
11573 SCTP_BUF_LEN(chk->data) = chk->send_size;
11578 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11579 uint32_t resp_seq, uint32_t result,
11580 uint32_t send_una, uint32_t recv_next)
11582 uint16_t len, old_len;
11583 struct sctp_stream_reset_response_tsn *resp;
11584 struct sctp_chunkhdr *ch;
11586 ch = mtod(chk->data, struct sctp_chunkhdr *);
11587 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11589 /* get to new offset for the param. */
11590 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11591 /* now how long will this param be? */
11592 len = sizeof(struct sctp_stream_reset_response_tsn);
11593 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11594 resp->ph.param_length = htons(len);
11595 resp->response_seq = htonl(resp_seq);
11596 resp->result = htonl(result);
11597 resp->senders_next_tsn = htonl(send_una);
11598 resp->receivers_next_tsn = htonl(recv_next);
11600 /* now fix the chunk length */
11601 ch->chunk_length = htons(len + old_len);
11602 chk->book_size = len + old_len;
11603 chk->send_size = SCTP_SIZE32(chk->book_size);
11604 chk->book_size_scale = 0;
11605 SCTP_BUF_LEN(chk->data) = chk->send_size;
11610 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11614 uint16_t len, old_len;
11615 struct sctp_chunkhdr *ch;
11616 struct sctp_stream_reset_add_strm *addstr;
11618 ch = mtod(chk->data, struct sctp_chunkhdr *);
11619 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11621 /* get to new offset for the param. */
11622 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11623 /* now how long will this param be? */
11624 len = sizeof(struct sctp_stream_reset_add_strm);
11627 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11628 addstr->ph.param_length = htons(len);
11629 addstr->request_seq = htonl(seq);
11630 addstr->number_of_streams = htons(adding);
11631 addstr->reserved = 0;
11633 /* now fix the chunk length */
11634 ch->chunk_length = htons(len + old_len);
11635 chk->send_size = len + old_len;
11636 chk->book_size = SCTP_SIZE32(chk->send_size);
11637 chk->book_size_scale = 0;
11638 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11643 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
11647 uint16_t len, old_len;
11648 struct sctp_chunkhdr *ch;
11649 struct sctp_stream_reset_add_strm *addstr;
11651 ch = mtod(chk->data, struct sctp_chunkhdr *);
11652 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11654 /* get to new offset for the param. */
11655 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11656 /* now how long will this param be? */
11657 len = sizeof(struct sctp_stream_reset_add_strm);
11659 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
11660 addstr->ph.param_length = htons(len);
11661 addstr->request_seq = htonl(seq);
11662 addstr->number_of_streams = htons(adding);
11663 addstr->reserved = 0;
11665 /* now fix the chunk length */
11666 ch->chunk_length = htons(len + old_len);
11667 chk->send_size = len + old_len;
11668 chk->book_size = SCTP_SIZE32(chk->send_size);
11669 chk->book_size_scale = 0;
11670 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11675 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11676 int number_entries, uint16_t * list,
11677 uint8_t send_out_req,
11678 uint8_t send_in_req,
11679 uint8_t send_tsn_req,
11680 uint8_t add_stream,
11682 uint16_t adding_i, uint8_t peer_asked)
11685 struct sctp_association *asoc;
11686 struct sctp_tmit_chunk *chk;
11687 struct sctp_chunkhdr *ch;
11690 asoc = &stcb->asoc;
11691 if (asoc->stream_reset_outstanding) {
11693 * Already one pending, must get ACK back to clear the flag.
11695 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11698 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11699 (add_stream == 0)) {
11700 /* nothing to do */
11701 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11704 if (send_tsn_req && (send_out_req || send_in_req)) {
11705 /* error, can't do that */
11706 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11709 sctp_alloc_a_chunk(stcb, chk);
11711 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11714 chk->copy_by_ref = 0;
11715 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11716 chk->rec.chunk_id.can_take_data = 0;
11717 chk->asoc = &stcb->asoc;
11718 chk->book_size = sizeof(struct sctp_chunkhdr);
11719 chk->send_size = SCTP_SIZE32(chk->book_size);
11720 chk->book_size_scale = 0;
11722 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11723 if (chk->data == NULL) {
11724 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11725 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11728 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11730 /* setup chunk parameters */
11731 chk->sent = SCTP_DATAGRAM_UNSENT;
11732 chk->snd_count = 0;
11733 if (stcb->asoc.alternate) {
11734 chk->whoTo = stcb->asoc.alternate;
11736 chk->whoTo = stcb->asoc.primary_destination;
11738 atomic_add_int(&chk->whoTo->ref_count, 1);
11739 ch = mtod(chk->data, struct sctp_chunkhdr *);
11740 ch->chunk_type = SCTP_STREAM_RESET;
11741 ch->chunk_flags = 0;
11742 ch->chunk_length = htons(chk->book_size);
11743 SCTP_BUF_LEN(chk->data) = chk->send_size;
11745 seq = stcb->asoc.str_reset_seq_out;
11746 if (send_out_req) {
11747 sctp_add_stream_reset_out(chk, number_entries, list,
11748 seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
11749 asoc->stream_reset_out_is_outstanding = 1;
11751 asoc->stream_reset_outstanding++;
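/*
 * When adding outgoing streams, grow the strmout array up front;
 * streamoutcnt itself is only raised once the peer acknowledges the
 * request.
 */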
11753 if ((add_stream & 1) &&
11754 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
11755 /* Need to allocate more */
11756 struct sctp_stream_out *oldstream;
11757 struct sctp_stream_queue_pending *sp, *nsp;
11760 oldstream = stcb->asoc.strmout;
11761 /* get some more */
11762 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
11763 ((stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out)),
11765 if (stcb->asoc.strmout == NULL) {
11768 stcb->asoc.strmout = oldstream;
11769 /* Turn off the bit */
11770 x = add_stream & 0xfe;
11775 * Ok now we proceed with copying the old out stuff and
11776 * initializing the new stuff.
11778 SCTP_TCB_SEND_LOCK(stcb);
11779 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
11780 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11781 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11782 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
11783 stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
11784 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
11785 stcb->asoc.strmout[i].stream_no = i;
11786 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
11787 /* now anything on those queues? */
11788 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
11789 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
11790 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
11792 /* Now move assoc pointers too */
11793 if (stcb->asoc.last_out_stream == &oldstream[i]) {
11794 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
11796 if (stcb->asoc.locked_on_sending == &oldstream[i]) {
11797 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
11800 /* now the new streams */
11801 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
11802 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
11803 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11804 stcb->asoc.strmout[i].chunks_on_queues = 0;
11805 stcb->asoc.strmout[i].next_sequence_send = 0x0;
11806 stcb->asoc.strmout[i].stream_no = i;
11807 stcb->asoc.strmout[i].last_msg_incomplete = 0;
11808 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
11810 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
11811 SCTP_FREE(oldstream, SCTP_M_STRMO);
11812 SCTP_TCB_SEND_UNLOCK(stcb);
11815 if ((add_stream & 1) && (adding_o > 0)) {
11816 asoc->strm_pending_add_size = adding_o;
11817 asoc->peer_req_out = peer_asked;
11818 sctp_add_an_out_stream(chk, seq, adding_o);
11820 asoc->stream_reset_outstanding++;
11822 if ((add_stream & 2) && (adding_i > 0)) {
11823 sctp_add_an_in_stream(chk, seq, adding_i);
11825 asoc->stream_reset_outstanding++;
11828 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11830 asoc->stream_reset_outstanding++;
11832 if (send_tsn_req) {
11833 sctp_add_stream_reset_tsn(chk, seq);
11834 asoc->stream_reset_outstanding++;
11836 asoc->str_reset = chk;
11837 /* insert the chunk for sending */
11838 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11841 asoc->ctrl_queue_cnt++;
11842 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
11847 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
11848 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
11849 uint8_t use_mflowid, uint32_t mflowid,
11850 uint32_t vrf_id, uint16_t port)
11852 /* Don't respond to an ABORT with an ABORT. */
11853 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11855 sctp_m_freem(cause);
11858 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
11859 use_mflowid, mflowid,
11865 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
11866 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
11867 uint8_t use_mflowid, uint32_t mflowid,
11868 uint32_t vrf_id, uint16_t port)
11870 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
11871 use_mflowid, mflowid,
11876 static struct mbuf *
11877 sctp_copy_resume(struct uio *uio,
11879 int user_marks_eor,
11882 struct mbuf **new_tail)
11886 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
11887 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
11889 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11892 *sndout = m_length(m, NULL);
11893 *new_tail = m_last(m);
11899 sctp_copy_one(struct sctp_stream_queue_pending *sp,
11906 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
11908 if (sp->data == NULL) {
11909 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11912 sp->tail_mbuf = m_last(sp->data);
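/*
 * Allocate a stream queue pending entry and copy up to max_send_len
 * bytes of user data into it, noting whether this completes the
 * message.
 */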
11918 static struct sctp_stream_queue_pending *
11919 sctp_copy_it_in(struct sctp_tcb *stcb,
11920 struct sctp_association *asoc,
11921 struct sctp_sndrcvinfo *srcv,
11923 struct sctp_nets *net,
11925 int user_marks_eor,
11929 * This routine must be very careful in its work. Protocol
11930 * processing is up and running so care must be taken to spl...()
11931 * when you need to do something that may affect the stcb/asoc. The
11932 * sb is locked however. When data is copied the protocol processing
11933 * should be enabled since this is a slower operation...
11935 struct sctp_stream_queue_pending *sp = NULL;
11939 /* Now can we send this? */
11940 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
11941 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
11942 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
11943 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
11944 /* got data while shutting down */
11945 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
11946 *error = ECONNRESET;
11949 sctp_alloc_a_strmoq(stcb, sp);
11951 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11956 sp->sender_all_done = 0;
11957 sp->sinfo_flags = srcv->sinfo_flags;
11958 sp->timetolive = srcv->sinfo_timetolive;
11959 sp->ppid = srcv->sinfo_ppid;
11960 sp->context = srcv->sinfo_context;
11961 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
11963 sp->stream = srcv->sinfo_stream;
11964 sp->length = min(uio->uio_resid, max_send_len);
11965 if ((sp->length == (uint32_t) uio->uio_resid) &&
11966 ((user_marks_eor == 0) ||
11967 (srcv->sinfo_flags & SCTP_EOF) ||
11968 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
11969 sp->msg_is_complete = 1;
11971 sp->msg_is_complete = 0;
11973 sp->sender_all_done = 0;
11974 sp->some_taken = 0;
11975 sp->put_last_out = 0;
11976 resv_in_first = sizeof(struct sctp_data_chunk);
11977 sp->data = sp->tail_mbuf = NULL;
11978 if (sp->length == 0) {
11982 if (srcv->sinfo_keynumber_valid) {
11983 sp->auth_keyid = srcv->sinfo_keynumber;
11985 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
11987 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
11988 sctp_auth_key_acquire(stcb, sp->auth_keyid);
11989 sp->holds_key_ref = 1;
11991 *error = sctp_copy_one(sp, uio, resv_in_first);
11994 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
11997 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
11999 atomic_add_int(&sp->net->ref_count, 1);
12003 sctp_set_prsctp_policy(sp);
12011 sctp_sosend(struct socket *so,
12012 struct sockaddr *addr,
12015 struct mbuf *control,
12020 int error, use_sndinfo = 0;
12021 struct sctp_sndrcvinfo sndrcvninfo;
12022 struct sockaddr *addr_to_use;
12024 #if defined(INET) && defined(INET6)
12025 struct sockaddr_in sin;
12030 /* process cmsg snd/rcv info (maybe an assoc-id) */
12031 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12032 sizeof(sndrcvninfo))) {
12037 addr_to_use = addr;
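/*
 * If the caller passed a v4-mapped IPv6 address, convert it to a plain
 * IPv4 sockaddr before handing it down.
 */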
12038 #if defined(INET) && defined(INET6)
12039 if ((addr) && (addr->sa_family == AF_INET6)) {
12040 struct sockaddr_in6 *sin6;
12042 sin6 = (struct sockaddr_in6 *)addr;
12043 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12044 in6_sin6_2_sin(&sin, sin6);
12045 addr_to_use = (struct sockaddr *)&sin;
12049 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12052 use_sndinfo ? &sndrcvninfo : NULL
12060 sctp_lower_sosend(struct socket *so,
12061 struct sockaddr *addr,
12063 struct mbuf *i_pak,
12064 struct mbuf *control,
12066 struct sctp_sndrcvinfo *srcv
12071 unsigned int sndlen = 0, max_len;
12073 struct mbuf *top = NULL;
12074 int queue_only = 0, queue_only_for_init = 0;
12075 int free_cnt_applied = 0;
12077 int now_filled = 0;
12078 unsigned int inqueue_bytes = 0;
12079 struct sctp_block_entry be;
12080 struct sctp_inpcb *inp;
12081 struct sctp_tcb *stcb = NULL;
12082 struct timeval now;
12083 struct sctp_nets *net;
12084 struct sctp_association *asoc;
12085 struct sctp_inpcb *t_inp;
12086 int user_marks_eor;
12087 int create_lock_applied = 0;
12088 int nagle_applies = 0;
12089 int some_on_control = 0;
12090 int got_all_of_the_send = 0;
12091 int hold_tcblock = 0;
12092 int non_blocking = 0;
12093 uint32_t local_add_more, local_soresv = 0;
12095 uint16_t sinfo_flags;
12096 sctp_assoc_t sinfo_assoc_id;
12103 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12105 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12108 SCTP_RELEASE_PKT(i_pak);
12112 if ((uio == NULL) && (i_pak == NULL)) {
12113 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12116 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12117 atomic_add_int(&inp->total_sends, 1);
12119 if (uio->uio_resid < 0) {
12120 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12123 sndlen = uio->uio_resid;
12125 top = SCTP_HEADER_TO_CHAIN(i_pak);
12126 sndlen = SCTP_HEADER_LEN(i_pak);
12128 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12131 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12132 (inp->sctp_socket->so_qlimit)) {
12133 /* The listener can NOT send */
12134 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12139 * Pre-screen address; if one is given, the sin-len
12140 * must be set correctly!
12143 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12145 switch (raddr->sa.sa_family) {
12148 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12149 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12153 port = raddr->sin.sin_port;
12158 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12159 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12163 port = raddr->sin6.sin6_port;
12167 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12168 error = EAFNOSUPPORT;
12175 sinfo_flags = srcv->sinfo_flags;
12176 sinfo_assoc_id = srcv->sinfo_assoc_id;
12177 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12178 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12179 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12183 if (srcv->sinfo_flags)
12184 SCTP_STAT_INCR(sctps_sends_with_flags);
12186 sinfo_flags = inp->def_send.sinfo_flags;
12187 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12189 if (sinfo_flags & SCTP_SENDALL) {
12190 /* it's a sendall */
12191 error = sctp_sendall(inp, uio, top, srcv);
12195 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12196 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12200 /* now we must find the assoc */
12201 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12202 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12203 SCTP_INP_RLOCK(inp);
12204 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12206 SCTP_TCB_LOCK(stcb);
12209 SCTP_INP_RUNLOCK(inp);
12210 } else if (sinfo_assoc_id) {
12211 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
12214 * Since we did not use findep we must
12215 * increment it, and if we don't find a tcb decrement it.
12218 SCTP_INP_WLOCK(inp);
12219 SCTP_INP_INCR_REF(inp);
12220 SCTP_INP_WUNLOCK(inp);
12221 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12222 if (stcb == NULL) {
12223 SCTP_INP_WLOCK(inp);
12224 SCTP_INP_DECR_REF(inp);
12225 SCTP_INP_WUNLOCK(inp);
12230 if ((stcb == NULL) && (addr)) {
12231 /* Possible implicit send? */
12232 SCTP_ASOC_CREATE_LOCK(inp);
12233 create_lock_applied = 1;
12234 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12235 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12236 /* Should I really unlock ? */
12237 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12242 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12243 (addr->sa_family == AF_INET6)) {
12244 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12248 SCTP_INP_WLOCK(inp);
12249 SCTP_INP_INCR_REF(inp);
12250 SCTP_INP_WUNLOCK(inp);
12251 /* With the lock applied look again */
12252 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12253 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12254 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12256 if (stcb == NULL) {
12257 SCTP_INP_WLOCK(inp);
12258 SCTP_INP_DECR_REF(inp);
12259 SCTP_INP_WUNLOCK(inp);
12266 if (t_inp != inp) {
12267 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12272 if (stcb == NULL) {
12273 if (addr == NULL) {
12274 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12278 /* We must go ahead and start the INIT process */
12281 if ((sinfo_flags & SCTP_ABORT) ||
12282 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12284 * User asks to abort a non-existent assoc,
12285 * or EOF a non-existent assoc with no data
12287 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12291 /* get an asoc/stcb struct */
12292 vrf_id = inp->def_vrf_id;
12294 if (create_lock_applied == 0) {
12295 panic("Error, should hold create lock and I don't?");
12298 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12301 if (stcb == NULL) {
12302 /* Error is setup for us in the call */
12305 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12306 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12308 * Set the connected flag so we can queue data
12311 soisconnecting(so);
12314 if (create_lock_applied) {
12315 SCTP_ASOC_CREATE_UNLOCK(inp);
12316 create_lock_applied = 0;
12318 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12321 * Turn on queue only flag to prevent data from being sent
12325 asoc = &stcb->asoc;
12326 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12327 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12329 /* initialize authentication params for the assoc */
12330 sctp_initialize_auth_params(inp, stcb);
12333 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12334 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
12340 /* out with the INIT */
12341 queue_only_for_init = 1;
12343 * we may want to dig in after this call and adjust the MTU
12344 * value. It defaulted to 1500 (constant) but the ro
12345 * structure may now have an update and thus we may need to
12346 * change it BEFORE we append the message.
12350 asoc = &stcb->asoc;
12352 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12353 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12355 net = sctp_findnet(stcb, addr);
12358 if ((net == NULL) ||
12359 ((port != 0) && (port != stcb->rport))) {
12360 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12365 if (stcb->asoc.alternate) {
12366 net = stcb->asoc.alternate;
12368 net = stcb->asoc.primary_destination;
12371 atomic_add_int(&stcb->total_sends, 1);
12372 /* Keep the stcb from being freed under our feet */
12373 atomic_add_int(&asoc->refcnt, 1);
12374 free_cnt_applied = 1;
12376 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12377 if (sndlen > asoc->smallest_mtu) {
12378 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12383 if (SCTP_SO_IS_NBIO(so)
12384 || (flags & MSG_NBIO)
12388 /* would we block? */
12389 if (non_blocking) {
12390 if (hold_tcblock == 0) {
12391 SCTP_TCB_LOCK(stcb);
12394 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12395 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12396 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12397 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12398 if (sndlen > SCTP_SB_LIMIT_SND(so))
12401 error = EWOULDBLOCK;
12404 stcb->asoc.sb_send_resv += sndlen;
12405 SCTP_TCB_UNLOCK(stcb);
12408 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12410 local_soresv = sndlen;
12411 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12412 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12413 error = ECONNRESET;
12416 if (create_lock_applied) {
12417 SCTP_ASOC_CREATE_UNLOCK(inp);
12418 create_lock_applied = 0;
12420 if (asoc->stream_reset_outstanding) {
12422 * Can't queue any data while stream reset is underway.
12424 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
12428 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12429 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12432 /* we are now done with all control */
12434 sctp_m_freem(control);
12437 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12438 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12439 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12440 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12441 if (srcv->sinfo_flags & SCTP_ABORT) {
12444 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12445 error = ECONNRESET;
12449 /* Ok, we will attempt a msgsnd :> */
12451 p->td_ru.ru_msgsnd++;
12453 /* Are we aborting? */
12454 if (srcv->sinfo_flags & SCTP_ABORT) {
12456 int tot_demand, tot_out = 0, max_out;
12458 SCTP_STAT_INCR(sctps_sends_with_abort);
12459 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12460 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12461 /* It has to be up before we abort */
12462 /* how big is the user initiated abort? */
12463 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12467 if (hold_tcblock) {
12468 SCTP_TCB_UNLOCK(stcb);
12472 struct mbuf *cntm = NULL;
12474 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
12476 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12477 tot_out += SCTP_BUF_LEN(cntm);
12481 /* Must fit in an MTU */
12483 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12484 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12486 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12490 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
12493 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12497 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12498 max_out -= sizeof(struct sctp_abort_msg);
12499 if (tot_out > max_out) {
12503 struct sctp_paramhdr *ph;
12505 /* now move forward the data pointer */
12506 ph = mtod(mm, struct sctp_paramhdr *);
12507 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12508 ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out);
12510 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12512 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12515 * Here if we can't get the user's data we
12516 * still abort; we just don't get to
12517 * send the user's note :-0
12524 SCTP_BUF_NEXT(mm) = top;
12528 if (hold_tcblock == 0) {
12529 SCTP_TCB_LOCK(stcb);
12531 atomic_add_int(&stcb->asoc.refcnt, -1);
12532 free_cnt_applied = 0;
12533 /* release this lock, otherwise we hang on ourselves */
12534 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
12535 /* now relock the stcb so everything is sane */
12539 * In this case top is already chained to mm; avoid a double
12540 * free, since we free it below if top != NULL and the driver
12541 * would free it after sending the packet out
12548 /* Calculate the maximum we can send */
12549 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12550 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12551 if (non_blocking) {
12552 /* we already checked for non-blocking above. */
12555 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12560 if (hold_tcblock) {
12561 SCTP_TCB_UNLOCK(stcb);
12564 /* Is the stream no. valid? */
12565 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12566 /* Invalid stream number */
12567 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12571 if (asoc->strmout == NULL) {
12572 /* huh? software error */
12573 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12577 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
12578 if ((user_marks_eor == 0) &&
12579 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
12580 /* It will NEVER fit */
12581 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12585 if ((uio == NULL) && user_marks_eor) {
12587 * We do not support eeor mode for
12588 * sending with mbuf chains (like sendfile).
12590 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12594 if (user_marks_eor) {
12595 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
12598 * For non-eeor the whole message must fit in
12599 * the socket send buffer.
12601 local_add_more = sndlen;
12604 if (non_blocking) {
12605 goto skip_preblock;
12607 if (((max_len <= local_add_more) &&
12608 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
12610 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12611 /* No room right now ! */
12612 SOCKBUF_LOCK(&so->so_snd);
12613 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12614 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
12615 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12616 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
12617 (unsigned int)SCTP_SB_LIMIT_SND(so),
12620 stcb->asoc.stream_queue_cnt,
12621 stcb->asoc.chunks_on_out_queue,
12622 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
12623 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12624 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
12627 stcb->block_entry = &be;
12628 error = sbwait(&so->so_snd);
12629 stcb->block_entry = NULL;
12630 if (error || so->so_error || be.error) {
12633 error = so->so_error;
12638 SOCKBUF_UNLOCK(&so->so_snd);
12641 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12642 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
12643 asoc, stcb->asoc.total_output_queue_size);
12645 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12648 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12650 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12651 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12655 SOCKBUF_UNLOCK(&so->so_snd);
12658 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12662 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
12663 * case. NOTE: uio will be null when top/mbuf is passed
12666 if (srcv->sinfo_flags & SCTP_EOF) {
12667 got_all_of_the_send = 1;
12670 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12676 struct sctp_stream_queue_pending *sp;
12677 struct sctp_stream_out *strm;
12680 SCTP_TCB_SEND_LOCK(stcb);
12681 if ((asoc->stream_locked) &&
12682 (asoc->stream_locked_on != srcv->sinfo_stream)) {
12683 SCTP_TCB_SEND_UNLOCK(stcb);
12684 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12688 SCTP_TCB_SEND_UNLOCK(stcb);
12690 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
12691 if (strm->last_msg_incomplete == 0) {
12693 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
12694 if ((sp == NULL) || (error)) {
12697 SCTP_TCB_SEND_LOCK(stcb);
12698 if (sp->msg_is_complete) {
12699 strm->last_msg_incomplete = 0;
12700 asoc->stream_locked = 0;
12703 * Just got locked to this guy in case of an interrupt.
12706 strm->last_msg_incomplete = 1;
12707 asoc->stream_locked = 1;
12708 asoc->stream_locked_on = srcv->sinfo_stream;
12709 sp->sender_all_done = 0;
12711 sctp_snd_sb_alloc(stcb, sp->length);
12712 atomic_add_int(&asoc->stream_queue_cnt, 1);
12713 if (srcv->sinfo_flags & SCTP_UNORDERED) {
12714 SCTP_STAT_INCR(sctps_sends_with_unord);
12716 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
12717 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
12718 SCTP_TCB_SEND_UNLOCK(stcb);
12720 SCTP_TCB_SEND_LOCK(stcb);
12721 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
12722 SCTP_TCB_SEND_UNLOCK(stcb);
12724 /* ???? Huh ??? last msg is gone */
12726 panic("Warning: Last msg marked incomplete, yet nothing left?");
12728 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
12729 strm->last_msg_incomplete = 0;
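/*
 * Main copy loop: move the user data into the stream queue piece by
 * piece, waiting for send buffer space and kicking off chunk output as
 * needed, until the uio is drained.
 */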
12735 while (uio->uio_resid > 0) {
12736 /* How much room do we have? */
12737 struct mbuf *new_tail, *mm;
12739 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
12740 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
12744 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
12745 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
12746 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
12749 if (hold_tcblock) {
12750 SCTP_TCB_UNLOCK(stcb);
12753 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
12754 if ((mm == NULL) || error) {
12760 /* Update the mbuf and count */
12761 SCTP_TCB_SEND_LOCK(stcb);
12762 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12764 * we need to get out. Peer probably aborted.
12768 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
12769 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12770 error = ECONNRESET;
12772 SCTP_TCB_SEND_UNLOCK(stcb);
12775 if (sp->tail_mbuf) {
12776 /* tack it to the end */
12777 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
12778 sp->tail_mbuf = new_tail;
12780 /* A stolen mbuf */
12782 sp->tail_mbuf = new_tail;
12784 sctp_snd_sb_alloc(stcb, sndout);
12785 atomic_add_int(&sp->length, sndout);
12788 /* Did we reach EOR? */
12789 if ((uio->uio_resid == 0) &&
12790 ((user_marks_eor == 0) ||
12791 (srcv->sinfo_flags & SCTP_EOF) ||
12792 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12793 sp->msg_is_complete = 1;
12795 sp->msg_is_complete = 0;
12797 SCTP_TCB_SEND_UNLOCK(stcb);
12799 if (uio->uio_resid == 0) {
12804 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
12806 * This is ugly but we must assure locking
12809 if (hold_tcblock == 0) {
12810 SCTP_TCB_LOCK(stcb);
12813 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
12814 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12815 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
12816 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12822 SCTP_TCB_UNLOCK(stcb);
12825 /* wait for space now */
12826 if (non_blocking) {
12827 /* Non-blocking I/O: do not wait for space, just get out */
12830 /* What about the INIT, send it maybe */
12831 if (queue_only_for_init) {
12832 if (hold_tcblock == 0) {
12833 SCTP_TCB_LOCK(stcb);
12836 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
12837 /* a collision took us forward? */
12840 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
12841 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12845 if ((net->flight_size > net->cwnd) &&
12846 (asoc->sctp_cmt_on_off == 0)) {
12847 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
12849 } else if (asoc->ifp_had_enobuf) {
12850 SCTP_STAT_INCR(sctps_ifnomemqueued);
12851 if (net->flight_size > (2 * net->mtu)) {
12854 asoc->ifp_had_enobuf = 0;
12856 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
12857 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
12858 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
12859 (stcb->asoc.total_flight > 0) &&
12860 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
12861 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
12864 * Ok, Nagle is set on and we have data outstanding.
12865 * Don't send anything and let SACKs drive out the
12866 * data unless we have a "full" segment to send.
12868 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
12869 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
12871 SCTP_STAT_INCR(sctps_naglequeued);
12874 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
12875 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
12876 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
12878 SCTP_STAT_INCR(sctps_naglesent);
12881 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12883 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
12884 nagle_applies, un_sent);
12885 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
12886 stcb->asoc.total_flight,
12887 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
12889 if (queue_only_for_init)
12890 queue_only_for_init = 0;
12891 if ((queue_only == 0) && (nagle_applies == 0)) {
12893 * Need to start chunk output
12894 * before blocking. Note that if
12895 * the lock is already held, then
12896 * net input processing is running
12897 * and will start output for us.
12899 if (hold_tcblock == 0) {
12900 if (SCTP_TCB_TRYLOCK(stcb)) {
12902 sctp_chunk_output(inp,
12904 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
12907 sctp_chunk_output(inp,
12909 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
12911 if (hold_tcblock == 1) {
12912 SCTP_TCB_UNLOCK(stcb);
12916 SOCKBUF_LOCK(&so->so_snd);
12918 * This is a bit strange, but it works. The
12919 * total_output_queue_size is protected by the TCB_LOCK,
12920 * which we just released. A race can occur between
12921 * releasing that lock above and acquiring the socket
12922 * lock here: SACKs may come in before we have put
12923 * SB_WAIT on the so_snd buffer to request the wakeup.
12924 * However, after the socket buffer lock is applied,
12925 * SACK processing will also need to lock so->so_snd to
12926 * do the actual sowwakeup(). So once we hold the socket
12927 * buffer lock, if we recheck the size we know we will
12928 * get to sleep safely with the wakeup flag in place.
12931 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
12932 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
12933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12934 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
12935 asoc, uio->uio_resid);
12938 stcb->block_entry = &be;
12939 error = sbwait(&so->so_snd);
12940 stcb->block_entry = NULL;
12942 if (error || so->so_error || be.error) {
12945 error = so->so_error;
12950 SOCKBUF_UNLOCK(&so->so_snd);
12953 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12954 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
12955 asoc, stcb->asoc.total_output_queue_size);
12958 SOCKBUF_UNLOCK(&so->so_snd);
12959 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12963 SCTP_TCB_SEND_LOCK(stcb);
12965 if (sp->msg_is_complete == 0) {
12966 strm->last_msg_incomplete = 1;
12967 asoc->stream_locked = 1;
12968 asoc->stream_locked_on = srcv->sinfo_stream;
12970 sp->sender_all_done = 1;
12971 strm->last_msg_incomplete = 0;
12972 asoc->stream_locked = 0;
12975 SCTP_PRINTF("Huh no sp TSNH?\n");
12976 strm->last_msg_incomplete = 0;
12977 asoc->stream_locked = 0;
12979 SCTP_TCB_SEND_UNLOCK(stcb);
12980 if (uio->uio_resid == 0) {
12981 got_all_of_the_send = 1;
12984 /* We send in a 0, since we do NOT have any locks */
12985 error = sctp_msg_append(stcb, net, top, srcv, 0);
12987 if (srcv->sinfo_flags & SCTP_EOF) {
12989 * This should only happen for the Panda mbuf send
12990 * case, which does NOT yet support EEOR mode. Thus, we
12991 * can just set this flag to do the proper EOF handling.
12994 got_all_of_the_send = 1;
13002 if ((srcv->sinfo_flags & SCTP_EOF) &&
13003 (got_all_of_the_send == 1)) {
13006 SCTP_STAT_INCR(sctps_sends_with_eof);
13008 if (hold_tcblock == 0) {
13009 SCTP_TCB_LOCK(stcb);
13012 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
13013 if (TAILQ_EMPTY(&asoc->send_queue) &&
13014 TAILQ_EMPTY(&asoc->sent_queue) &&
13016 if (asoc->locked_on_sending) {
13019 /* there is nothing queued to send, so I'm done... */
13020 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13021 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13022 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13023 struct sctp_nets *netp;
13025 /* only send SHUTDOWN the first time through */
13026 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
13027 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13029 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
13030 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
13031 sctp_stop_timers_for_shutdown(stcb);
13032 if (stcb->asoc.alternate) {
13033 netp = stcb->asoc.alternate;
13035 netp = stcb->asoc.primary_destination;
13037 sctp_send_shutdown(stcb, netp);
13038 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13040 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13041 asoc->primary_destination);
13045 * We still have (or just got) data to send, so set SHUTDOWN_PENDING.
13049 * XXX The sockets draft says that SCTP_EOF should be
13050 * sent with no data. Currently, we allow user data to
13051 * be sent first and then move to SHUTDOWN_PENDING.
13054 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13055 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13056 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13057 if (hold_tcblock == 0) {
13058 SCTP_TCB_LOCK(stcb);
13061 if (asoc->locked_on_sending) {
13062 /* Locked to send out the data */
13063 struct sctp_stream_queue_pending *sp;
13065 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
13067 if ((sp->length == 0) && (sp->msg_is_complete == 0))
13068 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
13071 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
13072 if (TAILQ_EMPTY(&asoc->send_queue) &&
13073 TAILQ_EMPTY(&asoc->sent_queue) &&
13074 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13076 if (free_cnt_applied) {
13077 atomic_add_int(&stcb->asoc.refcnt, -1);
13078 free_cnt_applied = 0;
13080 sctp_abort_an_association(stcb->sctp_ep, stcb,
13081 NULL, SCTP_SO_LOCKED);
13083 * now relock the stcb so everything is sane
13090 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13091 asoc->primary_destination);
13092 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13097 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13098 some_on_control = 1;
13100 if (queue_only_for_init) {
13101 if (hold_tcblock == 0) {
13102 SCTP_TCB_LOCK(stcb);
13105 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13106 /* a collision took us forward? */
13109 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13110 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
13114 if ((net->flight_size > net->cwnd) &&
13115 (stcb->asoc.sctp_cmt_on_off == 0)) {
13116 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13118 } else if (asoc->ifp_had_enobuf) {
13119 SCTP_STAT_INCR(sctps_ifnomemqueued);
13120 if (net->flight_size > (2 * net->mtu)) {
13123 asoc->ifp_had_enobuf = 0;
13125 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13126 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
13127 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13128 (stcb->asoc.total_flight > 0) &&
13129 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13130 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13132 * Ok, Nagle is set on and we have data outstanding.
13133 * Don't send anything and let SACKs drive out the
13134 * data unless we have a "full" segment to send.
13136 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13137 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13139 SCTP_STAT_INCR(sctps_naglequeued);
13142 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13143 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13144 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13146 SCTP_STAT_INCR(sctps_naglesent);
13149 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13150 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13151 nagle_applies, un_sent);
13152 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13153 stcb->asoc.total_flight,
13154 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13156 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13157 /* we can attempt to send too. */
13158 if (hold_tcblock == 0) {
13160 * If SACK processing already holds the lock, there is no need to start output here.
13163 if (SCTP_TCB_TRYLOCK(stcb)) {
13164 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13168 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13170 } else if ((queue_only == 0) &&
13171 (stcb->asoc.peers_rwnd == 0) &&
13172 (stcb->asoc.total_flight == 0)) {
13173 /* We get to have a probe outstanding */
13174 if (hold_tcblock == 0) {
13176 SCTP_TCB_LOCK(stcb);
13178 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13179 } else if (some_on_control) {
13180 int num_out, reason, frag_point;
13182 /* Here we do control only */
13183 if (hold_tcblock == 0) {
13185 SCTP_TCB_LOCK(stcb);
13187 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13188 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13189 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13191 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13192 queue_only, stcb->asoc.peers_rwnd, un_sent,
13193 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13194 stcb->asoc.total_output_queue_size, error);
13199 if (local_soresv && stcb) {
13200 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13202 if (create_lock_applied) {
13203 SCTP_ASOC_CREATE_UNLOCK(inp);
13205 if ((stcb) && hold_tcblock) {
13206 SCTP_TCB_UNLOCK(stcb);
13208 if (stcb && free_cnt_applied) {
13209 atomic_add_int(&stcb->asoc.refcnt, -1);
13213 if (mtx_owned(&stcb->tcb_mtx)) {
13214 panic("Leaving with tcb mtx owned?");
13216 if (mtx_owned(&stcb->tcb_send_mtx)) {
13217 panic("Leaving with tcb send mtx owned?");
13223 sctp_validate_no_locks(inp);
13225 SCTP_PRINTF("Warning - inp is NULL so can't validate locks\n");
13232 sctp_m_freem(control);
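/*
 * For illustration only: a minimal userspace sketch (compiled out below) of
 * how the sinfo_flags handled by the send path above are typically supplied
 * through the sctp_sendmsg(3) helper.  SCTP_UNORDERED requests unordered
 * delivery (counted by sctps_sends_with_unord above); a zero-length send
 * carrying SCTP_EOF drives the graceful-shutdown handling above.  The socket
 * descriptor, peer address, and function name are hypothetical; headers
 * follow the sctp_sendmsg(3) manual page.  This is a sketch, not part of
 * this file's kernel code.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>

static void
example_send_and_eof(int sd, const struct sockaddr_in *peer)
{
	const char msg[] = "hello";

	/* Unordered user message on stream 0 (flags map to sinfo_flags). */
	(void)sctp_sendmsg(sd, msg, sizeof(msg) - 1,
	    (const struct sockaddr *)peer, sizeof(*peer),
	    0 /* ppid */, SCTP_UNORDERED, 0 /* stream */,
	    0 /* ttl */, 0 /* context */);

	/* Zero-length send with SCTP_EOF: request a graceful shutdown. */
	(void)sctp_sendmsg(sd, NULL, 0,
	    (const struct sockaddr *)peer, sizeof(*peer),
	    0, SCTP_EOF, 0, 0, 0);
}
#endif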
13239 * generate an AUTHentication chunk, if required
13242 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13243 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
13244 struct sctp_tcb *stcb, uint8_t chunk)
13246 struct mbuf *m_auth;
13247 struct sctp_auth_chunk *auth;
13251 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13255 /* sysctl disabled auth? */
13256 if (SCTP_BASE_SYSCTL(sctp_auth_disable))
13259 /* peer doesn't do auth... */
13260 if (!stcb->asoc.peer_supports_auth) {
13263 /* does the requested chunk require auth? */
13264 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13267 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
13268 if (m_auth == NULL) {
13272 /* reserve some space if this will be the first mbuf */
13274 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13275 /* fill in the AUTH chunk details */
13276 auth = mtod(m_auth, struct sctp_auth_chunk *);
13277 bzero(auth, sizeof(*auth));
13278 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13279 auth->ch.chunk_flags = 0;
13280 chunk_len = sizeof(*auth) +
13281 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13282 auth->ch.chunk_length = htons(chunk_len);
13283 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13284 /* key id and hmac digest will be computed and filled in upon send */
13286 /* save the offset where the auth was inserted into the chain */
13288 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13289 *offset += SCTP_BUF_LEN(cn);
13292 /* update length and return pointer to the auth chunk */
13293 SCTP_BUF_LEN(m_auth) = chunk_len;
13294 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13295 if (auth_ret != NULL)
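/*
 * Worked example (illustrative only, compiled out): assuming the peer
 * negotiated HMAC-SHA1, sctp_get_hmac_digest_len() yields a 20-byte digest,
 * so the chunk_len computed above is the 8-byte fixed part of the AUTH chunk
 * (chunk header + shared key id + HMAC id) plus 20, i.e. 28 bytes, which is
 * already a multiple of 4 and needs no padding on the wire.
 */
#if 0
	uint16_t digest_len = 20;	/* HMAC-SHA1 digest length (assumed) */
	uint16_t auth_chunk_len = sizeof(struct sctp_auth_chunk) + digest_len;
	/* 8 (header + shared key id + HMAC id) + 20 = 28 bytes total. */
#endif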
13303 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
13305 struct nd_prefix *pfx = NULL;
13306 struct nd_pfxrouter *pfxrtr = NULL;
13307 struct sockaddr_in6 gw6;
13309 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13312 /* get prefix entry of address */
13313 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13314 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13316 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13317 &src6->sin6_addr, &pfx->ndpr_mask))
13320 /* no prefix entry in the prefix list */
13322 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13323 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13326 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13327 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13329 /* search the prefix entry's advertising routers for the installed gateway */
13330 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13331 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13332 gw6.sin6_family = AF_INET6;
13333 gw6.sin6_len = sizeof(struct sockaddr_in6);
13334 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13335 sizeof(struct in6_addr));
13336 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13337 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13338 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13339 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13340 if (sctp_cmpaddr((struct sockaddr *)&gw6,
13341 ro->ro_rt->rt_gateway)) {
13342 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13346 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
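/*
 * Illustrative example (compiled out) of the masked comparison performed by
 * sctp_v4src_match_nexthop() below: both the source address and the route's
 * gateway are ANDed with the interface netmask, and they "match" when the
 * resulting network addresses are equal.  The addresses used here are
 * documentation prefixes (RFC 5737) and purely hypothetical.
 */
#if 0
static int
example_same_ipv4_network(void)
{
	struct in_addr src, gw, mask;

	src.s_addr  = htonl(0xC000020A);	/* 192.0.2.10 */
	gw.s_addr   = htonl(0xC0000201);	/* 192.0.2.1  */
	mask.s_addr = htonl(0xFFFFFF00);	/* 255.255.255.0 (/24) */

	/* Both mask to 192.0.2.0, so the gateway is on the source's subnet. */
	return ((src.s_addr & mask.s_addr) == (gw.s_addr & mask.s_addr));
}
#endif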
13353 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
13356 struct sockaddr_in *sin, *mask;
13357 struct ifaddr *ifa;
13358 struct in_addr srcnetaddr, gwnetaddr;
13360 if (ro == NULL || ro->ro_rt == NULL ||
13361 sifa->address.sa.sa_family != AF_INET) {
13364 ifa = (struct ifaddr *)sifa->ifa;
13365 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13366 sin = (struct sockaddr_in *)&sifa->address.sin;
13367 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13368 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13369 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13370 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13372 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13373 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13374 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13375 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13376 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13377 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {