2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_auth.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_asconf.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_bsd_addr.h>
51 #include <netinet/sctp_input.h>
52 #include <netinet/sctp_crc32.h>
53 #include <netinet/udp.h>
54 #include <netinet/udp_var.h>
55 #include <machine/in_cksum.h>
59 #define SCTP_MAX_GAPS_INARRAY 4
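/*
 * The sack_array table below is a precomputed lookup table with one entry
 * per possible value of a byte taken from the association's mapping array.
 * Each entry records whether that bit pattern can merge with a gap on its
 * right or left edge and carries ready-made gap-ack blocks, so building a
 * SACK never has to walk individual bits.
 *
 * Hedged sketch of the intended use (everything except sack_array and its
 * gaps field is illustrative, not part of this file):
 *
 *	entry = &sack_array[mapping_byte];
 *	for (i = 0; i < SCTP_MAX_GAPS_INARRAY; i++) {
 *		entry->gaps[i].start and .end are offsets relative to this
 *		byte's position; the cumulative-TSN base still has to be
 *		added before they are placed into the SACK chunk.
 *	}
 */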
61 uint8_t right_edge; /* mergable on the right edge */
62 uint8_t left_edge; /* mergable on the left edge */
65 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
68 struct sack_track sack_array[256] = {
69 {0, 0, 0, 0, /* 0x00 */
76 {1, 0, 1, 0, /* 0x01 */
83 {0, 0, 1, 0, /* 0x02 */
90 {1, 0, 1, 0, /* 0x03 */
97 {0, 0, 1, 0, /* 0x04 */
104 {1, 0, 2, 0, /* 0x05 */
111 {0, 0, 1, 0, /* 0x06 */
118 {1, 0, 1, 0, /* 0x07 */
125 {0, 0, 1, 0, /* 0x08 */
132 {1, 0, 2, 0, /* 0x09 */
139 {0, 0, 2, 0, /* 0x0a */
146 {1, 0, 2, 0, /* 0x0b */
153 {0, 0, 1, 0, /* 0x0c */
160 {1, 0, 2, 0, /* 0x0d */
167 {0, 0, 1, 0, /* 0x0e */
174 {1, 0, 1, 0, /* 0x0f */
181 {0, 0, 1, 0, /* 0x10 */
188 {1, 0, 2, 0, /* 0x11 */
195 {0, 0, 2, 0, /* 0x12 */
202 {1, 0, 2, 0, /* 0x13 */
209 {0, 0, 2, 0, /* 0x14 */
216 {1, 0, 3, 0, /* 0x15 */
223 {0, 0, 2, 0, /* 0x16 */
230 {1, 0, 2, 0, /* 0x17 */
237 {0, 0, 1, 0, /* 0x18 */
244 {1, 0, 2, 0, /* 0x19 */
251 {0, 0, 2, 0, /* 0x1a */
258 {1, 0, 2, 0, /* 0x1b */
265 {0, 0, 1, 0, /* 0x1c */
272 {1, 0, 2, 0, /* 0x1d */
279 {0, 0, 1, 0, /* 0x1e */
286 {1, 0, 1, 0, /* 0x1f */
293 {0, 0, 1, 0, /* 0x20 */
300 {1, 0, 2, 0, /* 0x21 */
307 {0, 0, 2, 0, /* 0x22 */
314 {1, 0, 2, 0, /* 0x23 */
321 {0, 0, 2, 0, /* 0x24 */
328 {1, 0, 3, 0, /* 0x25 */
335 {0, 0, 2, 0, /* 0x26 */
342 {1, 0, 2, 0, /* 0x27 */
349 {0, 0, 2, 0, /* 0x28 */
356 {1, 0, 3, 0, /* 0x29 */
363 {0, 0, 3, 0, /* 0x2a */
370 {1, 0, 3, 0, /* 0x2b */
377 {0, 0, 2, 0, /* 0x2c */
384 {1, 0, 3, 0, /* 0x2d */
391 {0, 0, 2, 0, /* 0x2e */
398 {1, 0, 2, 0, /* 0x2f */
405 {0, 0, 1, 0, /* 0x30 */
412 {1, 0, 2, 0, /* 0x31 */
419 {0, 0, 2, 0, /* 0x32 */
426 {1, 0, 2, 0, /* 0x33 */
433 {0, 0, 2, 0, /* 0x34 */
440 {1, 0, 3, 0, /* 0x35 */
447 {0, 0, 2, 0, /* 0x36 */
454 {1, 0, 2, 0, /* 0x37 */
461 {0, 0, 1, 0, /* 0x38 */
468 {1, 0, 2, 0, /* 0x39 */
475 {0, 0, 2, 0, /* 0x3a */
482 {1, 0, 2, 0, /* 0x3b */
489 {0, 0, 1, 0, /* 0x3c */
496 {1, 0, 2, 0, /* 0x3d */
503 {0, 0, 1, 0, /* 0x3e */
510 {1, 0, 1, 0, /* 0x3f */
517 {0, 0, 1, 0, /* 0x40 */
524 {1, 0, 2, 0, /* 0x41 */
531 {0, 0, 2, 0, /* 0x42 */
538 {1, 0, 2, 0, /* 0x43 */
545 {0, 0, 2, 0, /* 0x44 */
552 {1, 0, 3, 0, /* 0x45 */
559 {0, 0, 2, 0, /* 0x46 */
566 {1, 0, 2, 0, /* 0x47 */
573 {0, 0, 2, 0, /* 0x48 */
580 {1, 0, 3, 0, /* 0x49 */
587 {0, 0, 3, 0, /* 0x4a */
594 {1, 0, 3, 0, /* 0x4b */
601 {0, 0, 2, 0, /* 0x4c */
608 {1, 0, 3, 0, /* 0x4d */
615 {0, 0, 2, 0, /* 0x4e */
622 {1, 0, 2, 0, /* 0x4f */
629 {0, 0, 2, 0, /* 0x50 */
636 {1, 0, 3, 0, /* 0x51 */
643 {0, 0, 3, 0, /* 0x52 */
650 {1, 0, 3, 0, /* 0x53 */
657 {0, 0, 3, 0, /* 0x54 */
664 {1, 0, 4, 0, /* 0x55 */
671 {0, 0, 3, 0, /* 0x56 */
678 {1, 0, 3, 0, /* 0x57 */
685 {0, 0, 2, 0, /* 0x58 */
692 {1, 0, 3, 0, /* 0x59 */
699 {0, 0, 3, 0, /* 0x5a */
706 {1, 0, 3, 0, /* 0x5b */
713 {0, 0, 2, 0, /* 0x5c */
720 {1, 0, 3, 0, /* 0x5d */
727 {0, 0, 2, 0, /* 0x5e */
734 {1, 0, 2, 0, /* 0x5f */
741 {0, 0, 1, 0, /* 0x60 */
748 {1, 0, 2, 0, /* 0x61 */
755 {0, 0, 2, 0, /* 0x62 */
762 {1, 0, 2, 0, /* 0x63 */
769 {0, 0, 2, 0, /* 0x64 */
776 {1, 0, 3, 0, /* 0x65 */
783 {0, 0, 2, 0, /* 0x66 */
790 {1, 0, 2, 0, /* 0x67 */
797 {0, 0, 2, 0, /* 0x68 */
804 {1, 0, 3, 0, /* 0x69 */
811 {0, 0, 3, 0, /* 0x6a */
818 {1, 0, 3, 0, /* 0x6b */
825 {0, 0, 2, 0, /* 0x6c */
832 {1, 0, 3, 0, /* 0x6d */
839 {0, 0, 2, 0, /* 0x6e */
846 {1, 0, 2, 0, /* 0x6f */
853 {0, 0, 1, 0, /* 0x70 */
860 {1, 0, 2, 0, /* 0x71 */
867 {0, 0, 2, 0, /* 0x72 */
874 {1, 0, 2, 0, /* 0x73 */
881 {0, 0, 2, 0, /* 0x74 */
888 {1, 0, 3, 0, /* 0x75 */
895 {0, 0, 2, 0, /* 0x76 */
902 {1, 0, 2, 0, /* 0x77 */
909 {0, 0, 1, 0, /* 0x78 */
916 {1, 0, 2, 0, /* 0x79 */
923 {0, 0, 2, 0, /* 0x7a */
930 {1, 0, 2, 0, /* 0x7b */
937 {0, 0, 1, 0, /* 0x7c */
944 {1, 0, 2, 0, /* 0x7d */
951 {0, 0, 1, 0, /* 0x7e */
958 {1, 0, 1, 0, /* 0x7f */
965 {0, 1, 1, 0, /* 0x80 */
972 {1, 1, 2, 0, /* 0x81 */
979 {0, 1, 2, 0, /* 0x82 */
986 {1, 1, 2, 0, /* 0x83 */
993 {0, 1, 2, 0, /* 0x84 */
1000 {1, 1, 3, 0, /* 0x85 */
1007 {0, 1, 2, 0, /* 0x86 */
1014 {1, 1, 2, 0, /* 0x87 */
1021 {0, 1, 2, 0, /* 0x88 */
1028 {1, 1, 3, 0, /* 0x89 */
1035 {0, 1, 3, 0, /* 0x8a */
1042 {1, 1, 3, 0, /* 0x8b */
1049 {0, 1, 2, 0, /* 0x8c */
1056 {1, 1, 3, 0, /* 0x8d */
1063 {0, 1, 2, 0, /* 0x8e */
1070 {1, 1, 2, 0, /* 0x8f */
1077 {0, 1, 2, 0, /* 0x90 */
1084 {1, 1, 3, 0, /* 0x91 */
1091 {0, 1, 3, 0, /* 0x92 */
1098 {1, 1, 3, 0, /* 0x93 */
1105 {0, 1, 3, 0, /* 0x94 */
1112 {1, 1, 4, 0, /* 0x95 */
1119 {0, 1, 3, 0, /* 0x96 */
1126 {1, 1, 3, 0, /* 0x97 */
1133 {0, 1, 2, 0, /* 0x98 */
1140 {1, 1, 3, 0, /* 0x99 */
1147 {0, 1, 3, 0, /* 0x9a */
1154 {1, 1, 3, 0, /* 0x9b */
1161 {0, 1, 2, 0, /* 0x9c */
1168 {1, 1, 3, 0, /* 0x9d */
1175 {0, 1, 2, 0, /* 0x9e */
1182 {1, 1, 2, 0, /* 0x9f */
1189 {0, 1, 2, 0, /* 0xa0 */
1196 {1, 1, 3, 0, /* 0xa1 */
1203 {0, 1, 3, 0, /* 0xa2 */
1210 {1, 1, 3, 0, /* 0xa3 */
1217 {0, 1, 3, 0, /* 0xa4 */
1224 {1, 1, 4, 0, /* 0xa5 */
1231 {0, 1, 3, 0, /* 0xa6 */
1238 {1, 1, 3, 0, /* 0xa7 */
1245 {0, 1, 3, 0, /* 0xa8 */
1252 {1, 1, 4, 0, /* 0xa9 */
1259 {0, 1, 4, 0, /* 0xaa */
1266 {1, 1, 4, 0, /* 0xab */
1273 {0, 1, 3, 0, /* 0xac */
1280 {1, 1, 4, 0, /* 0xad */
1287 {0, 1, 3, 0, /* 0xae */
1294 {1, 1, 3, 0, /* 0xaf */
1301 {0, 1, 2, 0, /* 0xb0 */
1308 {1, 1, 3, 0, /* 0xb1 */
1315 {0, 1, 3, 0, /* 0xb2 */
1322 {1, 1, 3, 0, /* 0xb3 */
1329 {0, 1, 3, 0, /* 0xb4 */
1336 {1, 1, 4, 0, /* 0xb5 */
1343 {0, 1, 3, 0, /* 0xb6 */
1350 {1, 1, 3, 0, /* 0xb7 */
1357 {0, 1, 2, 0, /* 0xb8 */
1364 {1, 1, 3, 0, /* 0xb9 */
1371 {0, 1, 3, 0, /* 0xba */
1378 {1, 1, 3, 0, /* 0xbb */
1385 {0, 1, 2, 0, /* 0xbc */
1392 {1, 1, 3, 0, /* 0xbd */
1399 {0, 1, 2, 0, /* 0xbe */
1406 {1, 1, 2, 0, /* 0xbf */
1413 {0, 1, 1, 0, /* 0xc0 */
1420 {1, 1, 2, 0, /* 0xc1 */
1427 {0, 1, 2, 0, /* 0xc2 */
1434 {1, 1, 2, 0, /* 0xc3 */
1441 {0, 1, 2, 0, /* 0xc4 */
1448 {1, 1, 3, 0, /* 0xc5 */
1455 {0, 1, 2, 0, /* 0xc6 */
1462 {1, 1, 2, 0, /* 0xc7 */
1469 {0, 1, 2, 0, /* 0xc8 */
1476 {1, 1, 3, 0, /* 0xc9 */
1483 {0, 1, 3, 0, /* 0xca */
1490 {1, 1, 3, 0, /* 0xcb */
1497 {0, 1, 2, 0, /* 0xcc */
1504 {1, 1, 3, 0, /* 0xcd */
1511 {0, 1, 2, 0, /* 0xce */
1518 {1, 1, 2, 0, /* 0xcf */
1525 {0, 1, 2, 0, /* 0xd0 */
1532 {1, 1, 3, 0, /* 0xd1 */
1539 {0, 1, 3, 0, /* 0xd2 */
1546 {1, 1, 3, 0, /* 0xd3 */
1553 {0, 1, 3, 0, /* 0xd4 */
1560 {1, 1, 4, 0, /* 0xd5 */
1567 {0, 1, 3, 0, /* 0xd6 */
1574 {1, 1, 3, 0, /* 0xd7 */
1581 {0, 1, 2, 0, /* 0xd8 */
1588 {1, 1, 3, 0, /* 0xd9 */
1595 {0, 1, 3, 0, /* 0xda */
1602 {1, 1, 3, 0, /* 0xdb */
1609 {0, 1, 2, 0, /* 0xdc */
1616 {1, 1, 3, 0, /* 0xdd */
1623 {0, 1, 2, 0, /* 0xde */
1630 {1, 1, 2, 0, /* 0xdf */
1637 {0, 1, 1, 0, /* 0xe0 */
1644 {1, 1, 2, 0, /* 0xe1 */
1651 {0, 1, 2, 0, /* 0xe2 */
1658 {1, 1, 2, 0, /* 0xe3 */
1665 {0, 1, 2, 0, /* 0xe4 */
1672 {1, 1, 3, 0, /* 0xe5 */
1679 {0, 1, 2, 0, /* 0xe6 */
1686 {1, 1, 2, 0, /* 0xe7 */
1693 {0, 1, 2, 0, /* 0xe8 */
1700 {1, 1, 3, 0, /* 0xe9 */
1707 {0, 1, 3, 0, /* 0xea */
1714 {1, 1, 3, 0, /* 0xeb */
1721 {0, 1, 2, 0, /* 0xec */
1728 {1, 1, 3, 0, /* 0xed */
1735 {0, 1, 2, 0, /* 0xee */
1742 {1, 1, 2, 0, /* 0xef */
1749 {0, 1, 1, 0, /* 0xf0 */
1756 {1, 1, 2, 0, /* 0xf1 */
1763 {0, 1, 2, 0, /* 0xf2 */
1770 {1, 1, 2, 0, /* 0xf3 */
1777 {0, 1, 2, 0, /* 0xf4 */
1784 {1, 1, 3, 0, /* 0xf5 */
1791 {0, 1, 2, 0, /* 0xf6 */
1798 {1, 1, 2, 0, /* 0xf7 */
1805 {0, 1, 1, 0, /* 0xf8 */
1812 {1, 1, 2, 0, /* 0xf9 */
1819 {0, 1, 2, 0, /* 0xfa */
1826 {1, 1, 2, 0, /* 0xfb */
1833 {0, 1, 1, 0, /* 0xfc */
1840 {1, 1, 2, 0, /* 0xfd */
1847 {0, 1, 1, 0, /* 0xfe */
1854 {1, 1, 1, 0, /* 0xff */
1865 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1866 int ipv4_addr_legal,
1867 int ipv6_addr_legal,
1869 int ipv4_local_scope,
1870 int local_scope SCTP_UNUSED,/* XXX */
1874 if ((loopback_scope == 0) &&
1875 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1877 * skip loopback if not in scope
1881 switch (ifa->address.sa.sa_family) {
1884 if (ipv4_addr_legal) {
1885 struct sockaddr_in *sin;
1887 sin = (struct sockaddr_in *)&ifa->address.sin;
1888 if (sin->sin_addr.s_addr == 0) {
1889 /* not in scope, unspecified */
1892 if ((ipv4_local_scope == 0) &&
1893 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1894 /* private address not in scope */
1904 if (ipv6_addr_legal) {
1905 struct sockaddr_in6 *sin6;
1908 * Must update the flags, bummer, which means any
1909 * IFA locks must now be applied HERE <->
1912 sctp_gather_internal_ifa_flags(ifa);
1914 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1917 /* ok to use deprecated addresses? */
1918 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1919 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1920 /* skip unspecified addresses */
1923 if ( /* (local_scope == 0) && */
1924 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1927 if ((site_scope == 0) &&
1928 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1942 static struct mbuf *
1943 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
1945 struct sctp_paramhdr *parmh;
1949 switch (ifa->address.sa.sa_family) {
1952 len = sizeof(struct sctp_ipv4addr_param);
1957 len = sizeof(struct sctp_ipv6addr_param);
1963 if (M_TRAILINGSPACE(m) >= len) {
1964 /* easy side we just drop it on the end */
1965 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1968 /* Need more space */
1970 while (SCTP_BUF_NEXT(mret) != NULL) {
1971 mret = SCTP_BUF_NEXT(mret);
1973 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
1974 if (SCTP_BUF_NEXT(mret) == NULL) {
1975 /* We are hosed, can't add more addresses */
1978 mret = SCTP_BUF_NEXT(mret);
1979 parmh = mtod(mret, struct sctp_paramhdr *);
1981 /* now add the parameter */
1982 switch (ifa->address.sa.sa_family) {
1986 struct sctp_ipv4addr_param *ipv4p;
1987 struct sockaddr_in *sin;
1989 sin = (struct sockaddr_in *)&ifa->address.sin;
1990 ipv4p = (struct sctp_ipv4addr_param *)parmh;
1991 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
1992 parmh->param_length = htons(len);
1993 ipv4p->addr = sin->sin_addr.s_addr;
1994 SCTP_BUF_LEN(mret) += len;
2001 struct sctp_ipv6addr_param *ipv6p;
2002 struct sockaddr_in6 *sin6;
2004 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
2005 ipv6p = (struct sctp_ipv6addr_param *)parmh;
2006 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
2007 parmh->param_length = htons(len);
2008 memcpy(ipv6p->addr, &sin6->sin6_addr,
2009 sizeof(ipv6p->addr));
2010 /* clear embedded scope in the address */
2011 in6_clearscope((struct in6_addr *)ipv6p->addr);
2012 SCTP_BUF_LEN(mret) += len;
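/*
 * sctp_add_addresses_to_i_ia() appends our usable local addresses as
 * parameters to an INIT or INIT-ACK chunk (the mbuf chain starting at
 * m_at), using sctp_add_addr_to_mbuf() above for each one.  For a
 * bound-all endpoint it walks every interface in the VRF and first counts
 * what is in scope, skipping the address list entirely once the count
 * exceeds SCTP_ADDRESS_LIMIT.  For a bound-specific endpoint it lists the
 * bound addresses, but only when more than one is bound, so a single-homed
 * host behind a NAT lets the peer take the address from the packet itself.
 */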
2024 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2025 struct sctp_scoping *scope,
2026 struct mbuf *m_at, int cnt_inits_to)
2028 struct sctp_vrf *vrf = NULL;
2029 int cnt, limit_out = 0, total_count;
2032 vrf_id = inp->def_vrf_id;
2033 SCTP_IPI_ADDR_RLOCK();
2034 vrf = sctp_find_vrf(vrf_id);
2036 SCTP_IPI_ADDR_RUNLOCK();
2039 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2040 struct sctp_ifa *sctp_ifap;
2041 struct sctp_ifn *sctp_ifnp;
2044 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2046 cnt = SCTP_ADDRESS_LIMIT;
2049 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2050 if ((scope->loopback_scope == 0) &&
2051 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2053 * Skip loopback devices if loopback_scope
2058 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2059 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2062 if (sctp_is_address_in_scope(sctp_ifap,
2063 scope->ipv4_addr_legal,
2064 scope->ipv6_addr_legal,
2065 scope->loopback_scope,
2066 scope->ipv4_local_scope,
2068 scope->site_scope, 1) == 0) {
2072 if (cnt > SCTP_ADDRESS_LIMIT) {
2076 if (cnt > SCTP_ADDRESS_LIMIT) {
2083 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2085 if ((scope->loopback_scope == 0) &&
2086 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2088 * Skip loopback devices if
2089 * loopback_scope not set
2093 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2094 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2097 if (sctp_is_address_in_scope(sctp_ifap,
2098 scope->ipv4_addr_legal,
2099 scope->ipv6_addr_legal,
2100 scope->loopback_scope,
2101 scope->ipv4_local_scope,
2103 scope->site_scope, 0) == 0) {
2106 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
2117 if (total_count > SCTP_ADDRESS_LIMIT) {
2118 /* No more addresses */
2126 struct sctp_laddr *laddr;
2129 /* First, how many ? */
2130 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2131 if (laddr->ifa == NULL) {
2134 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2136 * Address being deleted by the system, don't
2140 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2142 * Address being deleted on this ep don't
2147 if (sctp_is_address_in_scope(laddr->ifa,
2148 scope->ipv4_addr_legal,
2149 scope->ipv6_addr_legal,
2150 scope->loopback_scope,
2151 scope->ipv4_local_scope,
2153 scope->site_scope, 1) == 0) {
2159 * To get through a NAT we only list addresses if we have
2160 * more than one. That way if you just bind a single address
2161 * we let the source of the init dictate our address.
2165 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2166 if (laddr->ifa == NULL) {
2169 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2172 if (sctp_is_address_in_scope(laddr->ifa,
2173 scope->ipv4_addr_legal,
2174 scope->ipv6_addr_legal,
2175 scope->loopback_scope,
2176 scope->ipv4_local_scope,
2178 scope->site_scope, 0) == 0) {
2181 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
2183 if (cnt >= SCTP_ADDRESS_LIMIT) {
2189 SCTP_IPI_ADDR_RUNLOCK();
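/*
 * The next two helpers grade a candidate source address against the scope
 * of the destination.  "Preferred" means the source is of the same or
 * wider scope than the destination and is tried first; "acceptable"
 * additionally tolerates NAT-style combinations such as a private source
 * toward a global destination and is used as a fallback.  Both return the
 * ifa when it may be used and NULL when it must not.
 */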
2193 static struct sctp_ifa *
2194 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2195 uint8_t dest_is_loop,
2196 uint8_t dest_is_priv,
2199 uint8_t dest_is_global = 0;
2201 /* dest_is_priv is true if destination is a private address */
2202 /* dest_is_loop is true if destination is a loopback address */
2205 * Here we determine if it's a preferred address. A preferred address
2206 * means it is the same scope or higher scope than the destination.
2207 * L = loopback, P = private, G = global
2208 * -----------------------------------------
2209 * src | dest | result
2210 * -----------------------------------------
2211 * L | L | yes
2212 * -----------------------------------------
2213 * P | L | yes-v4 no-v6
2214 * -----------------------------------------
2215 * G | L | yes-v4 no-v6
2216 * -----------------------------------------
2217 * L | P | no
2218 * -----------------------------------------
2219 * P | P | yes
2220 * -----------------------------------------
2221 * G | P | no
2222 * -----------------------------------------
2223 * L | G | no
2224 * -----------------------------------------
2225 * P | G | no
2226 * -----------------------------------------
2227 * G | G | yes
2228 * -----------------------------------------
2231 if (ifa->address.sa.sa_family != fam) {
2232 /* forget mis-matched family */
2235 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2238 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2239 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2240 /* Ok the address may be ok */
2242 if (fam == AF_INET6) {
2243 /* ok to use deprecated addresses? no, let's not! */
2244 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2245 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2248 if (ifa->src_is_priv && !ifa->src_is_loop) {
2250 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2254 if (ifa->src_is_glob) {
2256 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2263 * Now that we know what is what, implement our table. This could in
2264 * theory be done slicker (it used to be), but this is
2265 * straightforward and easier to validate :-)
2267 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2268 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2269 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2270 dest_is_loop, dest_is_priv, dest_is_global);
2272 if ((ifa->src_is_loop) && (dest_is_priv)) {
2273 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2276 if ((ifa->src_is_glob) && (dest_is_priv)) {
2277 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2280 if ((ifa->src_is_loop) && (dest_is_global)) {
2281 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2284 if ((ifa->src_is_priv) && (dest_is_global)) {
2285 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2288 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2289 /* it's a preferred address */
2293 static struct sctp_ifa *
2294 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2295 uint8_t dest_is_loop,
2296 uint8_t dest_is_priv,
2299 uint8_t dest_is_global = 0;
2302 * Here we determine if it's an acceptable address. An acceptable
2303 * address means it is the same scope or higher scope, but we can
2304 * allow for NAT, which means it's ok to have a global dest and a
2307 * L = loopback, P = private, G = global
2308 * -----------------------------------------
2309 * src | dest | result
2310 * -----------------------------------------
2311 * L | L | yes
2312 * -----------------------------------------
2313 * P | L | yes-v4 no-v6
2314 * -----------------------------------------
2315 * G | L | yes
2316 * -----------------------------------------
2317 * L | P | no
2318 * -----------------------------------------
2319 * P | P | yes
2320 * -----------------------------------------
2321 * G | P | yes - May not work
2322 * -----------------------------------------
2323 * L | G | no
2324 * -----------------------------------------
2325 * P | G | yes - May not work
2326 * -----------------------------------------
2327 * G | G | yes
2328 * -----------------------------------------
2331 if (ifa->address.sa.sa_family != fam) {
2332 /* forget non matching family */
2333 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2334 ifa->address.sa.sa_family, fam);
2337 /* Ok the address may be ok */
2338 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2339 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2340 dest_is_loop, dest_is_priv);
2341 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2345 if (fam == AF_INET6) {
2346 /* ok to use deprecated addresses? */
2347 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2350 if (ifa->src_is_priv) {
2351 /* Special case, linklocal to loop */
2358 * Now that we know what is what, implement our table. This could in
2359 * theory be done slicker (it used to be), but this is
2360 * straightforward and easier to validate :-)
2362 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2365 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2368 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2371 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2374 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2375 /* it's an acceptable address */
2380 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2382 struct sctp_laddr *laddr;
2385 /* There are no restrictions, no TCB :-) */
2388 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2389 if (laddr->ifa == NULL) {
2390 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2394 if (laddr->ifa == ifa) {
2395 /* Yes it is on the list */
2404 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2406 struct sctp_laddr *laddr;
2410 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2411 if (laddr->ifa == NULL) {
2412 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2416 if ((laddr->ifa == ifa) && laddr->action == 0)
2425 static struct sctp_ifa *
2426 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2429 int non_asoc_addr_ok,
2430 uint8_t dest_is_priv,
2431 uint8_t dest_is_loop,
2434 struct sctp_laddr *laddr, *starting_point;
2437 struct sctp_ifn *sctp_ifn;
2438 struct sctp_ifa *sctp_ifa, *sifa;
2439 struct sctp_vrf *vrf;
2442 vrf = sctp_find_vrf(vrf_id);
2446 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2447 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2448 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2450 * first question, is the ifn we will emit on in our list? If so, we
2451 * want such an address. Note that we first looked for a preferred
2455 /* is a preferred one on the interface we route out? */
2456 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2457 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2458 (non_asoc_addr_ok == 0))
2460 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2465 if (sctp_is_addr_in_ep(inp, sifa)) {
2466 atomic_add_int(&sifa->refcount, 1);
2472 * ok, we now need to find one on the list of the addresses. We
2473 * can't get one on the emitting interface so let's first find a
2474 * preferred one. If not that, an acceptable one, otherwise... we
2477 starting_point = inp->next_addr_touse;
2479 if (inp->next_addr_touse == NULL) {
2480 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2483 for (laddr = inp->next_addr_touse; laddr;
2484 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2485 if (laddr->ifa == NULL) {
2486 /* address has been removed */
2489 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2490 /* address is being deleted */
2493 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2497 atomic_add_int(&sifa->refcount, 1);
2500 if (resettotop == 0) {
2501 inp->next_addr_touse = NULL;
2504 inp->next_addr_touse = starting_point;
2507 if (inp->next_addr_touse == NULL) {
2508 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2511 /* ok, what about an acceptable address in the inp */
2512 for (laddr = inp->next_addr_touse; laddr;
2513 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2514 if (laddr->ifa == NULL) {
2515 /* address has been removed */
2518 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2519 /* address is being deleted */
2522 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2526 atomic_add_int(&sifa->refcount, 1);
2529 if (resettotop == 0) {
2530 inp->next_addr_touse = NULL;
2531 goto once_again_too;
2534 * no address bound can be a source for the destination we are in
2542 static struct sctp_ifa *
2543 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2544 struct sctp_tcb *stcb,
2547 uint8_t dest_is_priv,
2548 uint8_t dest_is_loop,
2549 int non_asoc_addr_ok,
2552 struct sctp_laddr *laddr, *starting_point;
2554 struct sctp_ifn *sctp_ifn;
2555 struct sctp_ifa *sctp_ifa, *sifa;
2556 uint8_t start_at_beginning = 0;
2557 struct sctp_vrf *vrf;
2561 * first question, is the ifn we will emit on in our list, if so, we
2564 vrf = sctp_find_vrf(vrf_id);
2568 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2569 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2570 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2573 * first question, is the ifn we will emit on in our list? If so,
2574 * we want that one. First we look for a preferred. Second, we go
2575 * for an acceptable.
2578 /* first try for a preferred address on the ep */
2579 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2580 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2582 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2583 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2586 if (((non_asoc_addr_ok == 0) &&
2587 (sctp_is_addr_restricted(stcb, sifa))) ||
2588 (non_asoc_addr_ok &&
2589 (sctp_is_addr_restricted(stcb, sifa)) &&
2590 (!sctp_is_addr_pending(stcb, sifa)))) {
2591 /* on the no-no list */
2594 atomic_add_int(&sifa->refcount, 1);
2598 /* next try for an acceptable address on the ep */
2599 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2600 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2602 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2603 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2606 if (((non_asoc_addr_ok == 0) &&
2607 (sctp_is_addr_restricted(stcb, sifa))) ||
2608 (non_asoc_addr_ok &&
2609 (sctp_is_addr_restricted(stcb, sifa)) &&
2610 (!sctp_is_addr_pending(stcb, sifa)))) {
2611 /* on the no-no list */
2614 atomic_add_int(&sifa->refcount, 1);
2621 * if we can't find one like that then we must look at all addresses
2622 * bound to pick one at first preferable then secondly acceptable.
2624 starting_point = stcb->asoc.last_used_address;
2626 if (stcb->asoc.last_used_address == NULL) {
2627 start_at_beginning = 1;
2628 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2630 /* search beginning with the last used address */
2631 for (laddr = stcb->asoc.last_used_address; laddr;
2632 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2633 if (laddr->ifa == NULL) {
2634 /* address has been removed */
2637 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2638 /* address is being deleted */
2641 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2644 if (((non_asoc_addr_ok == 0) &&
2645 (sctp_is_addr_restricted(stcb, sifa))) ||
2646 (non_asoc_addr_ok &&
2647 (sctp_is_addr_restricted(stcb, sifa)) &&
2648 (!sctp_is_addr_pending(stcb, sifa)))) {
2649 /* on the no-no list */
2652 stcb->asoc.last_used_address = laddr;
2653 atomic_add_int(&sifa->refcount, 1);
2656 if (start_at_beginning == 0) {
2657 stcb->asoc.last_used_address = NULL;
2658 goto sctp_from_the_top;
2660 /* now try for any higher scope than the destination */
2661 stcb->asoc.last_used_address = starting_point;
2662 start_at_beginning = 0;
2664 if (stcb->asoc.last_used_address == NULL) {
2665 start_at_beginning = 1;
2666 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2668 /* search beginning with the last used address */
2669 for (laddr = stcb->asoc.last_used_address; laddr;
2670 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2671 if (laddr->ifa == NULL) {
2672 /* address has been removed */
2675 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2676 /* address is being deleted */
2679 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2683 if (((non_asoc_addr_ok == 0) &&
2684 (sctp_is_addr_restricted(stcb, sifa))) ||
2685 (non_asoc_addr_ok &&
2686 (sctp_is_addr_restricted(stcb, sifa)) &&
2687 (!sctp_is_addr_pending(stcb, sifa)))) {
2688 /* on the no-no list */
2691 stcb->asoc.last_used_address = laddr;
2692 atomic_add_int(&sifa->refcount, 1);
2695 if (start_at_beginning == 0) {
2696 stcb->asoc.last_used_address = NULL;
2697 goto sctp_from_the_top2;
2702 static struct sctp_ifa *
2703 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2704 struct sctp_tcb *stcb,
2705 int non_asoc_addr_ok,
2706 uint8_t dest_is_loop,
2707 uint8_t dest_is_priv,
2713 struct sctp_ifa *ifa, *sifa;
2714 int num_eligible_addr = 0;
2717 struct sockaddr_in6 sin6, lsa6;
2719 if (fam == AF_INET6) {
2720 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2721 (void)sa6_recoverscope(&sin6);
2724 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2725 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2726 (non_asoc_addr_ok == 0))
2728 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2733 if (fam == AF_INET6 &&
2735 sifa->src_is_loop && sifa->src_is_priv) {
2737 * don't allow fe80::1 to be a src on loop ::1, we
2738 * don't list it to the peer so we will get an
2743 if (fam == AF_INET6 &&
2744 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2745 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2747 * link-local <-> link-local must belong to the same
2750 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2751 (void)sa6_recoverscope(&lsa6);
2752 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2759 * Check if the IPv6 address matches to next-hop. In the
2760 * mobile case, old IPv6 address may be not deleted from the
2761 * interface. Then, the interface has previous and new
2762 * addresses. We should use one corresponding to the
2763 * next-hop. (by micchie)
2766 if (stcb && fam == AF_INET6 &&
2767 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2768 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2775 /* Avoid topologically incorrect IPv4 address */
2776 if (stcb && fam == AF_INET &&
2777 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2778 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2784 if (sctp_is_address_in_scope(ifa,
2785 stcb->asoc.ipv4_addr_legal,
2786 stcb->asoc.ipv6_addr_legal,
2787 stcb->asoc.loopback_scope,
2788 stcb->asoc.ipv4_local_scope,
2789 stcb->asoc.local_scope,
2790 stcb->asoc.site_scope, 0) == 0) {
2793 if (((non_asoc_addr_ok == 0) &&
2794 (sctp_is_addr_restricted(stcb, sifa))) ||
2795 (non_asoc_addr_ok &&
2796 (sctp_is_addr_restricted(stcb, sifa)) &&
2797 (!sctp_is_addr_pending(stcb, sifa)))) {
2799 * It is restricted for some reason..
2800 * probably not yet added.
2805 if (num_eligible_addr >= addr_wanted) {
2808 num_eligible_addr++;
2815 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2816 struct sctp_tcb *stcb,
2817 int non_asoc_addr_ok,
2818 uint8_t dest_is_loop,
2819 uint8_t dest_is_priv,
2822 struct sctp_ifa *ifa, *sifa;
2823 int num_eligible_addr = 0;
2825 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2826 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2827 (non_asoc_addr_ok == 0)) {
2830 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2836 if (sctp_is_address_in_scope(ifa,
2837 stcb->asoc.ipv4_addr_legal,
2838 stcb->asoc.ipv6_addr_legal,
2839 stcb->asoc.loopback_scope,
2840 stcb->asoc.ipv4_local_scope,
2841 stcb->asoc.local_scope,
2842 stcb->asoc.site_scope, 0) == 0) {
2845 if (((non_asoc_addr_ok == 0) &&
2846 (sctp_is_addr_restricted(stcb, sifa))) ||
2847 (non_asoc_addr_ok &&
2848 (sctp_is_addr_restricted(stcb, sifa)) &&
2849 (!sctp_is_addr_pending(stcb, sifa)))) {
2851 * It is restricted for some reason..
2852 * probably not yet added.
2857 num_eligible_addr++;
2859 return (num_eligible_addr);
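/*
 * Source address selection for a bound-all endpoint, tried in order:
 *   Plan A: a preferred address on the interface the route leaves by.
 *   Plan B: a preferred address on any other interface.
 *   Plan C: an acceptable address on the emit interface.
 *   Plan D: an acceptable address on any interface.
 * If all plans fail, plans C/D are retried once with the IPv4 private
 * (local) scope temporarily enabled before giving up.
 */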
2862 static struct sctp_ifa *
2863 sctp_choose_boundall(struct sctp_tcb *stcb,
2864 struct sctp_nets *net,
2867 uint8_t dest_is_priv,
2868 uint8_t dest_is_loop,
2869 int non_asoc_addr_ok,
2872 int cur_addr_num = 0, num_preferred = 0;
2874 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2875 struct sctp_ifa *sctp_ifa, *sifa;
2877 struct sctp_vrf *vrf;
2885 * For boundall we can use any address in the association.
2886 * If non_asoc_addr_ok is set we can use any address (at least in
2887 * theory). So we look for preferred addresses first. If we find one,
2888 * we use it. Otherwise we next try to get an address on the
2889 * interface, which we should be able to do (unless non_asoc_addr_ok
2890 * is false and we are routed out that way). In these cases where we
2891 * can't use the address of the interface we go through all the
2892 * ifn's looking for an address we can use and fill that in. Punting
2893 * means we send back address 0, which will probably cause problems
2894 * actually since then IP will fill in the address of the route ifn,
2895 * which means we probably already rejected it.. i.e. here comes an
2898 vrf = sctp_find_vrf(vrf_id);
2902 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2903 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2904 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2905 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2906 if (sctp_ifn == NULL) {
2907 /* ?? We don't have this guy ?? */
2908 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2909 goto bound_all_plan_b;
2911 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
2912 ifn_index, sctp_ifn->ifn_name);
2915 cur_addr_num = net->indx_of_eligible_next_to_use;
2917 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
2922 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
2923 num_preferred, sctp_ifn->ifn_name);
2924 if (num_preferred == 0) {
2926 * no eligible addresses, we must use some other interface
2927 * address if we can find one.
2929 goto bound_all_plan_b;
2932 * Ok we have num_eligible_addr set with how many we can use, this
2933 * may vary from call to call due to addresses being deprecated
2936 if (cur_addr_num >= num_preferred) {
2940 * select the nth address from the list (where cur_addr_num is the
2941 * nth) and 0 is the first one, 1 is the second one etc...
2943 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
2945 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2946 dest_is_priv, cur_addr_num, fam, ro);
2948 /* if sctp_ifa is NULL something changed??, fall to plan b. */
2950 atomic_add_int(&sctp_ifa->refcount, 1);
2952 /* save off where the next one we will want */
2953 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
2958 * plan_b: Look at all interfaces and find a preferred address. If
2959 * no preferred fall through to plan_c.
2962 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
2963 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
2964 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
2965 sctp_ifn->ifn_name);
2966 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
2967 /* wrong base scope */
2968 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
2971 if ((sctp_ifn == looked_at) && looked_at) {
2972 /* already looked at this guy */
2973 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
2976 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
2977 dest_is_loop, dest_is_priv, fam);
2978 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2979 "Found ifn:%p %d preferred source addresses\n",
2980 ifn, num_preferred);
2981 if (num_preferred == 0) {
2982 /* None on this interface. */
2983 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefered -- skipping to next\n");
2986 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2987 "num preferred:%d on interface:%p cur_addr_num:%d\n",
2988 num_preferred, sctp_ifn, cur_addr_num);
2991 * Ok we have num_eligible_addr set with how many we can
2992 * use, this may vary from call to call due to addresses
2993 * being deprecated etc..
2995 if (cur_addr_num >= num_preferred) {
2998 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2999 dest_is_priv, cur_addr_num, fam, ro);
3003 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3004 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3006 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3007 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3008 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3009 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3011 atomic_add_int(&sifa->refcount, 1);
3015 again_with_private_addresses_allowed:
3017 /* plan_c: do we have an acceptable address on the emit interface */
3019 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3020 if (emit_ifn == NULL) {
3021 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3024 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3025 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", sctp_ifa);
3026 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3027 (non_asoc_addr_ok == 0)) {
3028 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3031 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3034 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3038 if (sctp_is_address_in_scope(sifa,
3039 stcb->asoc.ipv4_addr_legal,
3040 stcb->asoc.ipv6_addr_legal,
3041 stcb->asoc.loopback_scope,
3042 stcb->asoc.ipv4_local_scope,
3043 stcb->asoc.local_scope,
3044 stcb->asoc.site_scope, 0) == 0) {
3045 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3049 if (((non_asoc_addr_ok == 0) &&
3050 (sctp_is_addr_restricted(stcb, sifa))) ||
3051 (non_asoc_addr_ok &&
3052 (sctp_is_addr_restricted(stcb, sifa)) &&
3053 (!sctp_is_addr_pending(stcb, sifa)))) {
3055 * It is restricted for some reason..
3056 * probably not yet added.
3058 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its resticted\n");
3063 SCTP_PRINTF("Stcb is null - no print\n");
3065 atomic_add_int(&sifa->refcount, 1);
3070 * plan_d: We are in trouble. No preferred address on the emit
3071 * interface. And not even a preferred address on all interfaces. Go
3072 * out and see if we can find an acceptable address somewhere
3073 * amongst all interfaces.
3075 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", looked_at);
3076 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3077 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3078 /* wrong base scope */
3081 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3082 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3083 (non_asoc_addr_ok == 0))
3085 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3091 if (sctp_is_address_in_scope(sifa,
3092 stcb->asoc.ipv4_addr_legal,
3093 stcb->asoc.ipv6_addr_legal,
3094 stcb->asoc.loopback_scope,
3095 stcb->asoc.ipv4_local_scope,
3096 stcb->asoc.local_scope,
3097 stcb->asoc.site_scope, 0) == 0) {
3101 if (((non_asoc_addr_ok == 0) &&
3102 (sctp_is_addr_restricted(stcb, sifa))) ||
3103 (non_asoc_addr_ok &&
3104 (sctp_is_addr_restricted(stcb, sifa)) &&
3105 (!sctp_is_addr_pending(stcb, sifa)))) {
3107 * It is restricted for some
3108 * reason.. probably not yet added.
3118 if ((retried == 0) && (stcb->asoc.ipv4_local_scope == 0)) {
3119 stcb->asoc.ipv4_local_scope = 1;
3121 goto again_with_private_addresses_allowed;
3122 } else if (retried == 1) {
3123 stcb->asoc.ipv4_local_scope = 0;
3130 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3131 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3132 /* wrong base scope */
3135 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3136 struct sctp_ifa *tmp_sifa;
3138 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3139 (non_asoc_addr_ok == 0))
3141 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3144 if (tmp_sifa == NULL) {
3147 if (tmp_sifa == sifa) {
3151 if (sctp_is_address_in_scope(tmp_sifa,
3152 stcb->asoc.ipv4_addr_legal,
3153 stcb->asoc.ipv6_addr_legal,
3154 stcb->asoc.loopback_scope,
3155 stcb->asoc.ipv4_local_scope,
3156 stcb->asoc.local_scope,
3157 stcb->asoc.site_scope, 0) == 0) {
3160 if (((non_asoc_addr_ok == 0) &&
3161 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3162 (non_asoc_addr_ok &&
3163 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3164 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3174 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3175 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3176 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3181 atomic_add_int(&sifa->refcount, 1);
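/*
 * Top-level source address selection.  It finds and caches the route in
 * 'ro' if needed, classifies the destination's scope (loopback, private,
 * global), and then dispatches to the bound-all or bound-specific choosers
 * above.  Whatever ifa is returned has had its refcount bumped; the caller
 * is expected to release it with sctp_free_ifa() when done.
 */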
3189 /* tcb may be NULL */
3191 sctp_source_address_selection(struct sctp_inpcb *inp,
3192 struct sctp_tcb *stcb,
3194 struct sctp_nets *net,
3195 int non_asoc_addr_ok, uint32_t vrf_id)
3197 struct sctp_ifa *answer;
3198 uint8_t dest_is_priv, dest_is_loop;
3202 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3206 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3211 * Rules: - Find the route if needed, cache if I can. - Look at
3212 * interface address in route. Is it in the bound list? If so we
3213 * have the best source. - If not we must rotate amongst the
3218 * Do we need to pay attention to scope. We can have a private address
3219 * or a global address we are sourcing or sending to. So if we draw
3221 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3223 * ------------------------------------------
3224 * source * dest * result
3225 * -----------------------------------------
3226 * <a> Private * Global * NAT
3227 * -----------------------------------------
3228 * <b> Private * Private * No problem
3229 * -----------------------------------------
3230 * <c> Global * Private * Huh, How will this work?
3231 * -----------------------------------------
3232 * <d> Global * Global * No Problem
3233 *------------------------------------------
3234 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3236 *------------------------------------------
3237 * source * dest * result
3238 * -----------------------------------------
3239 * <a> Linklocal * Global *
3240 * -----------------------------------------
3241 * <b> Linklocal * Linklocal * No problem
3242 * -----------------------------------------
3243 * <c> Global * Linklocal * Huh, How will this work?
3244 * -----------------------------------------
3245 * <d> Global * Global * No Problem
3246 *------------------------------------------
3247 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3249 * And then we add to that what happens if there are multiple addresses
3250 * assigned to an interface. Remember the ifa on a ifn is a linked
3251 * list of addresses. So one interface can have more than one IP
3252 * address. What happens if we have both a private and a global
3253 * address? Do we then use context of destination to sort out which
3254 * one is best? And what about NAT's sending P->G may get you a NAT
3255 * translation, or should you select the G thats on the interface in
3260 * - count the number of addresses on the interface.
3261 * - if it is one, no problem except case <c>.
3262 * For <a> we will assume a NAT out there.
3263 * - if there are more than one, then we need to worry about scope P
3264 * or G. We should prefer G -> G and P -> P if possible.
3265 * Then as a secondary fall back to mixed types G->P being a last
3267 * - The above all works for bound all, but bound specific we need to
3268 * use the same concept but instead only consider the bound
3269 * addresses. If the bound set is NOT assigned to the interface then
3270 * we must use rotation amongst the bound addresses..
3272 if (ro->ro_rt == NULL) {
3274 * Need a route to cache.
3276 SCTP_RTALLOC(ro, vrf_id);
3278 if (ro->ro_rt == NULL) {
3281 fam = ro->ro_dst.sa_family;
3282 dest_is_priv = dest_is_loop = 0;
3283 /* Setup our scopes for the destination */
3287 /* Scope based on outbound address */
3288 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3291 /* mark it as local */
3292 net->addr_is_local = 1;
3294 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3301 /* Scope based on outbound address */
3302 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3303 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3305 * If the address is a loopback address, which
3306 * consists of "::1" OR "fe80::1%lo0", we are
3307 * loopback scope. But we don't use dest_is_priv
3308 * (link local addresses).
3312 /* mark it as local */
3313 net->addr_is_local = 1;
3315 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3321 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3322 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3323 SCTP_IPI_ADDR_RLOCK();
3324 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3328 answer = sctp_choose_boundall(stcb, net, ro, vrf_id,
3329 dest_is_priv, dest_is_loop,
3330 non_asoc_addr_ok, fam);
3331 SCTP_IPI_ADDR_RUNLOCK();
3338 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3339 vrf_id, dest_is_priv,
3341 non_asoc_addr_ok, fam);
3343 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3348 SCTP_IPI_ADDR_RUNLOCK();
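/*
 * sctp_find_cmsg() walks the cmsg headers in the 'control' mbuf chain
 * looking for a given cmsg_type at level IPPROTO_SCTP, and also maps the
 * newer SCTP_SNDINFO/SCTP_PRINFO/SCTP_AUTHINFO messages onto a legacy
 * struct sctp_sndrcvinfo when SCTP_SNDRCV is asked for.
 *
 * Hedged userland sketch of how such a control message is typically built
 * (illustrative only, not part of this file):
 *
 *	char buf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];
 *	struct cmsghdr *cmsg = (struct cmsghdr *)buf;
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDINFO;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *
 * The struct sctp_sndinfo is then filled in at CMSG_DATA(cmsg) and buf is
 * handed to sendmsg() as msg_control/msg_controllen.
 */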
3353 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3356 int tlen, at, found;
3357 struct sctp_sndinfo sndinfo;
3358 struct sctp_prinfo prinfo;
3359 struct sctp_authinfo authinfo;
3361 tlen = SCTP_BUF_LEN(control);
3365 * Independent of how many mbufs, find the c_type inside the control
3366 * structure and copy out the data.
3369 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3370 /* There is not enough room for one more. */
3373 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3374 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
3375 /* We don't have a complete CMSG header. */
3378 if (((int)cmh.cmsg_len + at) > tlen) {
3379 /* We don't have the complete CMSG. */
3382 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3383 ((c_type == cmh.cmsg_type) ||
3384 ((c_type == SCTP_SNDRCV) &&
3385 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3386 (cmh.cmsg_type == SCTP_PRINFO) ||
3387 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3388 if (c_type == cmh.cmsg_type) {
3389 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
3392 /* It is exactly what we want. Copy it out. */
3393 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), cpsize, (caddr_t)data);
3396 struct sctp_sndrcvinfo *sndrcvinfo;
3398 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3400 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3403 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3405 switch (cmh.cmsg_type) {
3407 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_sndinfo)) {
3410 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3411 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3412 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3413 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3414 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3415 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3418 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_prinfo)) {
3421 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3422 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3423 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3426 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_authinfo)) {
3429 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3430 sndrcvinfo->sinfo_keynumber_valid = 1;
3431 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3439 at += CMSG_ALIGN(cmh.cmsg_len);
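/*
 * sctp_process_cmsgs_for_init() is run when an implicit association setup
 * carries control data: SCTP_INIT overrides the stream and retransmit
 * defaults (growing the outgoing stream array if needed), while
 * SCTP_DSTADDRV4/SCTP_DSTADDRV6 add additional remote addresses to the new
 * association before the INIT is sent.  Malformed or unusable cmsgs are
 * reported back through *error.
 */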
3445 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3449 struct sctp_initmsg initmsg;
3452 struct sockaddr_in sin;
3456 struct sockaddr_in6 sin6;
3460 tlen = SCTP_BUF_LEN(control);
3463 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3464 /* There is not enough room for one more. */
3468 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3469 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
3470 /* We don't have a complete CMSG header. */
3474 if (((int)cmh.cmsg_len + at) > tlen) {
3475 /* We don't have the complete CMSG. */
3479 if (cmh.cmsg_level == IPPROTO_SCTP) {
3480 switch (cmh.cmsg_type) {
3482 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_initmsg)) {
3486 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3487 if (initmsg.sinit_max_attempts)
3488 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3489 if (initmsg.sinit_num_ostreams)
3490 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3491 if (initmsg.sinit_max_instreams)
3492 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3493 if (initmsg.sinit_max_init_timeo)
3494 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3495 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3496 struct sctp_stream_out *tmp_str;
3499 /* Default is NOT correct */
3500 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3501 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3502 SCTP_TCB_UNLOCK(stcb);
3503 SCTP_MALLOC(tmp_str,
3504 struct sctp_stream_out *,
3505 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3507 SCTP_TCB_LOCK(stcb);
3508 if (tmp_str != NULL) {
3509 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3510 stcb->asoc.strmout = tmp_str;
3511 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3513 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3515 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3516 stcb->asoc.strmout[i].next_sequence_sent = 0;
3517 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3518 stcb->asoc.strmout[i].stream_no = i;
3519 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3520 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
3525 case SCTP_DSTADDRV4:
3526 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in_addr)) {
3530 memset(&sin, 0, sizeof(struct sockaddr_in));
3531 sin.sin_family = AF_INET;
3532 sin.sin_len = sizeof(struct sockaddr_in);
3533 sin.sin_port = stcb->rport;
3534 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3535 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3536 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3537 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3541 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3542 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3549 case SCTP_DSTADDRV6:
3550 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in6_addr)) {
3554 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3555 sin6.sin6_family = AF_INET6;
3556 sin6.sin6_len = sizeof(struct sockaddr_in6);
3557 sin6.sin6_port = stcb->rport;
3558 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3559 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3560 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3565 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3566 in6_sin6_2_sin(&sin, &sin6);
3567 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3568 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3569 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3573 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3574 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3580 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
3581 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3591 at += CMSG_ALIGN(cmh.cmsg_len);
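/*
 * sctp_findassociation_cmsgs() scans the same SCTP_DSTADDRV4/V6 control
 * messages, but earlier in the send path: the first destination address
 * found (with the supplied port filled in) is used to look up an already
 * existing association, so a send on a one-to-many socket can be matched
 * to its TCB before any implicit setup is attempted.
 */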
3596 static struct sctp_tcb *
3597 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3599 struct mbuf *control,
3600 struct sctp_nets **net_p,
3605 struct sctp_tcb *stcb;
3606 struct sockaddr *addr;
3609 struct sockaddr_in sin;
3613 struct sockaddr_in6 sin6;
3617 tlen = SCTP_BUF_LEN(control);
3620 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3621 /* There is not enough room for one more. */
3625 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3626 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
3627 /* We don't have a complete CMSG header. */
3631 if (((int)cmh.cmsg_len + at) > tlen) {
3632 /* We don't have the complete CMSG. */
3636 if (cmh.cmsg_level == IPPROTO_SCTP) {
3637 switch (cmh.cmsg_type) {
3639 case SCTP_DSTADDRV4:
3640 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in_addr)) {
3644 memset(&sin, 0, sizeof(struct sockaddr_in));
3645 sin.sin_family = AF_INET;
3646 sin.sin_len = sizeof(struct sockaddr_in);
3647 sin.sin_port = port;
3648 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3649 addr = (struct sockaddr *)&sin;
3653 case SCTP_DSTADDRV6:
3654 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in6_addr)) {
3658 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3659 sin6.sin6_family = AF_INET6;
3660 sin6.sin6_len = sizeof(struct sockaddr_in6);
3661 sin6.sin6_port = port;
3662 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3664 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3665 in6_sin6_2_sin(&sin, &sin6);
3666 addr = (struct sockaddr *)&sin;
3669 addr = (struct sockaddr *)&sin6;
3677 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3683 at += CMSG_ALIGN(cmh.cmsg_len);
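/*
 * sctp_add_cookie() builds the STATE-COOKIE parameter for an INIT-ACK.
 * The resulting mbuf chain is laid out as
 *   [param header + struct sctp_state_cookie] -> [copy of the received
 *   INIT] -> [copy of our INIT-ACK] -> [SCTP_SIGNATURE_SIZE signature bytes]
 * The signature bytes are zeroed here and the parameter length is filled
 * in at the end; the 'signature' out-parameter points at those bytes so
 * the caller can overwrite them with the real HMAC.
 */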
3688 static struct mbuf *
3689 sctp_add_cookie(struct mbuf *init, int init_offset,
3690 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
3692 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3693 struct sctp_state_cookie *stc;
3694 struct sctp_paramhdr *ph;
3700 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3701 sizeof(struct sctp_paramhdr)), 0,
3702 M_DONTWAIT, 1, MT_DATA);
3706 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
3707 if (copy_init == NULL) {
3711 #ifdef SCTP_MBUF_LOGGING
3712 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3715 for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
3716 if (SCTP_BUF_IS_EXTENDED(mat)) {
3717 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3722 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3724 if (copy_initack == NULL) {
3726 sctp_m_freem(copy_init);
3729 #ifdef SCTP_MBUF_LOGGING
3730 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3733 for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
3734 if (SCTP_BUF_IS_EXTENDED(mat)) {
3735 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3740 /* easy side we just drop it on the end */
3741 ph = mtod(mret, struct sctp_paramhdr *);
3742 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3743 sizeof(struct sctp_paramhdr);
3744 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3745 sizeof(struct sctp_paramhdr));
3746 ph->param_type = htons(SCTP_STATE_COOKIE);
3747 ph->param_length = 0; /* fill in at the end */
3748 /* Fill in the stc cookie data */
3749 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3751 /* tack the INIT and then the INIT-ACK onto the chain */
3753 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3754 cookie_sz += SCTP_BUF_LEN(m_at);
3755 if (SCTP_BUF_NEXT(m_at) == NULL) {
3756 SCTP_BUF_NEXT(m_at) = copy_init;
3760 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3761 cookie_sz += SCTP_BUF_LEN(m_at);
3762 if (SCTP_BUF_NEXT(m_at) == NULL) {
3763 SCTP_BUF_NEXT(m_at) = copy_initack;
3767 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3768 cookie_sz += SCTP_BUF_LEN(m_at);
3769 if (SCTP_BUF_NEXT(m_at) == NULL) {
3773 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
3775 /* no space, so free the entire chain */
3779 SCTP_BUF_LEN(sig) = 0;
3780 SCTP_BUF_NEXT(m_at) = sig;
3782 foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
3783 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3785 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3786 cookie_sz += SCTP_SIGNATURE_SIZE;
3787 ph->param_length = htons(cookie_sz);
3793 sctp_get_ect(struct sctp_tcb *stcb)
3795 if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
3796 return (SCTP_ECT0_BIT);
3802 #if defined(INET) || defined(INET6)
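/*
 * sctp_handle_no_route() is the common "no route / no valid source" error
 * path for the output code below: it notifies the ULP that the interface
 * is down, clears the REACHABLE and PF state of the destination, and, if
 * the failed destination was the primary, installs an alternate net in
 * stcb->asoc.alternate so retransmissions can move elsewhere.
 */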
3804 sctp_handle_no_route(struct sctp_tcb *stcb,
3805 struct sctp_nets *net,
3808 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3811 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3812 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3813 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3814 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3815 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
3816 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3820 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3821 net->dest_state &= ~SCTP_ADDR_PF;
3825 if (net == stcb->asoc.primary_destination) {
3826 /* need a new primary */
3827 struct sctp_nets *alt;
3829 alt = sctp_find_alternate_net(stcb, net, 0);
3831 if (stcb->asoc.alternate) {
3832 sctp_free_remote_addr(stcb->asoc.alternate);
3834 stcb->asoc.alternate = alt;
3835 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3836 if (net->ro._s_addr) {
3837 sctp_free_ifa(net->ro._s_addr);
3838 net->ro._s_addr = NULL;
3840 net->src_addr_selected = 0;
3850 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3851 struct sctp_tcb *stcb, /* may be NULL */
3852 struct sctp_nets *net,
3853 struct sockaddr *to,
3855 uint32_t auth_offset,
3856 struct sctp_auth_chunk *auth,
3857 uint16_t auth_keyid,
3858 int nofragment_flag,
3865 union sctp_sockstore *over_addr,
3866 uint8_t use_mflowid, uint32_t mflowid,
3867 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3868 int so_locked SCTP_UNUSED
3873 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3876 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3877 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3878 * - fill in the HMAC digest of any AUTH chunk in the packet.
3879 * - calculate and fill in the SCTP checksum.
3880 * - prepend an IP address header.
3881 * - if boundall use INADDR_ANY.
3882 * - if boundspecific do source address selection.
3883 * - set fragmentation option for IPv4.
3884 * - On return from IP output, check/adjust mtu size of output
3885 * interface and smallest_mtu size as well.
3887 /* Will need ifdefs around this */
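/*
 * Rough layout of the datagram assembled below (the UDP header is
 * only present when a UDP encapsulation port is in use):
 *
 *   [IPv4 or IPv6 header][UDP header (optional)][SCTP common header][chunks ...]
 */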
3889 struct sctphdr *sctphdr;
3894 #if defined(INET) || defined(INET6)
3896 sctp_route_t *ro = NULL;
3897 struct udphdr *udp = NULL;
3902 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3903 struct socket *so = NULL;
3907 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3908 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
3913 vrf_id = stcb->asoc.vrf_id;
3915 vrf_id = inp->def_vrf_id;
3918 /* fill in the HMAC digest for any AUTH chunk in the packet */
3919 if ((auth != NULL) && (stcb != NULL)) {
3920 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
3923 tos_value = net->dscp;
3925 tos_value = stcb->asoc.default_dscp;
3927 tos_value = inp->sctp_ep.default_dscp;
3930 switch (to->sa_family) {
3934 struct ip *ip = NULL;
3935 sctp_route_t iproute;
3938 len = sizeof(struct ip) + sizeof(struct sctphdr);
3940 len += sizeof(struct udphdr);
3942 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
3945 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3948 SCTP_ALIGN_TO_END(newm, len);
3949 SCTP_BUF_LEN(newm) = len;
3950 SCTP_BUF_NEXT(newm) = m;
3954 if (net->flowidset == 0) {
3955 panic("Flow ID not set");
3958 m->m_pkthdr.flowid = net->flowid;
3959 m->m_flags |= M_FLOWID;
3961 if (use_mflowid != 0) {
3962 m->m_pkthdr.flowid = mflowid;
3963 m->m_flags |= M_FLOWID;
3966 packet_length = sctp_calculate_len(m);
3967 ip = mtod(m, struct ip *);
3968 ip->ip_v = IPVERSION;
3969 ip->ip_hl = (sizeof(struct ip) >> 2);
3970 if (tos_value == 0) {
3972 * This means, in particular, that it is not set
3973 * at the SCTP layer, so use the value from
3976 tos_value = inp->ip_inp.inp.inp_ip_tos;
3980 tos_value |= sctp_get_ect(stcb);
3982 if ((nofragment_flag) && (port == 0)) {
3987 /* FreeBSD has a function for ip_id's */
3988 ip->ip_id = ip_newid();
3990 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
3991 ip->ip_len = packet_length;
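/*
 * Note: ip_len is stored in host byte order here; it is assumed that
 * the lower IP output path on this platform performs the final byte
 * swapping before the packet is transmitted.
 */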
3992 ip->ip_tos = tos_value;
3994 ip->ip_p = IPPROTO_UDP;
3996 ip->ip_p = IPPROTO_SCTP;
4001 memset(&iproute, 0, sizeof(iproute));
4002 memcpy(&ro->ro_dst, to, to->sa_len);
4004 ro = (sctp_route_t *) & net->ro;
4006 /* Now the address selection part */
4007 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4009 /* call the routine to select the src address */
4010 if (net && out_of_asoc_ok == 0) {
4011 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4012 sctp_free_ifa(net->ro._s_addr);
4013 net->ro._s_addr = NULL;
4014 net->src_addr_selected = 0;
4020 if (net->src_addr_selected == 0) {
4021 /* Cache the source address */
4022 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4025 net->src_addr_selected = 1;
4027 if (net->ro._s_addr == NULL) {
4028 /* No route to host */
4029 net->src_addr_selected = 0;
4030 sctp_handle_no_route(stcb, net, so_locked);
4031 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4033 return (EHOSTUNREACH);
4035 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4037 if (over_addr == NULL) {
4038 struct sctp_ifa *_lsrc;
4040 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4044 if (_lsrc == NULL) {
4045 sctp_handle_no_route(stcb, net, so_locked);
4046 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4048 return (EHOSTUNREACH);
4050 ip->ip_src = _lsrc->address.sin.sin_addr;
4051 sctp_free_ifa(_lsrc);
4053 ip->ip_src = over_addr->sin.sin_addr;
4054 SCTP_RTALLOC(ro, vrf_id);
4058 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4059 sctp_handle_no_route(stcb, net, so_locked);
4060 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4062 return (EHOSTUNREACH);
4064 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4065 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4066 udp->uh_dport = port;
4067 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
4069 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
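/*
 * Only the pseudo-header sum is seeded into uh_sum here; the full UDP
 * checksum is expected to be completed later (by checksum offload or
 * the delayed checksum path) once SCTP_ENABLE_UDP_CSUM() has been
 * called on the outgoing packet below.
 */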
4073 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4075 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4078 sctphdr->src_port = src_port;
4079 sctphdr->dest_port = dest_port;
4080 sctphdr->v_tag = v_tag;
4081 sctphdr->checksum = 0;
4084 * If source address selection fails and we find no
4085 * route then the ip_output should fail as well with
4086 * a NO_ROUTE_TO_HOST type error. We probably should
4087 * catch that somewhere and abort the association
4088 * right away (assuming this is an INIT being sent).
4090 if (ro->ro_rt == NULL) {
4092 * src addr selection failed to find a route
4093 * (or valid source addr), so we can't get
4094 * there from here (yet)!
4096 sctp_handle_no_route(stcb, net, so_locked);
4097 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4099 return (EHOSTUNREACH);
4101 if (ro != &iproute) {
4102 memcpy(&iproute, ro, sizeof(*ro));
4104 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4105 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4106 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4107 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
4108 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4111 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4112 /* failed to prepend data, give up */
4113 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4117 #ifdef SCTP_PACKET_LOGGING
4118 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4119 sctp_packet_log(m, packet_length);
4121 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4123 #if defined(SCTP_WITH_NO_CSUM)
4124 SCTP_STAT_INCR(sctps_sendnocrc);
4126 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4127 SCTP_STAT_INCR(sctps_sendswcrc);
4130 SCTP_ENABLE_UDP_CSUM(o_pak);
4133 #if defined(SCTP_WITH_NO_CSUM)
4134 SCTP_STAT_INCR(sctps_sendnocrc);
4136 m->m_pkthdr.csum_flags = CSUM_SCTP;
4137 m->m_pkthdr.csum_data = 0;
4138 SCTP_STAT_INCR(sctps_sendhwcrc);
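/*
 * Setting CSUM_SCTP defers the CRC32c computation to the outgoing
 * interface, or to the stack's delayed checksum handling when the
 * driver cannot do it, which is why this path is counted as a
 * "hardware CRC" send.
 */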
4141 /* send it out. table id is taken from stcb */
4142 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4143 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4144 so = SCTP_INP_SO(inp);
4145 SCTP_SOCKET_UNLOCK(so, 0);
4148 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4149 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4150 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4151 atomic_add_int(&stcb->asoc.refcnt, 1);
4152 SCTP_TCB_UNLOCK(stcb);
4153 SCTP_SOCKET_LOCK(so, 0);
4154 SCTP_TCB_LOCK(stcb);
4155 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4158 SCTP_STAT_INCR(sctps_sendpackets);
4159 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4161 SCTP_STAT_INCR(sctps_senderrors);
4163 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4165 /* free temp routes */
4172 * PMTU check versus smallest asoc MTU goes
4175 if ((ro->ro_rt != NULL) &&
4176 (net->ro._s_addr)) {
4179 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4181 mtu -= sizeof(struct udphdr);
4183 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4184 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4187 } else if (ro->ro_rt == NULL) {
4188 /* route was freed */
4189 if (net->ro._s_addr &&
4190 net->src_addr_selected) {
4191 sctp_free_ifa(net->ro._s_addr);
4192 net->ro._s_addr = NULL;
4194 net->src_addr_selected = 0;
4203 uint32_t flowlabel, flowinfo;
4204 struct ip6_hdr *ip6h;
4205 struct route_in6 ip6route;
4207 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4209 struct sockaddr_in6 lsa6_storage;
4211 u_short prev_port = 0;
4215 flowlabel = net->flowlabel;
4217 flowlabel = stcb->asoc.default_flowlabel;
4219 flowlabel = inp->sctp_ep.default_flowlabel;
4221 if (flowlabel == 0) {
4223 * This means, in particular, that it is not set
4224 * at the SCTP layer, so use the value from
4227 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4229 flowlabel &= 0x000fffff;
4230 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4232 len += sizeof(struct udphdr);
4234 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
4237 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4240 SCTP_ALIGN_TO_END(newm, len);
4241 SCTP_BUF_LEN(newm) = len;
4242 SCTP_BUF_NEXT(newm) = m;
4246 if (net->flowidset == 0) {
4247 panic("Flow ID not set");
4250 m->m_pkthdr.flowid = net->flowid;
4251 m->m_flags |= M_FLOWID;
4253 if (use_mflowid != 0) {
4254 m->m_pkthdr.flowid = mflowid;
4255 m->m_flags |= M_FLOWID;
4258 packet_length = sctp_calculate_len(m);
4260 ip6h = mtod(m, struct ip6_hdr *);
4261 /* protect *sin6 from overwrite */
4262 sin6 = (struct sockaddr_in6 *)to;
4266 /* KAME hack: embed scopeid */
4267 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4268 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4272 memset(&ip6route, 0, sizeof(ip6route));
4273 ro = (sctp_route_t *) & ip6route;
4274 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4276 ro = (sctp_route_t *) & net->ro;
4279 * We assume here that inp_flow is in host byte
4280 * order within the TCB!
4282 if (tos_value == 0) {
4284 * This means, in particular, that it is not set
4285 * at the SCTP layer, so use the value from
4288 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4292 tos_value |= sctp_get_ect(stcb);
4296 flowinfo |= tos_value;
4298 flowinfo |= flowlabel;
4299 ip6h->ip6_flow = htonl(flowinfo);
4301 ip6h->ip6_nxt = IPPROTO_UDP;
4303 ip6h->ip6_nxt = IPPROTO_SCTP;
4305 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4306 ip6h->ip6_dst = sin6->sin6_addr;
4309 * Add SRC address selection here: we can only reuse
4310 * to a limited degree the kame src-addr-sel, since
4311 * we can try their selection but it may not be
4314 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4315 lsa6_tmp.sin6_family = AF_INET6;
4316 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4318 if (net && out_of_asoc_ok == 0) {
4319 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4320 sctp_free_ifa(net->ro._s_addr);
4321 net->ro._s_addr = NULL;
4322 net->src_addr_selected = 0;
4328 if (net->src_addr_selected == 0) {
4329 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4330 /* KAME hack: embed scopeid */
4331 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4332 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4335 /* Cache the source address */
4336 net->ro._s_addr = sctp_source_address_selection(inp,
4342 (void)sa6_recoverscope(sin6);
4343 net->src_addr_selected = 1;
4345 if (net->ro._s_addr == NULL) {
4346 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4347 net->src_addr_selected = 0;
4348 sctp_handle_no_route(stcb, net, so_locked);
4349 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4351 return (EHOSTUNREACH);
4353 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4355 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4356 /* KAME hack: embed scopeid */
4357 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4358 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4361 if (over_addr == NULL) {
4362 struct sctp_ifa *_lsrc;
4364 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4368 if (_lsrc == NULL) {
4369 sctp_handle_no_route(stcb, net, so_locked);
4370 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4372 return (EHOSTUNREACH);
4374 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4375 sctp_free_ifa(_lsrc);
4377 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4378 SCTP_RTALLOC(ro, vrf_id);
4380 (void)sa6_recoverscope(sin6);
4382 lsa6->sin6_port = inp->sctp_lport;
4384 if (ro->ro_rt == NULL) {
4386 * src addr selection failed to find a route
4387 * (or valid source addr), so we can't get
4390 sctp_handle_no_route(stcb, net, so_locked);
4391 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4393 return (EHOSTUNREACH);
4396 * XXX: sa6 may not have a valid sin6_scope_id in
4397 * the non-SCOPEDROUTING case.
4399 bzero(&lsa6_storage, sizeof(lsa6_storage));
4400 lsa6_storage.sin6_family = AF_INET6;
4401 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4402 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4403 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4404 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4409 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4410 lsa6_storage.sin6_port = inp->sctp_lport;
4411 lsa6 = &lsa6_storage;
4412 ip6h->ip6_src = lsa6->sin6_addr;
4415 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4416 sctp_handle_no_route(stcb, net, so_locked);
4417 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4419 return (EHOSTUNREACH);
4421 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4422 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4423 udp->uh_dport = port;
4424 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4426 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4428 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4431 sctphdr->src_port = src_port;
4432 sctphdr->dest_port = dest_port;
4433 sctphdr->v_tag = v_tag;
4434 sctphdr->checksum = 0;
4437 * We set the hop limit now since there is a good
4438 * chance that our ro pointer is now filled
4440 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4441 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4444 /* Copy to be sure something bad is not happening */
4445 sin6->sin6_addr = ip6h->ip6_dst;
4446 lsa6->sin6_addr = ip6h->ip6_src;
4449 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4450 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4451 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4452 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4453 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4455 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4457 * preserve the port and scope for link
4460 prev_scope = sin6->sin6_scope_id;
4461 prev_port = sin6->sin6_port;
4463 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4464 /* failed to prepend data, give up */
4466 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4469 #ifdef SCTP_PACKET_LOGGING
4470 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4471 sctp_packet_log(m, packet_length);
4473 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4475 #if defined(SCTP_WITH_NO_CSUM)
4476 SCTP_STAT_INCR(sctps_sendnocrc);
4478 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4479 SCTP_STAT_INCR(sctps_sendswcrc);
4481 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4482 udp->uh_sum = 0xffff;
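/*
 * A computed checksum of 0 must be sent as 0xffff: a zero UDP
 * checksum field means "no checksum", which is not permitted
 * over IPv6.
 */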
4485 #if defined(SCTP_WITH_NO_CSUM)
4486 SCTP_STAT_INCR(sctps_sendnocrc);
4488 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4489 m->m_pkthdr.csum_data = 0;
4490 SCTP_STAT_INCR(sctps_sendhwcrc);
4493 /* send it out. table id is taken from stcb */
4494 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4495 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4496 so = SCTP_INP_SO(inp);
4497 SCTP_SOCKET_UNLOCK(so, 0);
4500 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4501 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4502 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4503 atomic_add_int(&stcb->asoc.refcnt, 1);
4504 SCTP_TCB_UNLOCK(stcb);
4505 SCTP_SOCKET_LOCK(so, 0);
4506 SCTP_TCB_LOCK(stcb);
4507 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4511 /* for link local this must be done */
4512 sin6->sin6_scope_id = prev_scope;
4513 sin6->sin6_port = prev_port;
4515 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4516 SCTP_STAT_INCR(sctps_sendpackets);
4517 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4519 SCTP_STAT_INCR(sctps_senderrors);
4522 /* Now, if we had a temp route, free it */
4528 * PMTU check versus smallest asoc MTU goes
4531 if (ro->ro_rt == NULL) {
4532 /* Route was freed */
4533 if (net->ro._s_addr &&
4534 net->src_addr_selected) {
4535 sctp_free_ifa(net->ro._s_addr);
4536 net->ro._s_addr = NULL;
4538 net->src_addr_selected = 0;
4540 if ((ro->ro_rt != NULL) &&
4541 (net->ro._s_addr)) {
4544 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4546 (stcb->asoc.smallest_mtu > mtu)) {
4547 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4550 net->mtu -= sizeof(struct udphdr);
4554 if (ND_IFINFO(ifp)->linkmtu &&
4555 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4556 sctp_mtu_size_reset(inp,
4558 ND_IFINFO(ifp)->linkmtu);
4566 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4567 ((struct sockaddr *)to)->sa_family);
4569 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4576 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4577 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4582 struct mbuf *m, *m_at, *mp_last;
4583 struct sctp_nets *net;
4584 struct sctp_init_chunk *init;
4585 struct sctp_supported_addr_param *sup_addr;
4586 struct sctp_adaptation_layer_indication *ali;
4587 struct sctp_ecn_supported_param *ecn;
4588 struct sctp_prsctp_supported_param *prsctp;
4589 struct sctp_supported_chunk_types_param *pr_supported;
4590 int cnt_inits_to = 0;
4595 /* INITs always go to the primary (and usually ONLY) address */
4597 net = stcb->asoc.primary_destination;
4599 net = TAILQ_FIRST(&stcb->asoc.nets);
4604 /* we confirm any address we send an INIT to */
4605 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4606 (void)sctp_set_primary_addr(stcb, NULL, net);
4608 /* we confirm any address we send an INIT to */
4609 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4611 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4613 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
4615 * special hook, if we are sending to link local it will not
4616 * show up in our private address count.
4618 struct sockaddr_in6 *sin6l;
4620 sin6l = &net->ro._l_addr.sin6;
4621 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
4625 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4626 /* This case should not happen */
4627 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4630 /* start the INIT timer */
4631 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4633 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
4635 /* No memory, INIT timer will re-attempt. */
4636 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4639 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
4641 * assume peer supports asconf in order to be able to queue local
4642 * address changes while an INIT is in flight and before the assoc
4645 stcb->asoc.peer_supports_asconf = 1;
4646 /* Now let's put the SCTP header in place */
4647 init = mtod(m, struct sctp_init_chunk *);
4648 /* now the chunk header */
4649 init->ch.chunk_type = SCTP_INITIATION;
4650 init->ch.chunk_flags = 0;
4651 /* fill in later from mbuf we build */
4652 init->ch.chunk_length = 0;
4653 /* place in my tag */
4654 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4655 /* set up some of the credits. */
4656 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4657 SCTP_MINIMAL_RWND));
4659 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4660 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4661 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4662 /* now the address restriction */
4663 /* XXX Should we take the address family of the socket into account? */
4664 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init +
4666 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4669 /* we support 2 types: IPv4/IPv6 */
4670 sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + 2 * sizeof(uint16_t));
4671 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4672 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
4674 /* we support 1 type: IPv6 */
4675 sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
4676 sup_addr->addr_type[0] = htons(SCTP_IPV6_ADDRESS);
4677 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4680 /* we support 1 type: IPv4 */
4681 sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
4682 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4683 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4685 SCTP_BUF_LEN(m) += sizeof(struct sctp_supported_addr_param);
4686 /* adaptation layer indication parameter */
4687 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(struct sctp_supported_addr_param));
4688 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4689 ali->ph.param_length = htons(sizeof(*ali));
4690 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4691 SCTP_BUF_LEN(m) += sizeof(*ali);
4692 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
4694 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4695 /* Add NAT friendly parameter */
4696 struct sctp_paramhdr *ph;
4698 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4699 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4700 ph->param_length = htons(sizeof(struct sctp_paramhdr));
4701 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
4702 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph));
4704 /* now any cookie time extensions */
4705 if (stcb->asoc.cookie_preserve_req) {
4706 struct sctp_cookie_perserve_param *cookie_preserve;
4708 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
4709 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4710 cookie_preserve->ph.param_length = htons(
4711 sizeof(*cookie_preserve));
4712 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4713 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
4714 ecn = (struct sctp_ecn_supported_param *)(
4715 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
4716 stcb->asoc.cookie_preserve_req = 0;
4719 if (stcb->asoc.ecn_allowed == 1) {
4720 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
4721 ecn->ph.param_length = htons(sizeof(*ecn));
4722 SCTP_BUF_LEN(m) += sizeof(*ecn);
4723 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
4726 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
4728 /* And now tell the peer we do pr-sctp */
4729 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
4730 prsctp->ph.param_length = htons(sizeof(*prsctp));
4731 SCTP_BUF_LEN(m) += sizeof(*prsctp);
4733 /* And now tell the peer we do all the extensions */
4734 pr_supported = (struct sctp_supported_chunk_types_param *)
4735 ((caddr_t)prsctp + sizeof(*prsctp));
4736 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4738 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4739 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4740 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4741 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4742 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4743 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4744 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4746 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
4747 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4749 p_len = sizeof(*pr_supported) + num_ext;
4750 pr_supported->ph.param_length = htons(p_len);
4751 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
4752 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
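/*
 * Parameters are padded out to a 4-byte boundary: param_length holds
 * the unpadded length, SCTP_SIZE32() rounds up to the padded size,
 * and the bzero() above clears the pad bytes. The AUTH related
 * parameters below follow the same pattern.
 */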
4755 /* add authentication parameters */
4756 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4757 struct sctp_auth_random *randp;
4758 struct sctp_auth_hmac_algo *hmacs;
4759 struct sctp_auth_chunk_list *chunks;
4761 /* attach RANDOM parameter, if available */
4762 if (stcb->asoc.authinfo.random != NULL) {
4763 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4764 p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
4765 /* random key already contains the header */
4766 bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
4767 /* zero out any padding required */
4768 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
4769 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4771 /* add HMAC_ALGO parameter */
4772 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4773 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
4774 (uint8_t *) hmacs->hmac_ids);
4776 p_len += sizeof(*hmacs);
4777 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4778 hmacs->ph.param_length = htons(p_len);
4779 /* zero out any padding required */
4780 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
4781 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4783 /* add CHUNKS parameter */
4784 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4785 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
4786 chunks->chunk_types);
4788 p_len += sizeof(*chunks);
4789 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4790 chunks->ph.param_length = htons(p_len);
4791 /* zero out any padding required */
4792 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
4793 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4796 /* now the addresses */
4798 struct sctp_scoping scp;
4801 * To optimize this we could put the scoping stuff into a
4802 * structure and remove the individual uint8's from the
4803 * assoc structure. Then we could just sifa in the address
4804 * within the stcb. But for now this is a quick hack to get
4805 * the address stuff teased apart.
4808 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
4809 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
4810 scp.loopback_scope = stcb->asoc.loopback_scope;
4811 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
4812 scp.local_scope = stcb->asoc.local_scope;
4813 scp.site_scope = stcb->asoc.site_scope;
4815 sctp_add_addresses_to_i_ia(inp, stcb, &scp, m, cnt_inits_to);
4818 /* calculate the size and update pkt header and chunk header */
4820 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4821 if (SCTP_BUF_NEXT(m_at) == NULL)
4823 p_len += SCTP_BUF_LEN(m_at);
4825 init->ch.chunk_length = htons(p_len);
4827 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
4828 * here since the timer will drive a retransmission.
4831 /* I don't expect this to execute but we will be safe here */
4833 if ((padval) && (mp_last)) {
4835 * The compiler worries that mp_last may not be set even
4836 * though I think it is impossible :-> however we add
4837 * mp_last here just in case.
4839 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
4841 /* Houston we have a problem, no space */
4846 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4847 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4848 (struct sockaddr *)&net->ro._l_addr,
4849 m, 0, NULL, 0, 0, 0, 0,
4850 inp->sctp_lport, stcb->rport, htonl(0),
4854 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4855 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4856 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4860 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4861 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4864 * Given a mbuf containing an INIT or INIT-ACK with the param_offset
4865 * being equal to the beginning of the params, i.e. (iphlen +
4866 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4867 * end of the mbuf, verifying that all parameters are known.
4869 * For unknown parameters build and return a mbuf with
4870 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4871 * processing this chunk stop, and set *abort_processing to 1.
4873 * By having param_offset be pre-set to where parameters begin it is
4874 * hoped that this routine may be reused in the future by new
4877 struct sctp_paramhdr *phdr, params;
4879 struct mbuf *mat, *op_err;
4880 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4881 int at, limit, pad_needed;
4882 uint16_t ptype, plen, padded_size;
4885 *abort_processing = 0;
4888 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4891 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4892 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4893 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4894 ptype = ntohs(phdr->param_type);
4895 plen = ntohs(phdr->param_length);
4896 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4897 /* wacked parameter */
4898 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4901 limit -= SCTP_SIZE32(plen);
4903 * All parameters for all chunks that we know/understand are
4904 * listed here. We process them in other places and take the
4905 * appropriate stop actions per the upper bits. However, this
4906 * is the generic routine processors can call to get back
4907 * an operr to either incorporate (init-ack) or send.
4909 padded_size = SCTP_SIZE32(plen);
4911 /* Params with variable size */
4912 case SCTP_HEARTBEAT_INFO:
4913 case SCTP_STATE_COOKIE:
4914 case SCTP_UNRECOG_PARAM:
4915 case SCTP_ERROR_CAUSE_IND:
4919 /* Params with variable size within a range */
4920 case SCTP_CHUNK_LIST:
4921 case SCTP_SUPPORTED_CHUNK_EXT:
4922 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4923 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4928 case SCTP_SUPPORTED_ADDRTYPE:
4929 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4930 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4936 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4937 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4942 case SCTP_SET_PRIM_ADDR:
4943 case SCTP_DEL_IP_ADDRESS:
4944 case SCTP_ADD_IP_ADDRESS:
4945 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4946 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4947 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4952 /* Params with a fixed size */
4953 case SCTP_IPV4_ADDRESS:
4954 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4955 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4960 case SCTP_IPV6_ADDRESS:
4961 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4962 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
4967 case SCTP_COOKIE_PRESERVE:
4968 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
4969 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
4974 case SCTP_HAS_NAT_SUPPORT:
4977 case SCTP_PRSCTP_SUPPORTED:
4979 if (padded_size != sizeof(struct sctp_paramhdr)) {
4980 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
4985 case SCTP_ECN_CAPABLE:
4986 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
4987 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
4992 case SCTP_ULP_ADAPTATION:
4993 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
4994 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
4999 case SCTP_SUCCESS_REPORT:
5000 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5001 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5006 case SCTP_HOSTNAME_ADDRESS:
5008 /* We can NOT handle HOST NAME addresses!! */
5011 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5012 *abort_processing = 1;
5013 if (op_err == NULL) {
5014 /* Ok need to try to get a mbuf */
5016 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5018 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5021 l_len += sizeof(struct sctp_paramhdr);
5022 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5024 SCTP_BUF_LEN(op_err) = 0;
5026 * pre-reserve space for ip
5027 * and sctp header and
5031 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5033 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5035 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5036 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5040 /* If we have space */
5041 struct sctp_paramhdr s;
5044 uint32_t cpthis = 0;
5046 pad_needed = 4 - (err_at % 4);
5047 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5048 err_at += pad_needed;
5050 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5051 s.param_length = htons(sizeof(s) + plen);
5052 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5053 err_at += sizeof(s);
5054 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5056 sctp_m_freem(op_err);
5058 * we are out of memory but
5059 * we still need to have a
5060 * look at what to do (the
5061 * system is in trouble
5066 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5073 * we do not recognize the parameter; figure out what
5076 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
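/*
 * Per RFC 4960, the two high-order bits of an unrecognized
 * parameter type say what to do with it: if 0x4000 is set we
 * report it back in an "Unrecognized Parameter" cause, and if
 * 0x8000 is clear we stop processing the rest of the chunk,
 * otherwise we skip the parameter and keep going.
 */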
5077 if ((ptype & 0x4000) == 0x4000) {
5078 /* Report bit is set?? */
5079 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5080 if (op_err == NULL) {
5083 /* Ok need to try to get an mbuf */
5085 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5087 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5090 l_len += sizeof(struct sctp_paramhdr);
5091 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5093 SCTP_BUF_LEN(op_err) = 0;
5095 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5097 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5099 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5100 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5104 /* If we have space */
5105 struct sctp_paramhdr s;
5108 uint32_t cpthis = 0;
5110 pad_needed = 4 - (err_at % 4);
5111 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5112 err_at += pad_needed;
5114 s.param_type = htons(SCTP_UNRECOG_PARAM);
5115 s.param_length = htons(sizeof(s) + plen);
5116 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5117 err_at += sizeof(s);
5118 if (plen > sizeof(tempbuf)) {
5119 plen = sizeof(tempbuf);
5121 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5123 sctp_m_freem(op_err);
5125 * we are out of memory but
5126 * we still need to have a
5127 * look at what to do (the
5128 * system is in trouble
5132 goto more_processing;
5134 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5139 if ((ptype & 0x8000) == 0x0000) {
5140 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5143 /* skip this parameter and continue processing */
5144 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5145 at += SCTP_SIZE32(plen);
5150 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5154 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5155 *abort_processing = 1;
5156 if ((op_err == NULL) && phdr) {
5160 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5162 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5164 l_len += (2 * sizeof(struct sctp_paramhdr));
5165 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5167 SCTP_BUF_LEN(op_err) = 0;
5169 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5171 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5173 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5174 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5177 if ((op_err) && phdr) {
5178 struct sctp_paramhdr s;
5181 uint32_t cpthis = 0;
5183 pad_needed = 4 - (err_at % 4);
5184 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5185 err_at += pad_needed;
5187 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5188 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5189 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5190 err_at += sizeof(s);
5191 /* Only copy back the p-hdr that caused the issue */
5192 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5198 sctp_are_there_new_addresses(struct sctp_association *asoc,
5199 struct mbuf *in_initpkt, int offset)
5202 * Given an INIT packet, look through the packet to verify that there
5203 * are NO new addresses. As we go through the parameters, add reports
5204 * of any un-understood parameters that require an error. Also we
5205 * must return (1) to drop the packet if we see an un-understood
5206 * parameter that tells us to drop the chunk.
5208 struct sockaddr *sa_touse;
5209 struct sockaddr *sa;
5210 struct sctp_paramhdr *phdr, params;
5211 uint16_t ptype, plen;
5213 struct sctp_nets *net;
5217 struct sockaddr_in sin4, *sa4;
5221 struct sockaddr_in6 sin6, *sa6;
5222 struct ip6_hdr *ip6h;
5227 memset(&sin4, 0, sizeof(sin4));
5228 sin4.sin_family = AF_INET;
5229 sin4.sin_len = sizeof(sin4);
5232 memset(&sin6, 0, sizeof(sin6));
5233 sin6.sin6_family = AF_INET6;
5234 sin6.sin6_len = sizeof(sin6);
5237 /* First what about the src address of the pkt ? */
5238 iph = mtod(in_initpkt, struct ip *);
5239 switch (iph->ip_v) {
5242 /* source addr is IPv4 */
5243 sin4.sin_addr = iph->ip_src;
5244 sa_touse = (struct sockaddr *)&sin4;
5248 case IPV6_VERSION >> 4:
5249 /* source addr is IPv6 */
5250 ip6h = mtod(in_initpkt, struct ip6_hdr *);
5251 sin6.sin6_addr = ip6h->ip6_src;
5252 sa_touse = (struct sockaddr *)&sin6;
5260 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5261 sa = (struct sockaddr *)&net->ro._l_addr;
5262 if (sa->sa_family == sa_touse->sa_family) {
5264 if (sa->sa_family == AF_INET) {
5265 sa4 = (struct sockaddr_in *)sa;
5266 if (sa4->sin_addr.s_addr == sin4.sin_addr.s_addr) {
5273 if (sa->sa_family == AF_INET6) {
5274 sa6 = (struct sockaddr_in6 *)sa;
5275 if (SCTP6_ARE_ADDR_EQUAL(sa6, &sin6)) {
5284 /* New address added! no need to look further. */
5287 /* Ok so far, let's munge through the rest of the packet */
5288 offset += sizeof(struct sctp_init_chunk);
5289 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5292 ptype = ntohs(phdr->param_type);
5293 plen = ntohs(phdr->param_length);
5296 case SCTP_IPV4_ADDRESS:
5298 struct sctp_ipv4addr_param *p4, p4_buf;
5300 phdr = sctp_get_next_param(in_initpkt, offset,
5301 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5302 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5306 p4 = (struct sctp_ipv4addr_param *)phdr;
5307 sin4.sin_addr.s_addr = p4->addr;
5308 sa_touse = (struct sockaddr *)&sin4;
5313 case SCTP_IPV6_ADDRESS:
5315 struct sctp_ipv6addr_param *p6, p6_buf;
5317 phdr = sctp_get_next_param(in_initpkt, offset,
5318 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5319 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5323 p6 = (struct sctp_ipv6addr_param *)phdr;
5324 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5326 sa_touse = (struct sockaddr *)&sin6;
5335 /* ok, sa_touse points to one to check */
5337 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5338 sa = (struct sockaddr *)&net->ro._l_addr;
5339 if (sa->sa_family != sa_touse->sa_family) {
5343 if (sa->sa_family == AF_INET) {
5344 sa4 = (struct sockaddr_in *)sa;
5345 if (sa4->sin_addr.s_addr ==
5346 sin4.sin_addr.s_addr) {
5353 if (sa->sa_family == AF_INET6) {
5354 sa6 = (struct sockaddr_in6 *)sa;
5355 if (SCTP6_ARE_ADDR_EQUAL(
5364 /* New addr added! no need to look further */
5368 offset += SCTP_SIZE32(plen);
5369 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5375 * Given an MBUF chain that was sent into us containing an INIT, build an
5376 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has done
5377 * a pullup to include the IPv6/IPv4 header, SCTP header and initial part of the INIT
5378 * message (i.e. the struct sctp_init_msg).
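 * The generated INIT-ACK carries the usual parameters followed by a
 * State Cookie parameter (built by sctp_add_cookie() and HMAC signed
 * below), so in the common case no TCB has to be created until the
 * peer echoes the cookie back.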
5381 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5382 struct mbuf *init_pkt, int iphlen, int offset,
5383 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5384 uint8_t use_mflowid, uint32_t mflowid,
5385 uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5387 struct sctp_association *asoc;
5388 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
5389 struct sctp_init_ack_chunk *initack;
5390 struct sctp_adaptation_layer_indication *ali;
5391 struct sctp_ecn_supported_param *ecn;
5392 struct sctp_prsctp_supported_param *prsctp;
5393 struct sctp_supported_chunk_types_param *pr_supported;
5394 union sctp_sockstore store, store1, *over_addr;
5397 struct sockaddr_in *sin, *to_sin;
5401 struct sockaddr_in6 *sin6, *to_sin6;
5407 struct ip6_hdr *ip6;
5410 struct sockaddr *to;
5411 struct sctp_state_cookie stc;
5412 struct sctp_nets *net = NULL;
5413 uint8_t *signature = NULL;
5414 int cnt_inits_to = 0;
5415 uint16_t his_limit, i_want;
5416 int abort_flag, padval;
5419 int nat_friendly = 0;
5427 if ((asoc != NULL) &&
5428 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5429 (sctp_are_there_new_addresses(asoc, init_pkt, offset))) {
5430 /* new addresses, out of here in non-cookie-wait states */
5432 * Send an ABORT; we don't add the new address error clause,
5433 * though we even set the T bit and copy in the 0 tag. This
5434 * looks no different than if no listener was present.
5436 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL,
5437 use_mflowid, mflowid,
5442 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5443 (offset + sizeof(struct sctp_init_chunk)),
5444 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5447 sctp_send_abort(init_pkt, iphlen, sh,
5448 init_chk->init.initiate_tag, op_err,
5449 use_mflowid, mflowid,
5453 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
5455 /* No memory, INIT timer will re-attempt. */
5457 sctp_m_freem(op_err);
5460 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5462 /* the time I built cookie */
5463 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5465 /* populate any tie tags */
5467 /* unlock before tag selections */
5468 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5469 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5470 stc.cookie_life = asoc->cookie_life;
5471 net = asoc->primary_destination;
5473 stc.tie_tag_my_vtag = 0;
5474 stc.tie_tag_peer_vtag = 0;
5475 /* life I will award this cookie */
5476 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5479 /* copy in the ports for later check */
5480 stc.myport = sh->dest_port;
5481 stc.peerport = sh->src_port;
5484 * If we wanted to honor cookie life extensions, we would add to
5485 * stc.cookie_life. For now we should NOT honor any extension
5487 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5488 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5489 struct inpcb *in_inp;
5491 /* It's a V6 socket */
5492 in_inp = (struct inpcb *)inp;
5493 stc.ipv6_addr_legal = 1;
5494 /* Now look at the binding flag to see if V4 will be legal */
5495 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
5496 stc.ipv4_addr_legal = 1;
5498 /* V4 addresses are NOT legal on the association */
5499 stc.ipv4_addr_legal = 0;
5502 /* It's a V4 socket, no V6 */
5503 stc.ipv4_addr_legal = 1;
5504 stc.ipv6_addr_legal = 0;
5507 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5512 /* now for scope setup */
5513 memset((caddr_t)&store, 0, sizeof(store));
5514 memset((caddr_t)&store1, 0, sizeof(store1));
5517 to_sin = &store1.sin;
5521 to_sin6 = &store1.sin6;
5523 iph = mtod(init_pkt, struct ip *);
5524 /* establish the to_addr's */
5525 switch (iph->ip_v) {
5528 to_sin->sin_port = sh->dest_port;
5529 to_sin->sin_family = AF_INET;
5530 to_sin->sin_len = sizeof(struct sockaddr_in);
5531 to_sin->sin_addr = iph->ip_dst;
5535 case IPV6_VERSION >> 4:
5536 ip6 = mtod(init_pkt, struct ip6_hdr *);
5537 to_sin6->sin6_addr = ip6->ip6_dst;
5538 to_sin6->sin6_scope_id = 0;
5539 to_sin6->sin6_port = sh->dest_port;
5540 to_sin6->sin6_family = AF_INET6;
5541 to_sin6->sin6_len = sizeof(struct sockaddr_in6);
5550 to = (struct sockaddr *)&store;
5551 switch (iph->ip_v) {
5555 sin->sin_family = AF_INET;
5556 sin->sin_len = sizeof(struct sockaddr_in);
5557 sin->sin_port = sh->src_port;
5558 sin->sin_addr = iph->ip_src;
5559 /* lookup address */
5560 stc.address[0] = sin->sin_addr.s_addr;
5564 stc.addr_type = SCTP_IPV4_ADDRESS;
5565 /* local from address */
5566 stc.laddress[0] = to_sin->sin_addr.s_addr;
5567 stc.laddress[1] = 0;
5568 stc.laddress[2] = 0;
5569 stc.laddress[3] = 0;
5570 stc.laddr_type = SCTP_IPV4_ADDRESS;
5571 /* scope_id is only for v6 */
5573 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5574 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
5579 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5580 /* Must use the address in this case */
5581 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
5582 stc.loopback_scope = 1;
5585 stc.local_scope = 0;
5591 case IPV6_VERSION >> 4:
5593 ip6 = mtod(init_pkt, struct ip6_hdr *);
5594 sin6->sin6_family = AF_INET6;
5595 sin6->sin6_len = sizeof(struct sockaddr_in6);
5596 sin6->sin6_port = sh->src_port;
5597 sin6->sin6_addr = ip6->ip6_src;
5598 /* lookup address */
5599 memcpy(&stc.address, &sin6->sin6_addr,
5600 sizeof(struct in6_addr));
5601 sin6->sin6_scope_id = 0;
5602 stc.addr_type = SCTP_IPV6_ADDRESS;
5604 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
5606 * FIX ME: does this have scope from
5609 (void)sa6_recoverscope(sin6);
5610 stc.scope_id = sin6->sin6_scope_id;
5611 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5612 stc.loopback_scope = 1;
5613 stc.local_scope = 0;
5616 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
5618 * If the new destination is a
5619 * LINK_LOCAL we must have both
5620 * site and local scope in common. Don't
5621 * set local scope though, since we
5622 * must depend on the source to be
5623 * added implicitly. We cannot
5624 * assume, just because we share one
5625 * link, that all links are common.
5627 stc.local_scope = 0;
5631 * we start counting for the private
5632 * address stuff at 1, since the
5633 * link local we source from won't
5634 * show up in our scoped count.
5638 * pull out the scope_id from
5642 * FIX ME: does this have scope from
5645 (void)sa6_recoverscope(sin6);
5646 stc.scope_id = sin6->sin6_scope_id;
5647 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5648 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
5650 * If the new destination is
5651 * SITE_LOCAL then we must have site
5656 memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr));
5657 stc.laddr_type = SCTP_IPV6_ADDRESS;
5667 /* set the scope per the existing tcb */
5670 struct sctp_nets *lnet;
5674 stc.loopback_scope = asoc->loopback_scope;
5675 stc.ipv4_scope = asoc->ipv4_local_scope;
5676 stc.site_scope = asoc->site_scope;
5677 stc.local_scope = asoc->local_scope;
5679 /* Why do we not consider IPv4 LL addresses? */
5680 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5681 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5682 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5684 * if we have a LL address, start
5692 /* use the net pointer */
5693 to = (struct sockaddr *)&net->ro._l_addr;
5694 switch (to->sa_family) {
5697 sin = (struct sockaddr_in *)to;
5698 stc.address[0] = sin->sin_addr.s_addr;
5702 stc.addr_type = SCTP_IPV4_ADDRESS;
5703 if (net->src_addr_selected == 0) {
5705 * strange case here, the INIT should have
5706 * done the selection.
5708 net->ro._s_addr = sctp_source_address_selection(inp,
5709 stcb, (sctp_route_t *) & net->ro,
5711 if (net->ro._s_addr == NULL)
5714 net->src_addr_selected = 1;
5717 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5718 stc.laddress[1] = 0;
5719 stc.laddress[2] = 0;
5720 stc.laddress[3] = 0;
5721 stc.laddr_type = SCTP_IPV4_ADDRESS;
5722 /* scope_id is only for v6 */
5728 sin6 = (struct sockaddr_in6 *)to;
5729 memcpy(&stc.address, &sin6->sin6_addr,
5730 sizeof(struct in6_addr));
5731 stc.addr_type = SCTP_IPV6_ADDRESS;
5732 stc.scope_id = sin6->sin6_scope_id;
5733 if (net->src_addr_selected == 0) {
5735 * strange case here, the INIT should have
5736 * done the selection.
5738 net->ro._s_addr = sctp_source_address_selection(inp,
5739 stcb, (sctp_route_t *) & net->ro,
5741 if (net->ro._s_addr == NULL)
5744 net->src_addr_selected = 1;
5746 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5747 sizeof(struct in6_addr));
5748 stc.laddr_type = SCTP_IPV6_ADDRESS;
5753 /* Now let's put the SCTP header in place */
5754 initack = mtod(m, struct sctp_init_ack_chunk *);
5755 /* Save it off for quick ref */
5756 stc.peers_vtag = init_chk->init.initiate_tag;
5758 memcpy(stc.identification, SCTP_VERSION_STRING,
5759 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5760 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5761 /* now the chunk header */
5762 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5763 initack->ch.chunk_flags = 0;
5764 /* fill in later from mbuf we build */
5765 initack->ch.chunk_length = 0;
5766 /* place in my tag */
5767 if ((asoc != NULL) &&
5768 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5769 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5770 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5771 /* re-use the v-tags and init-seq here */
5772 initack->init.initiate_tag = htonl(asoc->my_vtag);
5773 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5775 uint32_t vtag, itsn;
5777 if (hold_inp_lock) {
5778 SCTP_INP_INCR_REF(inp);
5779 SCTP_INP_RUNLOCK(inp);
5782 atomic_add_int(&asoc->refcnt, 1);
5783 SCTP_TCB_UNLOCK(stcb);
5785 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5786 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5788 * Got a duplicate vtag from a peer behind a
5789 * NAT; make sure we don't use it.
5793 initack->init.initiate_tag = htonl(vtag);
5794 /* get a TSN to use too */
5795 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5796 initack->init.initial_tsn = htonl(itsn);
5797 SCTP_TCB_LOCK(stcb);
5798 atomic_add_int(&asoc->refcnt, -1);
5800 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5801 initack->init.initiate_tag = htonl(vtag);
5802 /* get a TSN to use too */
5803 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5805 if (hold_inp_lock) {
5806 SCTP_INP_RLOCK(inp);
5807 SCTP_INP_DECR_REF(inp);
5810 /* save away my tag too */
5811 stc.my_vtag = initack->init.initiate_tag;
5813 /* set up some of the credits. */
5814 so = inp->sctp_socket;
5816 /* memory problem */
5820 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5822 /* set what I want */
5823 his_limit = ntohs(init_chk->init.num_inbound_streams);
5824 /* choose what I want */
5826 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5827 i_want = asoc->streamoutcnt;
5829 i_want = inp->sctp_ep.pre_open_stream_count;
5832 i_want = inp->sctp_ep.pre_open_stream_count;
5834 if (his_limit < i_want) {
5835 /* I Want more :< */
5836 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5838 /* I can have what I want :> */
5839 initack->init.num_outbound_streams = htons(i_want);
5841 /* tell him his limit. */
5842 initack->init.num_inbound_streams =
5843 htons(inp->sctp_ep.max_open_streams_intome);
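/*
 * In other words: the number of outbound streams we offer is the
 * minimum of what we want and the inbound limit the peer advertised
 * in its INIT, while num_inbound_streams simply advertises our own
 * inbound limit.
 */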
5845 /* adaptation layer indication parameter */
5846 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5847 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5848 ali->ph.param_length = htons(sizeof(*ali));
5849 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5850 SCTP_BUF_LEN(m) += sizeof(*ali);
5851 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5854 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
5855 (inp->sctp_ecn_enable == 1)) {
5856 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5857 ecn->ph.param_length = htons(sizeof(*ecn));
5858 SCTP_BUF_LEN(m) += sizeof(*ecn);
5860 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5863 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5865 /* And now tell the peer we do pr-sctp */
5866 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5867 prsctp->ph.param_length = htons(sizeof(*prsctp));
5868 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5870 /* Add NAT friendly parameter */
5871 struct sctp_paramhdr *ph;
5873 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5874 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5875 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5876 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5878 /* And now tell the peer we do all the extensions */
5879 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5880 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5882 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5883 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5884 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5885 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5886 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5887 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5888 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5889 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5890 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5891 p_len = sizeof(*pr_supported) + num_ext;
5892 pr_supported->ph.param_length = htons(p_len);
5893 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5894 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5896 /* add authentication parameters */
5897 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5898 struct sctp_auth_random *randp;
5899 struct sctp_auth_hmac_algo *hmacs;
5900 struct sctp_auth_chunk_list *chunks;
5901 uint16_t random_len;
5903 /* generate and add RANDOM parameter */
5904 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5905 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5906 randp->ph.param_type = htons(SCTP_RANDOM);
5907 p_len = sizeof(*randp) + random_len;
5908 randp->ph.param_length = htons(p_len);
5909 SCTP_READ_RANDOM(randp->random_data, random_len);
5910 /* zero out any padding required */
5911 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5912 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5914 /* add HMAC_ALGO parameter */
5915 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5916 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5917 (uint8_t *) hmacs->hmac_ids);
5919 p_len += sizeof(*hmacs);
5920 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5921 hmacs->ph.param_length = htons(p_len);
5922 /* zero out any padding required */
5923 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5924 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5926 /* add CHUNKS parameter */
5927 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5928 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5929 chunks->chunk_types);
5931 p_len += sizeof(*chunks);
5932 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5933 chunks->ph.param_length = htons(p_len);
5934 /* zero out any padding required */
5935 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5936 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5940 /* now the addresses */
5942 struct sctp_scoping scp;
5945 * To optimize this we could put the scoping stuff into a
5946 * structure and remove the individual uint8's from the stc
5947 * structure. Then we could just sifa in the address within
5948 * the stc.. but for now this is a quick hack to get the
5949 * address stuff teased apart.
5951 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5952 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5953 scp.loopback_scope = stc.loopback_scope;
5954 scp.ipv4_local_scope = stc.ipv4_scope;
5955 scp.local_scope = stc.local_scope;
5956 scp.site_scope = stc.site_scope;
5957 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to);
5960 /* tack on the operational error if present */
5969 llen += SCTP_BUF_LEN(ol);
5970 ol = SCTP_BUF_NEXT(ol);
5973 /* must add a pad to the param */
5974 uint32_t cpthis = 0;
5977 padlen = 4 - (llen % 4);
5978 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
5980 while (SCTP_BUF_NEXT(m_at) != NULL) {
5981 m_at = SCTP_BUF_NEXT(m_at);
5983 SCTP_BUF_NEXT(m_at) = op_err;
5984 while (SCTP_BUF_NEXT(m_at) != NULL) {
5985 m_at = SCTP_BUF_NEXT(m_at);
5988 /* pre-calculate the size and update pkt header and chunk header */
5990 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5991 p_len += SCTP_BUF_LEN(m_tmp);
5992 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5993 /* m_tmp should now point to last one */
5998 /* Now we must build a cookie */
5999 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6000 if (m_cookie == NULL) {
6001 /* memory problem */
6005 /* Now append the cookie to the end and update the space/size */
6006 SCTP_BUF_NEXT(m_tmp) = m_cookie;
6008 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6009 p_len += SCTP_BUF_LEN(m_tmp);
6010 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6011 /* m_tmp should now point to last one */
6017 * Place in the size, but we don't include the last pad (if any) in the chunk length.
6020 initack->ch.chunk_length = htons(p_len);
6023 * Time to sign the cookie. We don't sign over the cookie signature
6024 * itself, thus we set trailer.
6026 (void)sctp_hmac_m(SCTP_HMAC,
6027 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6028 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6029 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
6031 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6032 * here since the timer will drive a retransmission.
6035 if ((padval) && (mp_last)) {
6036 /* see my previous comments on mp_last */
6037 if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
6038 /* Houston we have a problem, no space */
6043 if (stc.loopback_scope) {
6044 over_addr = &store1;
6049 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6051 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6053 use_mflowid, mflowid,
6054 SCTP_SO_NOT_LOCKED);
6055 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
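/*
 * Prune PR-SCTP (buffer-space limited) chunks from the sent and send
 * queues when a new send of equal or higher priority needs room.
 * Chunks are released via sctp_release_pr_sctp_chunk() until at least
 * 'dataout' bytes have been freed.
 */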
6060 sctp_prune_prsctp(struct sctp_tcb *stcb,
6061 struct sctp_association *asoc,
6062 struct sctp_sndrcvinfo *srcv,
6066 struct sctp_tmit_chunk *chk, *nchk;
6068 SCTP_TCB_LOCK_ASSERT(stcb);
6069 if ((asoc->peer_supports_prsctp) &&
6070 (asoc->sent_queue_cnt_removeable > 0)) {
6071 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6073 * Look for chunks marked with the PR_SCTP flag AND
6074 * the buffer space flag. If the one being sent is of
6075 * equal or greater priority, then purge the old one
6076 * and free some space.
6078 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6080 * This one is PR-SCTP AND buffer space limited.
6083 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6085 * Lower numbers equate to higher
6086 * priority, so if the one we are
6087 * looking at has a larger or equal
6088 * priority we want to drop the data
6089 * and NOT retransmit it.
6093 * We release the book_size
6094 * if the mbuf is here
6099 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6103 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6106 freed_spc += ret_spc;
6107 if (freed_spc >= dataout) {
6110 } /* if chunk was present */
6111 } /* if of sufficient priority */
6112 } /* if chunk has enabled */
6113 } /* tailqforeach */
6115 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6116 /* Here we must move to the sent queue and mark */
6117 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6118 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6121 * We release the book_size
6122 * if the mbuf is here
6126 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6129 freed_spc += ret_spc;
6130 if (freed_spc >= dataout) {
6133 } /* end if chk->data */
6134 } /* end if right class */
6135 } /* end if chk pr-sctp */
6136 } /* tailqforeachsafe (chk) */
6137 } /* if enabled in asoc */
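/*
 * Compute the fragmentation point: the largest DATA payload we will
 * place in a single chunk, derived from the smaller of the configured
 * sctp_frag_point and the smallest path MTU, minus IP/SCTP overhead
 * and any AUTH chunk required for DATA, rounded to a word boundary.
 */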
6141 sctp_get_frag_point(struct sctp_tcb *stcb,
6142 struct sctp_association *asoc)
6147 * For endpoints that have both v6 and v4 addresses we must reserve
6148 * room for the ipv6 header, for those that are only dealing with V4
6149 * we use a larger frag point.
6151 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6152 ovh = SCTP_MED_OVERHEAD;
6154 ovh = SCTP_MED_V4_OVERHEAD;
6157 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6158 siz = asoc->smallest_mtu - ovh;
6160 siz = (stcb->asoc.sctp_frag_point - ovh);
6162 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6164 /* A data chunk MUST fit in a cluster */
6165 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6168 /* adjust for an AUTH chunk if DATA requires auth */
6169 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6170 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6173 /* make it an even word boundary please */
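/*
 * Record the PR-SCTP policy for a pending message. Depending on the
 * policy flags, sp->ts holds either a priority (buffer drop), an
 * absolute expiration time (TTL), or a retransmission limit (RTX).
 */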
6180 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6184 * We assume that the user wants PR_SCTP_TTL if the user provides a
6185 * positive lifetime but does not specify any PR_SCTP policy. This
6186 * is a BAD assumption and causes problems at least with the
6187 * U-Vancouver's MPI folks. I will change this to be no policy means NO PR-SCTP.
6190 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6191 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6196 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6197 case CHUNK_FLAGS_PR_SCTP_BUF:
6199 * Time to live is a priority stored in tv_sec when doing
6200 * the buffer drop thing.
6202 sp->ts.tv_sec = sp->timetolive;
6205 case CHUNK_FLAGS_PR_SCTP_TTL:
6209 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6210 tv.tv_sec = sp->timetolive / 1000;
6211 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6213 * TODO sctp_constants.h needs alternative time
6214 * macros when _KERNEL is undefined.
6216 timevaladd(&sp->ts, &tv);
6219 case CHUNK_FLAGS_PR_SCTP_RTX:
6221 * Time to live is the number of retransmissions stored in
6224 sp->ts.tv_sec = sp->timetolive;
6228 SCTPDBG(SCTP_DEBUG_USRREQ1,
6229 "Unknown PR_SCTP policy %u.\n",
6230 PR_SCTP_POLICY(sp->sinfo_flags));
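/*
 * Append a complete message (mbuf chain) to the given stream's output
 * queue: validate the stream number and association state, allocate and
 * fill a stream_queue_pending entry, account for the send buffer space
 * and hand it to the stream scheduler.
 */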
6236 sctp_msg_append(struct sctp_tcb *stcb,
6237 struct sctp_nets *net,
6239 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6243 struct sctp_stream_queue_pending *sp = NULL;
6244 struct sctp_stream_out *strm;
6247 * Given an mbuf chain, put it into the association send queue and
6248 * place it on the wheel
6250 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6251 /* Invalid stream number */
6252 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6256 if ((stcb->asoc.stream_locked) &&
6257 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6258 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6262 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6263 /* Now can we send this? */
6264 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6265 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6266 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6267 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6268 /* got data while shutting down */
6269 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6273 sctp_alloc_a_strmoq(stcb, sp);
6275 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6279 sp->sinfo_flags = srcv->sinfo_flags;
6280 sp->timetolive = srcv->sinfo_timetolive;
6281 sp->ppid = srcv->sinfo_ppid;
6282 sp->context = srcv->sinfo_context;
6284 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6286 atomic_add_int(&sp->net->ref_count, 1);
6290 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6291 sp->stream = srcv->sinfo_stream;
6292 sp->msg_is_complete = 1;
6293 sp->sender_all_done = 1;
6296 sp->tail_mbuf = NULL;
6297 sctp_set_prsctp_policy(sp);
6299 * We could in theory (for sendall) pass the length in, but we would
6300 * still have to hunt through the chain since we need to set up the
6304 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6305 if (SCTP_BUF_NEXT(at) == NULL)
6307 sp->length += SCTP_BUF_LEN(at);
6309 if (srcv->sinfo_keynumber_valid) {
6310 sp->auth_keyid = srcv->sinfo_keynumber;
6312 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6314 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6315 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6316 sp->holds_key_ref = 1;
6318 if (hold_stcb_lock == 0) {
6319 SCTP_TCB_SEND_LOCK(stcb);
6321 sctp_snd_sb_alloc(stcb, sp->length);
6322 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6323 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6324 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
6325 sp->strseq = strm->next_sequence_sent;
6326 strm->next_sequence_sent++;
6328 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6330 if (hold_stcb_lock == 0) {
6331 SCTP_TCB_SEND_UNLOCK(stcb);
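/*
 * Append clonechain onto outchain, tracking the tail in *endofchain.
 * Small chains may be copied by value into the trailing space of a
 * cluster mbuf; otherwise the chain is either taken over directly
 * (can_take_mbuf) or duplicated with SCTP_M_COPYM.
 */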
6341 static struct mbuf *
6342 sctp_copy_mbufchain(struct mbuf *clonechain,
6343 struct mbuf *outchain,
6344 struct mbuf **endofchain,
6347 uint8_t copy_by_ref)
6350 struct mbuf *appendchain;
6354 if (endofchain == NULL) {
6358 sctp_m_freem(outchain);
6361 if (can_take_mbuf) {
6362 appendchain = clonechain;
6365 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6367 /* It's not in a cluster */
6368 if (*endofchain == NULL) {
6369 /* lets get a mbuf cluster */
6370 if (outchain == NULL) {
6371 /* This is the general case */
6373 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6374 if (outchain == NULL) {
6377 SCTP_BUF_LEN(outchain) = 0;
6378 *endofchain = outchain;
6379 /* get the prepend space */
6380 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6383 * We really should not get a NULL
6389 if (SCTP_BUF_NEXT(m) == NULL) {
6393 m = SCTP_BUF_NEXT(m);
6396 if (*endofchain == NULL) {
6398 * huh, TSNH XXX maybe we
6401 sctp_m_freem(outchain);
6405 /* get the new end of length */
6406 len = M_TRAILINGSPACE(*endofchain);
6408 /* how much is left at the end? */
6409 len = M_TRAILINGSPACE(*endofchain);
6411 /* Find the end of the data, for appending */
6412 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6414 /* Now lets copy it out */
6415 if (len >= sizeofcpy) {
6416 /* It all fits, copy it in */
6417 m_copydata(clonechain, 0, sizeofcpy, cp);
6418 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6420 /* fill up the end of the chain */
6422 m_copydata(clonechain, 0, len, cp);
6423 SCTP_BUF_LEN((*endofchain)) += len;
6424 /* now we need another one */
6427 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6432 SCTP_BUF_NEXT((*endofchain)) = m;
6434 cp = mtod((*endofchain), caddr_t);
6435 m_copydata(clonechain, len, sizeofcpy, cp);
6436 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6440 /* copy the old fashion way */
6441 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
6442 #ifdef SCTP_MBUF_LOGGING
6443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6446 for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
6447 if (SCTP_BUF_IS_EXTENDED(mat)) {
6448 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6455 if (appendchain == NULL) {
6458 sctp_m_freem(outchain);
6462 /* tack on to the end */
6463 if (*endofchain != NULL) {
6464 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6468 if (SCTP_BUF_NEXT(m) == NULL) {
6469 SCTP_BUF_NEXT(m) = appendchain;
6472 m = SCTP_BUF_NEXT(m);
6476 * save off the end and update the end-chain position
6480 if (SCTP_BUF_NEXT(m) == NULL) {
6484 m = SCTP_BUF_NEXT(m);
6488 /* save off the end and update the end-chain position */
6491 if (SCTP_BUF_NEXT(m) == NULL) {
6495 m = SCTP_BUF_NEXT(m);
6497 return (appendchain);
6502 sctp_med_chunk_output(struct sctp_inpcb *inp,
6503 struct sctp_tcb *stcb,
6504 struct sctp_association *asoc,
6507 int control_only, int from_where,
6508 struct timeval *now, int *now_filled, int frag_point, int so_locked
6509 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6515 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6516 uint32_t val SCTP_UNUSED)
6518 struct sctp_copy_all *ca;
6521 int added_control = 0;
6522 int un_sent, do_chunk_output = 1;
6523 struct sctp_association *asoc;
6524 struct sctp_nets *net;
6526 ca = (struct sctp_copy_all *)ptr;
6527 if (ca->m == NULL) {
6530 if (ca->inp != inp) {
6534 if ((ca->m) && ca->sndlen) {
6535 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
6537 /* can't copy so we are done */
6541 #ifdef SCTP_MBUF_LOGGING
6542 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6545 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6546 if (SCTP_BUF_IS_EXTENDED(mat)) {
6547 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6555 SCTP_TCB_LOCK_ASSERT(stcb);
6556 if (stcb->asoc.alternate) {
6557 net = stcb->asoc.alternate;
6559 net = stcb->asoc.primary_destination;
6561 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6562 /* Abort this assoc with m as the user defined reason */
6564 struct sctp_paramhdr *ph;
6566 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
6568 ph = mtod(m, struct sctp_paramhdr *);
6569 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6570 ph->param_length = htons(ca->sndlen);
6573 * We add one here to keep the assoc from
6574 * disappearing on us.
6576 atomic_add_int(&stcb->asoc.refcnt, 1);
6577 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6579 * sctp_abort_an_association calls sctp_free_asoc();
6580 * free_asoc will NOT free the association since we
6581 * incremented the refcnt. We do this to prevent
6582 * it being freed and things getting tricky, since we
6583 * could end up (from free_asoc) calling inpcb_free
6584 * which would get a recursive lock call to the
6585 * iterator lock. But as a consequence of that the
6586 * stcb will return to us un-locked. Since
6587 * free_asoc returns with either no TCB or the TCB
6588 * unlocked, we must relock to unlock in the
6589 * iterator timer :-0
6591 SCTP_TCB_LOCK(stcb);
6592 atomic_add_int(&stcb->asoc.refcnt, -1);
6593 goto no_chunk_output;
6597 ret = sctp_msg_append(stcb, net, m,
6601 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6602 /* shutdown this assoc */
6605 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
6607 if (TAILQ_EMPTY(&asoc->send_queue) &&
6608 TAILQ_EMPTY(&asoc->sent_queue) &&
6610 if (asoc->locked_on_sending) {
6614 * there is nothing queued to send, so I'm
6617 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6618 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6619 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6621 * only send SHUTDOWN the first time
6624 sctp_send_shutdown(stcb, net);
6625 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6626 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6628 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6629 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6630 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6632 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6633 asoc->primary_destination);
6635 do_chunk_output = 0;
6639 * we still got (or just got) data to send,
6640 * so set SHUTDOWN_PENDING
6643 * XXX sockets draft says that SCTP_EOF
6644 * should be sent with no data. Currently,
6645 * we will allow user data to be sent first
6646 * and move to SHUTDOWN-PENDING
6648 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6649 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6650 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6651 if (asoc->locked_on_sending) {
6653 * Locked to send out the
6656 struct sctp_stream_queue_pending *sp;
6658 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6660 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6661 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6664 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6665 if (TAILQ_EMPTY(&asoc->send_queue) &&
6666 TAILQ_EMPTY(&asoc->sent_queue) &&
6667 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6669 atomic_add_int(&stcb->asoc.refcnt, 1);
6670 sctp_abort_an_association(stcb->sctp_ep, stcb,
6671 NULL, SCTP_SO_NOT_LOCKED);
6672 atomic_add_int(&stcb->asoc.refcnt, -1);
6673 goto no_chunk_output;
6675 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6676 asoc->primary_destination);
6682 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6683 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6685 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6686 (stcb->asoc.total_flight > 0) &&
6687 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
6689 do_chunk_output = 0;
6691 if (do_chunk_output)
6692 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6693 else if (added_control) {
6694 int num_out = 0, reason = 0, now_filled = 0;
6698 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6699 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6700 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
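/*
 * Iterator completion function for SCTP_SENDALL: release the copied
 * message and the sctp_copy_all bookkeeping structure.
 */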
6711 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6713 struct sctp_copy_all *ca;
6715 ca = (struct sctp_copy_all *)ptr;
6717 * Do a notify here? Kacheong suggests that the notify be done at
6718 * the send time, so you would push up a notification if any send
6719 * failed. Don't know if this is feasible since the only failures we
6720 * have are "memory" related and if you cannot get an mbuf to send
6721 * the data you surely can't get an mbuf to send up to notify the
6722 * user you can't send the data :->
6725 /* now free everything */
6726 sctp_m_freem(ca->m);
6727 SCTP_FREE(ca, SCTP_M_COPYAL);
6731 #define MC_ALIGN(m, len) do { \
6732 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
6737 static struct mbuf *
6738 sctp_copy_out_all(struct uio *uio, int len)
6740 struct mbuf *ret, *at;
6741 int left, willcpy, cancpy, error;
6743 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
6749 SCTP_BUF_LEN(ret) = 0;
6750 /* save space for the data chunk header */
6751 cancpy = M_TRAILINGSPACE(ret);
6752 willcpy = min(cancpy, left);
6755 /* Align data to the end */
6756 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6762 SCTP_BUF_LEN(at) = willcpy;
6763 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6766 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
6767 if (SCTP_BUF_NEXT(at) == NULL) {
6770 at = SCTP_BUF_NEXT(at);
6771 SCTP_BUF_LEN(at) = 0;
6772 cancpy = M_TRAILINGSPACE(at);
6773 willcpy = min(cancpy, left);
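/*
 * Start a send-to-all-associations operation: copy the user data once
 * (from the uio or the supplied mbuf chain) into a sctp_copy_all
 * structure and kick off the PCB iterator that appends the message to
 * every matching association.
 */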
6780 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6781 struct sctp_sndrcvinfo *srcv)
6784 struct sctp_copy_all *ca;
6786 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6790 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6793 memset(ca, 0, sizeof(struct sctp_copy_all));
6797 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6800 * take off the sendall flag; it would be bad if we failed to do this.
6803 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6804 /* get length and mbuf chain */
6806 ca->sndlen = uio->uio_resid;
6807 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6808 if (ca->m == NULL) {
6809 SCTP_FREE(ca, SCTP_M_COPYAL);
6810 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6814 /* Gather the length of the send */
6820 ca->sndlen += SCTP_BUF_LEN(m);
6821 m = SCTP_BUF_NEXT(m);
6825 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6826 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6827 SCTP_ASOC_ANY_STATE,
6829 sctp_sendall_completes, inp, 1);
6831 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6832 SCTP_FREE(ca, SCTP_M_COPYAL);
6833 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
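/*
 * Remove any COOKIE-ECHO chunks still queued on the control send
 * queue and release their data and chunk structures.
 */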
6841 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6843 struct sctp_tmit_chunk *chk, *nchk;
6845 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6846 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6847 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6849 sctp_m_freem(chk->data);
6852 asoc->ctrl_queue_cnt--;
6853 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
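/*
 * Walk the ASCONF send queue and free queued ASCONF chunks whose
 * serial numbers are already covered by asconf_seq_out_acked.
 */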
6859 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6861 struct sctp_association *asoc;
6862 struct sctp_tmit_chunk *chk, *nchk;
6863 struct sctp_asconf_chunk *acp;
6866 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6867 /* find SCTP_ASCONF chunk in queue */
6868 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6870 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6871 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6876 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6878 sctp_m_freem(chk->data);
6881 asoc->ctrl_queue_cnt--;
6882 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
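/*
 * Move the chunks just handed to the lower layer from the send queue
 * to the sent queue (kept in TSN order), start RTT/flight-size
 * accounting, reduce the peer's advertised rwnd and flag window probes.
 */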
6889 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6890 struct sctp_association *asoc,
6891 struct sctp_tmit_chunk **data_list,
6893 struct sctp_nets *net)
6896 struct sctp_tmit_chunk *tp1;
6898 for (i = 0; i < bundle_at; i++) {
6899 /* off of the send queue */
6900 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6901 asoc->send_queue_cnt--;
6904 * For any chunk other than 0 we zap the time; chunk 0 gets
6905 * zapped or set based on whether an RTO measurement is
6908 data_list[i]->do_rtt = 0;
6911 data_list[i]->sent_rcv_time = net->last_sent_time;
6912 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6913 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6914 if (data_list[i]->whoTo == NULL) {
6915 data_list[i]->whoTo = net;
6916 atomic_add_int(&net->ref_count, 1);
6918 /* on to the sent queue */
6919 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6920 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6921 struct sctp_tmit_chunk *tpp;
6923 /* need to move back */
6925 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6927 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6931 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6934 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6936 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6941 /* This does not lower until the cum-ack passes it */
6942 asoc->sent_queue_cnt++;
6943 if ((asoc->peers_rwnd <= 0) &&
6944 (asoc->total_flight == 0) &&
6946 /* Mark the chunk as being a window probe */
6947 SCTP_STAT_INCR(sctps_windowprobed);
6949 #ifdef SCTP_AUDITING_ENABLED
6950 sctp_audit_log(0xC2, 3);
6952 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6953 data_list[i]->snd_count = 1;
6954 data_list[i]->rec.data.chunk_was_revoked = 0;
6955 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6956 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6957 data_list[i]->whoTo->flight_size,
6958 data_list[i]->book_size,
6959 (uintptr_t) data_list[i]->whoTo,
6960 data_list[i]->rec.data.TSN_seq);
6962 sctp_flight_size_increase(data_list[i]);
6963 sctp_total_flight_increase(stcb, data_list[i]);
6964 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6965 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6966 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6968 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6969 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6970 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6971 /* SWS sender side engages */
6972 asoc->peers_rwnd = 0;
6975 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
6976 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
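/*
 * Sweep the control send queue after an output pass, freeing stray
 * one-shot control chunks (SACK, HEARTBEAT, SHUTDOWN, COOKIE-ACK,
 * etc.) left on the queue; stream-reset chunks get special handling
 * via the asoc->str_reset pointer.
 */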
6981 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
6982 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6987 struct sctp_tmit_chunk *chk, *nchk;
6989 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6990 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6991 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
6992 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6993 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6994 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
6995 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6996 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6997 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6998 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6999 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7000 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7001 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7002 /* Stray chunks must be cleaned up */
7004 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7006 sctp_m_freem(chk->data);
7009 asoc->ctrl_queue_cnt--;
7010 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
7011 asoc->fwd_tsn_cnt--;
7012 sctp_free_a_chunk(stcb, chk, so_locked);
7013 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7014 /* special handling, we must look into the param */
7015 if (chk != asoc->str_reset) {
7016 goto clean_up_anyway;
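/*
 * Decide how many bytes of an incomplete message may be moved into a
 * DATA chunk: returns 0 to wait for more data, otherwise a split size
 * bounded by goal_mtu and frag_point.
 */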
7024 sctp_can_we_split_this(struct sctp_tcb *stcb,
7026 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7029 * Make a decision on whether I should split a msg into multiple parts.
7030 * This is only asked of incomplete messages.
7034 * If we are doing EEOR we need to always send it if it's the
7035 * entire thing, since it might be all the guy is putting in
7038 if (goal_mtu >= length) {
7040 * If we have data outstanding,
7041 * we get another chance when the sack
7042 * arrives to transmit - wait for more data
7044 if (stcb->asoc.total_flight == 0) {
7046 * If nothing is in flight, we zero the
7054 /* You can fill the rest */
7059 * For those strange folk that make the send buffer
7060 * smaller than our fragmentation point, we can't
7061 * get a full msg in so we have to allow splitting.
7063 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7066 if ((length <= goal_mtu) ||
7067 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7068 /* Sub-optimal residual, don't split in non-eeor mode. */
7072 * If we reach here length is larger than the goal_mtu. Do we wish
7073 * to split it for the sake of packet putting together?
7075 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7076 /* It's ok to split it */
7077 return (min(goal_mtu, frag_point));
7079 /* Nope, can't split */
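/*
 * Move data for one stream from its pending queue onto the association
 * send queue: pick (or split) the leading message, steal or copy its
 * mbufs, prepend a DATA chunk header, assign the TSN/SSN and apply the
 * PR-SCTP policy.
 */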
7085 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7086 struct sctp_stream_out *strq,
7088 uint32_t frag_point,
7094 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7099 /* Move from the stream to the send_queue keeping track of the total */
7100 struct sctp_association *asoc;
7101 struct sctp_stream_queue_pending *sp;
7102 struct sctp_tmit_chunk *chk;
7103 struct sctp_data_chunk *dchkh;
7104 uint32_t to_move, length;
7105 uint8_t rcv_flags = 0;
7107 uint8_t send_lock_up = 0;
7109 SCTP_TCB_LOCK_ASSERT(stcb);
7112 /* sa_ignore FREED_MEMORY */
7113 sp = TAILQ_FIRST(&strq->outqueue);
7116 if (send_lock_up == 0) {
7117 SCTP_TCB_SEND_LOCK(stcb);
7120 sp = TAILQ_FIRST(&strq->outqueue);
7124 if (strq->last_msg_incomplete) {
7125 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7127 strq->last_msg_incomplete);
7128 strq->last_msg_incomplete = 0;
7132 SCTP_TCB_SEND_UNLOCK(stcb);
7137 if ((sp->msg_is_complete) && (sp->length == 0)) {
7138 if (sp->sender_all_done) {
7140 * We are doing deferred cleanup. Last time through
7141 * when we took all the data the sender_all_done was
7144 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7145 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7146 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7147 sp->sender_all_done,
7149 sp->msg_is_complete,
7153 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7154 SCTP_TCB_SEND_LOCK(stcb);
7157 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7158 TAILQ_REMOVE(&strq->outqueue, sp, next);
7159 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7161 sctp_free_remote_addr(sp->net);
7165 sctp_m_freem(sp->data);
7168 sctp_free_a_strmoq(stcb, sp, so_locked);
7169 /* we can't be locked to it */
7171 stcb->asoc.locked_on_sending = NULL;
7173 SCTP_TCB_SEND_UNLOCK(stcb);
7176 /* back to get the next msg */
7180 * sender just finished this but still holds a
7189 /* is there some to get */
7190 if (sp->length == 0) {
7196 } else if (sp->discard_rest) {
7197 if (send_lock_up == 0) {
7198 SCTP_TCB_SEND_LOCK(stcb);
7201 /* Whack down the size */
7202 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7203 if ((stcb->sctp_socket != NULL) &&
7204 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7205 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7206 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7209 sctp_m_freem(sp->data);
7211 sp->tail_mbuf = NULL;
7221 some_taken = sp->some_taken;
7222 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7223 sp->msg_is_complete = 1;
7226 length = sp->length;
7227 if (sp->msg_is_complete) {
7228 /* The message is complete */
7229 to_move = min(length, frag_point);
7230 if (to_move == length) {
7231 /* All of it fits in the MTU */
7232 if (sp->some_taken) {
7233 rcv_flags |= SCTP_DATA_LAST_FRAG;
7234 sp->put_last_out = 1;
7236 rcv_flags |= SCTP_DATA_NOT_FRAG;
7237 sp->put_last_out = 1;
7240 /* Not all of it fits, we fragment */
7241 if (sp->some_taken == 0) {
7242 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7247 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7250 * We use a snapshot of length in case it
7251 * is expanding during the compare.
7256 if (to_move >= llen) {
7258 if (send_lock_up == 0) {
7260 * We are taking all of an incomplete msg
7261 * thus we need a send lock.
7263 SCTP_TCB_SEND_LOCK(stcb);
7265 if (sp->msg_is_complete) {
7267 * the sender finished the
7274 if (sp->some_taken == 0) {
7275 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7279 /* Nothing to take. */
7280 if (sp->some_taken) {
7289 /* If we reach here, we can copy out a chunk */
7290 sctp_alloc_a_chunk(stcb, chk);
7292 /* No chunk memory */
7298 * Setup for unordered if needed by looking at the user sent info
7301 if (sp->sinfo_flags & SCTP_UNORDERED) {
7302 rcv_flags |= SCTP_DATA_UNORDERED;
7304 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7305 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7306 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7308 /* clear out the chunk before setting up */
7309 memset(chk, 0, sizeof(*chk));
7310 chk->rec.data.rcv_flags = rcv_flags;
7312 if (to_move >= length) {
7313 /* we think we can steal the whole thing */
7314 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7315 SCTP_TCB_SEND_LOCK(stcb);
7318 if (to_move < sp->length) {
7319 /* bail, it changed */
7322 chk->data = sp->data;
7323 chk->last_mbuf = sp->tail_mbuf;
7324 /* register the stealing */
7325 sp->data = sp->tail_mbuf = NULL;
7330 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
7331 chk->last_mbuf = NULL;
7332 if (chk->data == NULL) {
7333 sp->some_taken = some_taken;
7334 sctp_free_a_chunk(stcb, chk, so_locked);
7339 #ifdef SCTP_MBUF_LOGGING
7340 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7343 for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
7344 if (SCTP_BUF_IS_EXTENDED(mat)) {
7345 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7350 /* Pull off the data */
7351 m_adj(sp->data, to_move);
7352 /* Now lets work our way down and compact it */
7354 while (m && (SCTP_BUF_LEN(m) == 0)) {
7355 sp->data = SCTP_BUF_NEXT(m);
7356 SCTP_BUF_NEXT(m) = NULL;
7357 if (sp->tail_mbuf == m) {
7359 * Freeing tail? TSNH since
7360 * we supposedly were taking less
7361 * than the sp->length.
7364 panic("Huh, freing tail? - TSNH");
7366 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7367 sp->tail_mbuf = sp->data = NULL;
7376 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7377 chk->copy_by_ref = 1;
7379 chk->copy_by_ref = 0;
7382 * get last_mbuf and counts of mb usage. This is ugly but hopefully
7383 * it's only one mbuf.
7385 if (chk->last_mbuf == NULL) {
7386 chk->last_mbuf = chk->data;
7387 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7388 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7391 if (to_move > length) {
7392 /*- This should not happen either
7393 * since we always lower to_move to the size
7394 * of sp->length if it's larger.
7397 panic("Huh, how can to_move be larger?");
7399 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7403 atomic_subtract_int(&sp->length, to_move);
7405 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7406 /* Not enough room for a chunk header, get some */
7409 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
7412 * we're in trouble here. _PREPEND below will free
7413 * all the data if there is no leading space, so we
7414 * must put the data back and restore.
7416 if (send_lock_up == 0) {
7417 SCTP_TCB_SEND_LOCK(stcb);
7420 if (chk->data == NULL) {
7421 /* unsteal the data */
7422 sp->data = chk->data;
7423 sp->tail_mbuf = chk->last_mbuf;
7427 /* reassemble the data */
7429 sp->data = chk->data;
7430 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7432 sp->some_taken = some_taken;
7433 atomic_add_int(&sp->length, to_move);
7436 sctp_free_a_chunk(stcb, chk, so_locked);
7440 SCTP_BUF_LEN(m) = 0;
7441 SCTP_BUF_NEXT(m) = chk->data;
7443 M_ALIGN(chk->data, 4);
7446 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
7447 if (chk->data == NULL) {
7448 /* HELP, TSNH since we assured it would not above? */
7450 panic("prepend failes HELP?");
7452 SCTP_PRINTF("prepend fails HELP?\n");
7453 sctp_free_a_chunk(stcb, chk, so_locked);
7459 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7460 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7461 chk->book_size_scale = 0;
7462 chk->sent = SCTP_DATAGRAM_UNSENT;
7465 chk->asoc = &stcb->asoc;
7466 chk->pad_inplace = 0;
7467 chk->no_fr_allowed = 0;
7468 chk->rec.data.stream_seq = sp->strseq;
7469 chk->rec.data.stream_number = sp->stream;
7470 chk->rec.data.payloadtype = sp->ppid;
7471 chk->rec.data.context = sp->context;
7472 chk->rec.data.doing_fast_retransmit = 0;
7474 chk->rec.data.timetodrop = sp->ts;
7475 chk->flags = sp->act_flags;
7478 chk->whoTo = sp->net;
7479 atomic_add_int(&chk->whoTo->ref_count, 1);
7483 if (sp->holds_key_ref) {
7484 chk->auth_keyid = sp->auth_keyid;
7485 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7486 chk->holds_key_ref = 1;
7488 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7489 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7490 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7491 (uintptr_t) stcb, sp->length,
7492 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7493 chk->rec.data.TSN_seq);
7495 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7497 * Put the rest of the things in place now. Size was done earlier in
7498 * previous loop prior to padding.
7501 #ifdef SCTP_ASOCLOG_OF_TSNS
7502 SCTP_TCB_LOCK_ASSERT(stcb);
7503 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7504 asoc->tsn_out_at = 0;
7505 asoc->tsn_out_wrapped = 1;
7507 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7508 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7509 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7510 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7511 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7512 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7513 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7514 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7518 dchkh->ch.chunk_type = SCTP_DATA;
7519 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7520 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7521 dchkh->dp.stream_id = htons(strq->stream_no);
7522 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7523 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7524 dchkh->ch.chunk_length = htons(chk->send_size);
7525 /* Now advance the chk->send_size by the actual pad needed. */
7526 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7531 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7532 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7533 chk->pad_inplace = 1;
7535 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7536 /* pad added an mbuf */
7537 chk->last_mbuf = lm;
7539 chk->send_size += pads;
7541 /* We only re-set the policy if it is on */
7542 if (sp->pr_sctp_on) {
7543 sctp_set_prsctp_policy(sp);
7544 asoc->pr_sctp_cnt++;
7545 chk->pr_sctp_on = 1;
7547 chk->pr_sctp_on = 0;
7549 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7550 /* All done pull and kill the message */
7551 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7552 if (sp->put_last_out == 0) {
7553 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7554 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7555 sp->sender_all_done,
7557 sp->msg_is_complete,
7561 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7562 SCTP_TCB_SEND_LOCK(stcb);
7565 TAILQ_REMOVE(&strq->outqueue, sp, next);
7566 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7568 sctp_free_remote_addr(sp->net);
7572 sctp_m_freem(sp->data);
7575 sctp_free_a_strmoq(stcb, sp, so_locked);
7577 /* we can't be locked to it */
7579 stcb->asoc.locked_on_sending = NULL;
7581 /* more to go, we are locked */
7584 asoc->chunks_on_out_queue++;
7585 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7586 asoc->send_queue_cnt++;
7589 SCTP_TCB_SEND_UNLOCK(stcb);
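/*
 * Fill the association send queue for one destination: ask the stream
 * scheduler for streams and call sctp_move_to_outqueue() until roughly
 * one MTU (goal_mtu) of DATA has been queued or the streams run dry.
 */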
7596 sctp_fill_outqueue(struct sctp_tcb *stcb,
7597 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7598 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7603 struct sctp_association *asoc;
7604 struct sctp_stream_out *strq;
7605 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7608 SCTP_TCB_LOCK_ASSERT(stcb);
7610 switch (net->ro._l_addr.sa.sa_family) {
7613 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7618 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7623 goal_mtu = net->mtu;
7626 /* Need an allowance for the data chunk header too */
7627 goal_mtu -= sizeof(struct sctp_data_chunk);
7629 /* must make even word boundary */
7630 goal_mtu &= 0xfffffffc;
7631 if (asoc->locked_on_sending) {
7632 /* We are stuck on one stream until the message completes. */
7633 strq = asoc->locked_on_sending;
7636 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7639 while ((goal_mtu > 0) && strq) {
7642 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7643 &giveup, eeor_mode, &bail, so_locked);
7645 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7648 asoc->locked_on_sending = strq;
7649 if ((moved_how_much == 0) || (giveup) || bail)
7650 /* no more to move for now */
7653 asoc->locked_on_sending = NULL;
7654 if ((giveup) || bail) {
7657 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7662 total_moved += moved_how_much;
7663 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7664 goal_mtu &= 0xfffffffc;
7669 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7671 if (total_moved == 0) {
7672 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7673 (net == stcb->asoc.primary_destination)) {
7674 /* ran dry for primary network net */
7675 SCTP_STAT_INCR(sctps_primary_randry);
7676 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7677 /* ran dry with CMT on */
7678 SCTP_STAT_INCR(sctps_cmt_randry);
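/*
 * Mark any ECN-ECHO chunks on the control send queue as unsent so
 * they are picked up again by the next output pass.
 */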
7684 sctp_fix_ecn_echo(struct sctp_association *asoc)
7686 struct sctp_tmit_chunk *chk;
7688 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7689 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7690 chk->sent = SCTP_DATAGRAM_UNSENT;
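/*
 * Detach queued data from a destination that is going away: clear
 * sp->net on stream queue entries and chk->whoTo on send queue chunks
 * that reference 'net', releasing the address references so the
 * chunks can be re-targeted.
 */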
7696 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7698 struct sctp_association *asoc;
7699 struct sctp_tmit_chunk *chk;
7700 struct sctp_stream_queue_pending *sp;
7707 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7708 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7709 if (sp->net == net) {
7710 sctp_free_remote_addr(sp->net);
7715 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7716 if (chk->whoTo == net) {
7717 sctp_free_remote_addr(chk->whoTo);
7724 sctp_med_chunk_output(struct sctp_inpcb *inp,
7725 struct sctp_tcb *stcb,
7726 struct sctp_association *asoc,
7729 int control_only, int from_where,
7730 struct timeval *now, int *now_filled, int frag_point, int so_locked
7731 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7737 * Ok this is the generic chunk service queue. We must do the
7738 * following: - Service the stream queue that is next, moving any
7739 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7740 * LAST to the out queue in one pass) and assigning TSN's - Check to
7741 * see if the cwnd/rwnd allows any output, if so we go ahead and
7742 * formulate and send the low level chunks. Making sure to combine
7743 * any control in the control chunk queue also.
7745 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7746 struct mbuf *outchain, *endoutchain;
7747 struct sctp_tmit_chunk *chk, *nchk;
7749 /* temp arrays for unlinking */
7750 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7751 int no_fragmentflg, error;
7752 unsigned int max_rwnd_per_dest, max_send_per_dest;
7753 int one_chunk, hbflag, skip_data_for_this_net;
7754 int asconf, cookie, no_out_cnt;
7755 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7756 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7758 uint32_t auth_offset = 0;
7759 struct sctp_auth_chunk *auth = NULL;
7760 uint16_t auth_keyid;
7761 int override_ok = 1;
7762 int skip_fill_up = 0;
7763 int data_auth_reqd = 0;
7766 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7772 auth_keyid = stcb->asoc.authinfo.active_keyid;
7774 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7775 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7776 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7781 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7783 * First let's prime the pump. For each destination, if there is room
7784 * in the flight size, attempt to pull an MTU's worth out of the
7785 * stream queues into the general send_queue
7787 #ifdef SCTP_AUDITING_ENABLED
7788 sctp_audit_log(0xC2, 2);
7790 SCTP_TCB_LOCK_ASSERT(stcb);
7792 if ((control_only) || (asoc->stream_reset_outstanding))
7797 /* Nothing possible to send? */
7798 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7799 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7800 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7801 TAILQ_EMPTY(&asoc->send_queue) &&
7802 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
7807 if (asoc->peers_rwnd == 0) {
7808 /* No room in peers rwnd */
7810 if (asoc->total_flight > 0) {
7811 /* we are allowed one chunk in flight */
7815 if (stcb->asoc.ecn_echo_cnt_onq) {
7816 /* Record where a sack goes, if any */
7817 if (no_data_chunks &&
7818 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7819 /* Nothing but ECNe to send - we don't do that */
7820 goto nothing_to_send;
7822 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7823 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7824 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7825 sack_goes_to = chk->whoTo;
7830 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7831 if (stcb->sctp_socket)
7832 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7834 max_send_per_dest = 0;
7835 if (no_data_chunks == 0) {
7836 /* How many non-directed chunks are there? */
7837 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7838 if (chk->whoTo == NULL) {
7840 * We already have non-directed chunks on
7841 * the queue, no need to do a fill-up.
7849 if ((no_data_chunks == 0) &&
7850 (skip_fill_up == 0) &&
7851 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7852 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7854 * This for loop we are in takes in each net, if
7855 * it's got space in cwnd and has data sent to it
7856 * (when CMT is off) then it calls
7857 * sctp_fill_outqueue for the net. This gets data on
7858 * the send queue for that network.
7860 * In sctp_fill_outqueue TSN's are assigned and data is
7861 * copied out of the stream buffers. Note mostly
7862 * copy by reference (we hope).
7864 net->window_probe = 0;
7865 if ((net != stcb->asoc.alternate) &&
7866 ((net->dest_state & SCTP_ADDR_PF) ||
7867 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7868 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7869 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7870 sctp_log_cwnd(stcb, net, 1,
7871 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7875 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7876 (net->flight_size == 0)) {
7877 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7879 if (net->flight_size >= net->cwnd) {
7880 /* skip this network, no room - can't fill */
7881 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7882 sctp_log_cwnd(stcb, net, 3,
7883 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7887 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7888 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7890 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7892 /* memory alloc failure */
7898 /* now service each destination and send out what we can for it */
7899 /* Nothing to send? */
7900 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7901 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7902 TAILQ_EMPTY(&asoc->send_queue)) {
7906 if (asoc->sctp_cmt_on_off > 0) {
7907 /* get the last start point */
7908 start_at = asoc->last_net_cmt_send_started;
7909 if (start_at == NULL) {
7910 /* null so to beginning */
7911 start_at = TAILQ_FIRST(&asoc->nets);
7913 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7914 if (start_at == NULL) {
7915 start_at = TAILQ_FIRST(&asoc->nets);
7918 asoc->last_net_cmt_send_started = start_at;
7920 start_at = TAILQ_FIRST(&asoc->nets);
7922 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7923 if (chk->whoTo == NULL) {
7924 if (asoc->alternate) {
7925 chk->whoTo = asoc->alternate;
7927 chk->whoTo = asoc->primary_destination;
7929 atomic_add_int(&chk->whoTo->ref_count, 1);
7932 old_start_at = NULL;
7933 again_one_more_time:
7934 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7935 /* how much can we send? */
7936 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7937 if (old_start_at && (old_start_at == net)) {
7938 /* through list completely. */
7942 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7943 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7944 (net->flight_size >= net->cwnd)) {
7946 * Nothing on control or asconf and flight is full,
7947 * we can skip even in the CMT case.
7952 endoutchain = outchain = NULL;
7955 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7956 skip_data_for_this_net = 1;
7958 skip_data_for_this_net = 0;
7960 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7962 * if we have a route and an ifp check to see if we
7963 * have room to send to this guy
7967 ifp = net->ro.ro_rt->rt_ifp;
7968 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7969 SCTP_STAT_INCR(sctps_ifnomemqueued);
7970 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7971 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7976 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7979 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7984 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7994 if (mtu > asoc->peers_rwnd) {
7995 if (asoc->total_flight > 0) {
7996 /* We have a packet in flight somewhere */
7997 r_mtu = asoc->peers_rwnd;
7999 /* We are always allowed to send one MTU out */
8006 /************************/
8007 /* ASCONF transmission */
8008 /************************/
8009 /* Now first lets go through the asconf queue */
8010 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8011 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8014 if (chk->whoTo == NULL) {
8015 if (asoc->alternate == NULL) {
8016 if (asoc->primary_destination != net) {
8020 if (asoc->alternate != net) {
8025 if (chk->whoTo != net) {
8029 if (chk->data == NULL) {
8032 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8033 chk->sent != SCTP_DATAGRAM_RESEND) {
8037 * if no AUTH is yet included and this chunk
8038 * requires it, make sure to account for it. We
8039 * don't apply the size until the AUTH chunk is
8040 * actually added below in case there is no room for
8041 * this chunk. NOTE: we overload the use of "omtu"
8044 if ((auth == NULL) &&
8045 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8046 stcb->asoc.peer_auth_chunks)) {
8047 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8050 /* Here we do NOT factor the r_mtu */
8051 if ((chk->send_size < (int)(mtu - omtu)) ||
8052 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8054 * We probably should glom the mbuf chain
8055 * from the chk->data for control but the
8056 * problem is it becomes yet one more level
8057 * of tracking to do if for some reason
8058 * output fails. Then I have got to
8059 * reconstruct the merged control chain.. el
8060 * yucko.. for now we take the easy way and do it.
8064 * Add an AUTH chunk, if chunk requires it
8065 * save the offset into the chain for AUTH
8067 if ((auth == NULL) &&
8068 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8069 stcb->asoc.peer_auth_chunks))) {
8070 outchain = sctp_add_auth_chunk(outchain,
8075 chk->rec.chunk_id.id);
8076 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8078 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8079 (int)chk->rec.chunk_id.can_take_data,
8080 chk->send_size, chk->copy_by_ref);
8081 if (outchain == NULL) {
8083 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8086 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8087 /* update our MTU size */
8088 if (mtu > (chk->send_size + omtu))
8089 mtu -= (chk->send_size + omtu);
8092 to_out += (chk->send_size + omtu);
8093 /* Do clear IP_DF ? */
8094 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8097 if (chk->rec.chunk_id.can_take_data)
8100 * set hb flag since we can use these for
8106 * should sysctl this: don't bundle data
8107 * with ASCONF since it requires AUTH
8110 chk->sent = SCTP_DATAGRAM_SENT;
8111 if (chk->whoTo == NULL) {
8113 atomic_add_int(&net->ref_count, 1);
8118 * Ok we are out of room but we can
8119 * output without affecting the
8120 * flight size since this little guy
8121 * is a control only packet.
8123 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8125 * do NOT clear the asconf flag as
8126 * it is used to do appropriate
8127 * source address selection.
8129 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8130 (struct sockaddr *)&net->ro._l_addr,
8131 outchain, auth_offset, auth,
8132 stcb->asoc.authinfo.active_keyid,
8133 no_fragmentflg, 0, asconf,
8134 inp->sctp_lport, stcb->rport,
8135 htonl(stcb->asoc.peer_vtag),
8139 if (error == ENOBUFS) {
8140 asoc->ifp_had_enobuf = 1;
8141 SCTP_STAT_INCR(sctps_lowlevelerr);
8143 if (from_where == 0) {
8144 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8146 if (*now_filled == 0) {
8147 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8149 *now = net->last_sent_time;
8151 net->last_sent_time = *now;
8154 /* error, could not output */
8155 if (error == EHOSTUNREACH) {
8161 sctp_move_chunks_from_net(stcb, net);
8166 asoc->ifp_had_enobuf = 0;
8167 if (*now_filled == 0) {
8168 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8170 *now = net->last_sent_time;
8172 net->last_sent_time = *now;
8176 * increase the number we sent, if a
8177 * cookie is sent we don't tell them
8180 outchain = endoutchain = NULL;
8184 *num_out += ctl_cnt;
8185 /* recalc a clean slate and setup */
8186 switch (net->ro._l_addr.sa.sa_family) {
8189 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8194 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8207 /************************/
8208 /* Control transmission */
8209 /************************/
8210 /* Now first lets go through the control queue */
8211 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8212 if ((sack_goes_to) &&
8213 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8214 (chk->whoTo != sack_goes_to)) {
8216 * if we have a sack in queue, and we are
8217 * looking at an ecn echo that is NOT queued
8218 * to where the sack is going..
8220 if (chk->whoTo == net) {
8222 * Don't transmit it to where its
8223 * going (current net)
8226 } else if (sack_goes_to == net) {
8228 * But do transmit it to this
8231 goto skip_net_check;
8234 if (chk->whoTo == NULL) {
8235 if (asoc->alternate == NULL) {
8236 if (asoc->primary_destination != net) {
8240 if (asoc->alternate != net) {
8245 if (chk->whoTo != net) {
8250 if (chk->data == NULL) {
8253 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8255 * It must be unsent. Cookies and ASCONF's
8256 * hang around but their timers will force
8257 * them out when marked for resend.
8262 * if no AUTH is yet included and this chunk
8263 * requires it, make sure to account for it. We
8264 * don't apply the size until the AUTH chunk is
8265 * actually added below in case there is no room for
8266 * this chunk. NOTE: we overload the use of "omtu"
8269 if ((auth == NULL) &&
8270 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8271 stcb->asoc.peer_auth_chunks)) {
8272 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8275 /* Here we do NOT factor the r_mtu */
8276 if ((chk->send_size <= (int)(mtu - omtu)) ||
8277 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8279 * We probably should glom the mbuf chain
8280 * from the chk->data for control but the
8281 * problem is it becomes yet one more level
8282 * of tracking to do if for some reason
8283 * output fails. Then I have got to
8284 * reconstruct the merged control chain.. el
8285 * yucko.. for now we take the easy way and do it.
8289 * Add an AUTH chunk, if chunk requires it
8290 * save the offset into the chain for AUTH
8292 if ((auth == NULL) &&
8293 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8294 stcb->asoc.peer_auth_chunks))) {
8295 outchain = sctp_add_auth_chunk(outchain,
8300 chk->rec.chunk_id.id);
8301 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8303 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8304 (int)chk->rec.chunk_id.can_take_data,
8305 chk->send_size, chk->copy_by_ref);
8306 if (outchain == NULL) {
8308 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8311 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8312 /* update our MTU size */
8313 if (mtu > (chk->send_size + omtu))
8314 mtu -= (chk->send_size + omtu);
8317 to_out += (chk->send_size + omtu);
8318 /* Do clear IP_DF ? */
8319 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8322 if (chk->rec.chunk_id.can_take_data)
8324 /* Mark things to be removed, if needed */
8325 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8326 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8327 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8328 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8329 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8330 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8331 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8332 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8333 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8334 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8335 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8336 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8339 /* remove these chunks at the end */
8340 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8341 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8342 /* turn off the timer */
8343 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8344 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8345 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8351 * Other chunks, since they have
8352 * timers running (i.e. COOKIE) we
8353 * just "trust" that it gets sent or
8357 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8360 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8362 * Increment ecne send count
8363 * here; this means we may be
8364 * over-zealous in our
8365 * counting if the send
8366 * fails, but it's the best
8367 * place to do it (we used
8368 * to do it in the queue of
8369 * the chunk, but that did
8370 * not tell how many times
8373 SCTP_STAT_INCR(sctps_sendecne);
8375 chk->sent = SCTP_DATAGRAM_SENT;
8376 if (chk->whoTo == NULL) {
8378 atomic_add_int(&net->ref_count, 1);
8384 * Ok we are out of room but we can
8385 * output without affecting the
8386 * flight size since this little guy
8387 * is a control-only packet.
8390 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8392 * do NOT clear the asconf
8393 * flag as it is used to do
8394 * appropriate source
8395 * address selection.
8399 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8402 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8403 (struct sockaddr *)&net->ro._l_addr,
8406 stcb->asoc.authinfo.active_keyid,
8407 no_fragmentflg, 0, asconf,
8408 inp->sctp_lport, stcb->rport,
8409 htonl(stcb->asoc.peer_vtag),
8413 if (error == ENOBUFS) {
8414 asoc->ifp_had_enobuf = 1;
8415 SCTP_STAT_INCR(sctps_lowlevelerr);
8417 if (from_where == 0) {
8418 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8420 /* error, could not output */
8422 if (*now_filled == 0) {
8423 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8425 *now = net->last_sent_time;
8427 net->last_sent_time = *now;
8431 if (error == EHOSTUNREACH) {
8437 sctp_move_chunks_from_net(stcb, net);
8442 asoc->ifp_had_enobuf = 0;
8443 /* Only HB or ASCONF advances time */
8445 if (*now_filled == 0) {
8446 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8448 *now = net->last_sent_time;
8450 net->last_sent_time = *now;
8455 * increase the number we sent, if a
8456 * cookie is sent we don't tell them
8459 outchain = endoutchain = NULL;
8463 *num_out += ctl_cnt;
8464 /* recalc a clean slate and setup */
8465 switch (net->ro._l_addr.sa.sa_family) {
8468 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8473 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8486 /* JRI: if dest is in PF state, do not send data to it */
8487 if ((asoc->sctp_cmt_on_off > 0) &&
8488 (net != stcb->asoc.alternate) &&
8489 (net->dest_state & SCTP_ADDR_PF)) {
8492 if (net->flight_size >= net->cwnd) {
8495 if ((asoc->sctp_cmt_on_off > 0) &&
8496 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8497 (net->flight_size > max_rwnd_per_dest)) {
8501 * We need a specific accounting for the usage of the send
8502 * buffer. We also need to check the number of messages per
8503 * net. For now, this is better than nothing and it disabled
8506 if ((asoc->sctp_cmt_on_off > 0) &&
8507 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8508 (max_send_per_dest > 0) &&
8509 (net->flight_size > max_send_per_dest)) {
8512 /*********************/
8513 /* Data transmission */
8514 /*********************/
8516 * if AUTH for DATA is required and no AUTH has been added
8517 * yet, account for this in the mtu now... if no data can be
8518 * bundled, this adjustment won't matter anyways since the
8519 * packet will be going out...
8521 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8522 stcb->asoc.peer_auth_chunks);
8523 if (data_auth_reqd && (auth == NULL)) {
8524 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8526 /* now lets add any data within the MTU constraints */
8527 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8530 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8531 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8538 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8539 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8549 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8550 (skip_data_for_this_net == 0)) ||
8552 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8553 if (no_data_chunks) {
8554 /* let only control go out */
8558 if (net->flight_size >= net->cwnd) {
8559 /* skip this net, no room for data */
8563 if ((chk->whoTo != NULL) &&
8564 (chk->whoTo != net)) {
8565 /* Don't send the chunk on this net */
8568 if (asoc->sctp_cmt_on_off == 0) {
8569 if ((asoc->alternate) &&
8570 (asoc->alternate != net) &&
8571 (chk->whoTo == NULL)) {
8573 } else if ((net != asoc->primary_destination) &&
8574 (asoc->alternate == NULL) &&
8575 (chk->whoTo == NULL)) {
8579 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8581 * strange, we have a chunk that is
8582 * too big for its destination and
8583 * yet no fragment ok flag.
8584 * Something went wrong when the
8585 * PMTU changed...we did not mark
8586 * this chunk for some reason?? I
8587 * will fix it here by letting IP
8588 * fragment it for now and printing
8589 * a warning. This really should not
8592 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8593 chk->send_size, mtu);
8594 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8596 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8597 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8598 struct sctp_data_chunk *dchkh;
8600 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8601 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
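/*
 * A DATA chunk is bundled into this packet only if it fits both the
 * remaining path-MTU budget (mtu) and the remaining receive-window
 * budget (r_mtu), or if it is marked fragment-ok and the peer's rwnd
 * can still absorb it.
 */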
8603 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8604 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8605 /* ok we will add this one */
8608 * Add an AUTH chunk, if chunk
8609 * requires it, save the offset into
8610 * the chain for AUTH
8612 if (data_auth_reqd) {
8614 outchain = sctp_add_auth_chunk(outchain,
8620 auth_keyid = chk->auth_keyid;
8622 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8623 } else if (override_ok) {
8628 auth_keyid = chk->auth_keyid;
8630 } else if (auth_keyid != chk->auth_keyid) {
8638 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8639 chk->send_size, chk->copy_by_ref);
8640 if (outchain == NULL) {
8641 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8642 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8643 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8646 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8649 /* update our MTU size */
8650 /* Do clear IP_DF ? */
8651 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8654 /* unsigned subtraction of mtu */
8655 if (mtu > chk->send_size)
8656 mtu -= chk->send_size;
8659 /* unsigned subtraction of r_mtu */
8660 if (r_mtu > chk->send_size)
8661 r_mtu -= chk->send_size;
8665 to_out += chk->send_size;
8666 if ((to_out > mx_mtu) && no_fragmentflg) {
8668 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8670 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8674 chk->window_probe = 0;
8675 data_list[bundle_at++] = chk;
8676 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8679 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8680 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8681 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8683 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8685 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8686 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8696 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
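/*
 * When the MTU or rwnd budget for this packet is exhausted, or a
 * single chunk is being forced out, bundling for this packet ends
 * here. A lone chunk sent while nothing is in flight is flagged as a
 * window probe on both the chunk and the destination.
 */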
8698 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8699 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8700 data_list[0]->window_probe = 1;
8701 net->window_probe = 1;
8707 * Must be sent in order of the
8708 * TSN's (on a network)
8712 } /* for (chunk gather loop for this net) */
8713 } /* if asoc.state OPEN */
8715 /* Is there something to send for this destination? */
8717 /* We may need to start a control timer or two */
8719 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8722 * do NOT clear the asconf flag as it is
8723 * used to do appropriate source address
8728 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8731 /* must start a send timer if data is being sent */
8732 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8734 * no timer running on this destination
8737 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8739 /* Now send it, if there is anything to send :> */
8740 if ((error = sctp_lowlevel_chunk_output(inp,
8743 (struct sockaddr *)&net->ro._l_addr,
8751 inp->sctp_lport, stcb->rport,
8752 htonl(stcb->asoc.peer_vtag),
8756 /* error, we could not output */
8757 if (error == ENOBUFS) {
8758 SCTP_STAT_INCR(sctps_lowlevelerr);
8759 asoc->ifp_had_enobuf = 1;
8761 if (from_where == 0) {
8762 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8764 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8766 if (*now_filled == 0) {
8767 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8769 *now = net->last_sent_time;
8771 net->last_sent_time = *now;
8775 if (error == EHOSTUNREACH) {
8777 * Destination went unreachable
8780 sctp_move_chunks_from_net(stcb, net);
8784 * I add this line to be paranoid. As far as
8785 * I can tell the continue takes us back to
8786 * the top of the for, but just to make sure
8787 * I will reset these again here.
8789 ctl_cnt = bundle_at = 0;
8790 continue; /* This takes us back to the
8791 * for() for the nets. */
8793 asoc->ifp_had_enobuf = 0;
8798 if (bundle_at || hbflag) {
8799 /* For data/asconf and hb set time */
8800 if (*now_filled == 0) {
8801 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8803 *now = net->last_sent_time;
8805 net->last_sent_time = *now;
8809 *num_out += (ctl_cnt + bundle_at);
8812 /* setup for a RTO measurement */
8813 tsns_sent = data_list[0]->rec.data.TSN_seq;
8814 /* fill time if not already filled */
8815 if (*now_filled == 0) {
8816 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8818 *now = asoc->time_last_sent;
8820 asoc->time_last_sent = *now;
8822 if (net->rto_needed) {
8823 data_list[0]->do_rtt = 1;
8824 net->rto_needed = 0;
8826 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8827 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8833 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8834 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8837 if (old_start_at == NULL) {
8838 old_start_at = start_at;
8839 start_at = TAILQ_FIRST(&asoc->nets);
8841 goto again_one_more_time;
8844 * At the end there should be no NON-timed chunks hanging on this
8847 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8848 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8850 if ((*num_out == 0) && (*reason_code == 0)) {
8855 sctp_clean_up_ctl(stcb, asoc, so_locked);
8860 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8863 * Prepend an OPERATION_ERROR chunk header and put it on the end of
8864 * the control chunk queue.
8866 struct sctp_chunkhdr *hdr;
8867 struct sctp_tmit_chunk *chk;
8870 SCTP_TCB_LOCK_ASSERT(stcb);
8871 sctp_alloc_a_chunk(stcb, chk);
8874 sctp_m_freem(op_err);
8877 chk->copy_by_ref = 0;
8878 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
8879 if (op_err == NULL) {
8880 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
8885 while (mat != NULL) {
8886 chk->send_size += SCTP_BUF_LEN(mat);
8887 mat = SCTP_BUF_NEXT(mat);
8889 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8890 chk->rec.chunk_id.can_take_data = 1;
8891 chk->sent = SCTP_DATAGRAM_UNSENT;
8894 chk->asoc = &stcb->asoc;
8897 hdr = mtod(op_err, struct sctp_chunkhdr *);
8898 hdr->chunk_type = SCTP_OPERATION_ERROR;
8899 hdr->chunk_flags = 0;
8900 hdr->chunk_length = htons(chk->send_size);
8901 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8904 chk->asoc->ctrl_queue_cnt++;
8908 sctp_send_cookie_echo(struct mbuf *m,
8910 struct sctp_tcb *stcb,
8911 struct sctp_nets *net)
8914 * pull out the cookie and put it at the front of the control chunk
8918 struct mbuf *cookie;
8919 struct sctp_paramhdr parm, *phdr;
8920 struct sctp_chunkhdr *hdr;
8921 struct sctp_tmit_chunk *chk;
8922 uint16_t ptype, plen;
8924 /* First find the cookie in the param area */
8926 at = offset + sizeof(struct sctp_init_chunk);
8928 SCTP_TCB_LOCK_ASSERT(stcb);
8930 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8934 ptype = ntohs(phdr->param_type);
8935 plen = ntohs(phdr->param_length);
8936 if (ptype == SCTP_STATE_COOKIE) {
8939 /* found the cookie */
8940 if ((pad = (plen % 4))) {
8943 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
8944 if (cookie == NULL) {
8948 #ifdef SCTP_MBUF_LOGGING
8949 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8952 for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
8953 if (SCTP_BUF_IS_EXTENDED(mat)) {
8954 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8961 at += SCTP_SIZE32(plen);
8963 if (cookie == NULL) {
8964 /* Did not find the cookie */
8967 /* ok, we got the cookie lets change it into a cookie echo chunk */
8969 /* first the change from param to cookie */
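/*
 * The 4-byte parameter header is simply overlaid with a chunk header
 * of the same size; the length field already matches, so only the
 * type and flags need to be rewritten.
 */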
8970 hdr = mtod(cookie, struct sctp_chunkhdr *);
8971 hdr->chunk_type = SCTP_COOKIE_ECHO;
8972 hdr->chunk_flags = 0;
8973 /* get the chunk stuff now and place it in the FRONT of the queue */
8974 sctp_alloc_a_chunk(stcb, chk);
8977 sctp_m_freem(cookie);
8980 chk->copy_by_ref = 0;
8981 chk->send_size = plen;
8982 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8983 chk->rec.chunk_id.can_take_data = 0;
8984 chk->sent = SCTP_DATAGRAM_UNSENT;
8986 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8987 chk->asoc = &stcb->asoc;
8990 atomic_add_int(&chk->whoTo->ref_count, 1);
8991 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
8992 chk->asoc->ctrl_queue_cnt++;
8997 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9001 struct sctp_nets *net)
9004 * take a HB request and make it into a HB ack and send it.
9006 struct mbuf *outchain;
9007 struct sctp_chunkhdr *chdr;
9008 struct sctp_tmit_chunk *chk;
9012 /* must have a net pointer */
9015 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
9016 if (outchain == NULL) {
9017 /* gak out of memory */
9020 #ifdef SCTP_MBUF_LOGGING
9021 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9024 for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
9025 if (SCTP_BUF_IS_EXTENDED(mat)) {
9026 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9031 chdr = mtod(outchain, struct sctp_chunkhdr *);
9032 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9033 chdr->chunk_flags = 0;
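/*
 * Chunks must be padded to a 4-byte boundary; if the copied heartbeat
 * is not already aligned, zero bytes are appended to the mbuf chain
 * before it is queued as the HEARTBEAT-ACK.
 */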
9034 if (chk_length % 4) {
9036 uint32_t cpthis = 0;
9039 padlen = 4 - (chk_length % 4);
9040 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
9042 sctp_alloc_a_chunk(stcb, chk);
9045 sctp_m_freem(outchain);
9048 chk->copy_by_ref = 0;
9049 chk->send_size = chk_length;
9050 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9051 chk->rec.chunk_id.can_take_data = 1;
9052 chk->sent = SCTP_DATAGRAM_UNSENT;
9055 chk->asoc = &stcb->asoc;
9056 chk->data = outchain;
9058 atomic_add_int(&chk->whoTo->ref_count, 1);
9059 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9060 chk->asoc->ctrl_queue_cnt++;
9064 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9066 /* formulate and queue a cookie-ack back to the sender */
9067 struct mbuf *cookie_ack;
9068 struct sctp_chunkhdr *hdr;
9069 struct sctp_tmit_chunk *chk;
9072 SCTP_TCB_LOCK_ASSERT(stcb);
9074 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
9075 if (cookie_ack == NULL) {
9079 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9080 sctp_alloc_a_chunk(stcb, chk);
9083 sctp_m_freem(cookie_ack);
9086 chk->copy_by_ref = 0;
9087 chk->send_size = sizeof(struct sctp_chunkhdr);
9088 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9089 chk->rec.chunk_id.can_take_data = 1;
9090 chk->sent = SCTP_DATAGRAM_UNSENT;
9093 chk->asoc = &stcb->asoc;
9094 chk->data = cookie_ack;
9095 if (chk->asoc->last_control_chunk_from != NULL) {
9096 chk->whoTo = chk->asoc->last_control_chunk_from;
9097 atomic_add_int(&chk->whoTo->ref_count, 1);
9101 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9102 hdr->chunk_type = SCTP_COOKIE_ACK;
9103 hdr->chunk_flags = 0;
9104 hdr->chunk_length = htons(chk->send_size);
9105 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9106 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9107 chk->asoc->ctrl_queue_cnt++;
9113 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9115 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9116 struct mbuf *m_shutdown_ack;
9117 struct sctp_shutdown_ack_chunk *ack_cp;
9118 struct sctp_tmit_chunk *chk;
9120 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9121 if (m_shutdown_ack == NULL) {
9125 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9126 sctp_alloc_a_chunk(stcb, chk);
9129 sctp_m_freem(m_shutdown_ack);
9132 chk->copy_by_ref = 0;
9133 chk->send_size = sizeof(struct sctp_chunkhdr);
9134 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9135 chk->rec.chunk_id.can_take_data = 1;
9136 chk->sent = SCTP_DATAGRAM_UNSENT;
9139 chk->asoc = &stcb->asoc;
9140 chk->data = m_shutdown_ack;
9143 atomic_add_int(&chk->whoTo->ref_count, 1);
9145 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9146 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9147 ack_cp->ch.chunk_flags = 0;
9148 ack_cp->ch.chunk_length = htons(chk->send_size);
9149 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9150 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9151 chk->asoc->ctrl_queue_cnt++;
9156 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9158 /* formulate and queue a SHUTDOWN to the sender */
9159 struct mbuf *m_shutdown;
9160 struct sctp_shutdown_chunk *shutdown_cp;
9161 struct sctp_tmit_chunk *chk;
9163 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9164 if (m_shutdown == NULL) {
9168 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9169 sctp_alloc_a_chunk(stcb, chk);
9172 sctp_m_freem(m_shutdown);
9175 chk->copy_by_ref = 0;
9176 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9177 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9178 chk->rec.chunk_id.can_take_data = 1;
9179 chk->sent = SCTP_DATAGRAM_UNSENT;
9182 chk->asoc = &stcb->asoc;
9183 chk->data = m_shutdown;
9186 atomic_add_int(&chk->whoTo->ref_count, 1);
9188 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9189 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9190 shutdown_cp->ch.chunk_flags = 0;
9191 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9192 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9193 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9194 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9195 chk->asoc->ctrl_queue_cnt++;
9200 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9203 * formulate and queue an ASCONF to the peer. ASCONF parameters
9204 * should be queued on the assoc queue.
9206 struct sctp_tmit_chunk *chk;
9207 struct mbuf *m_asconf;
9210 SCTP_TCB_LOCK_ASSERT(stcb);
9212 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9213 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9214 /* can't send a new one if there is one in flight already */
9217 /* compose an ASCONF chunk, maximum length is PMTU */
9218 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9219 if (m_asconf == NULL) {
9222 sctp_alloc_a_chunk(stcb, chk);
9225 sctp_m_freem(m_asconf);
9228 chk->copy_by_ref = 0;
9229 chk->data = m_asconf;
9230 chk->send_size = len;
9231 chk->rec.chunk_id.id = SCTP_ASCONF;
9232 chk->rec.chunk_id.can_take_data = 0;
9233 chk->sent = SCTP_DATAGRAM_UNSENT;
9235 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9236 chk->asoc = &stcb->asoc;
9239 atomic_add_int(&chk->whoTo->ref_count, 1);
9241 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9242 chk->asoc->ctrl_queue_cnt++;
9247 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9250 * formulate and queue an ASCONF-ACK back to the sender. The ASCONF-ACK
9251 * must be stored in the tcb.
9253 struct sctp_tmit_chunk *chk;
9254 struct sctp_asconf_ack *ack, *latest_ack;
9256 struct sctp_nets *net = NULL;
9258 SCTP_TCB_LOCK_ASSERT(stcb);
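/*
 * Destination selection: if the most recent ASCONF-ACK was already
 * sent to the address the current ASCONF arrived from, this is a
 * retransmission and an alternate net is preferred; otherwise the ack
 * is sent back to where the ASCONF (or the last control chunk) came
 * from.
 */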
9259 /* Get the latest ASCONF-ACK */
9260 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9261 if (latest_ack == NULL) {
9264 if (latest_ack->last_sent_to != NULL &&
9265 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9266 /* we're doing a retransmission */
9267 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9270 if (stcb->asoc.last_control_chunk_from == NULL) {
9271 if (stcb->asoc.alternate) {
9272 net = stcb->asoc.alternate;
9274 net = stcb->asoc.primary_destination;
9277 net = stcb->asoc.last_control_chunk_from;
9282 if (stcb->asoc.last_control_chunk_from == NULL) {
9283 if (stcb->asoc.alternate) {
9284 net = stcb->asoc.alternate;
9286 net = stcb->asoc.primary_destination;
9289 net = stcb->asoc.last_control_chunk_from;
9292 latest_ack->last_sent_to = net;
9294 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9295 if (ack->data == NULL) {
9298 /* copy the asconf_ack */
9299 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
9300 if (m_ack == NULL) {
9301 /* couldn't copy it */
9304 #ifdef SCTP_MBUF_LOGGING
9305 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9308 for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) {
9309 if (SCTP_BUF_IS_EXTENDED(mat)) {
9310 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9316 sctp_alloc_a_chunk(stcb, chk);
9320 sctp_m_freem(m_ack);
9323 chk->copy_by_ref = 0;
9327 atomic_add_int(&chk->whoTo->ref_count, 1);
9332 chk->send_size = ack->len;
9333 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9334 chk->rec.chunk_id.can_take_data = 1;
9335 chk->sent = SCTP_DATAGRAM_UNSENT;
9337 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
9338 chk->asoc = &stcb->asoc;
9340 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9341 chk->asoc->ctrl_queue_cnt++;
9348 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9349 struct sctp_tcb *stcb,
9350 struct sctp_association *asoc,
9351 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9352 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9358 * send out one MTU of retransmission. If fast_retransmit is
9359 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9360 * rwnd. For a Cookie or Asconf in the control chunk queue we
9361 * retransmit them by themselves.
9363 * For data chunks we will pick out the lowest TSN's in the sent_queue
9364 * marked for resend and bundle them all together (up to an MTU of
9365 * the destination). The address to send to should have been
9366 * selected/changed where the retransmission was marked (i.e. in FR
9367 * or t3-timeout routines).
9369 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9370 struct sctp_tmit_chunk *chk, *fwd;
9371 struct mbuf *m, *endofchain;
9372 struct sctp_nets *net = NULL;
9373 uint32_t tsns_sent = 0;
9374 int no_fragmentflg, bundle_at, cnt_thru;
9376 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9377 struct sctp_auth_chunk *auth = NULL;
9378 uint32_t auth_offset = 0;
9379 uint16_t auth_keyid;
9380 int override_ok = 1;
9381 int data_auth_reqd = 0;
9384 SCTP_TCB_LOCK_ASSERT(stcb);
9385 tmr_started = ctl_cnt = bundle_at = error = 0;
9390 endofchain = m = NULL;
9391 auth_keyid = stcb->asoc.authinfo.active_keyid;
9392 #ifdef SCTP_AUDITING_ENABLED
9393 sctp_audit_log(0xC3, 1);
9395 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9396 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9397 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9398 asoc->sent_queue_retran_cnt);
9399 asoc->sent_queue_cnt = 0;
9400 asoc->sent_queue_cnt_removeable = 0;
9401 /* send back 0/0 so we enter normal transmission */
9405 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9406 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9407 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9408 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9409 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9412 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9413 if (chk != asoc->str_reset) {
9415 * not eligible for retran if its
9422 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9426 * Add an AUTH chunk, if chunk requires it save the
9427 * offset into the chain for AUTH
9429 if ((auth == NULL) &&
9430 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9431 stcb->asoc.peer_auth_chunks))) {
9432 m = sctp_add_auth_chunk(m, &endofchain,
9433 &auth, &auth_offset,
9435 chk->rec.chunk_id.id);
9436 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9438 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9444 /* do we have control chunks to retransmit? */
9446 /* Start a timer no matter if we succeed or fail */
9447 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9448 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9449 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9450 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9451 chk->snd_count++; /* update our count */
9452 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9453 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9454 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9455 no_fragmentflg, 0, 0,
9456 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9457 chk->whoTo->port, NULL,
9460 SCTP_STAT_INCR(sctps_lowlevelerr);
9467 * We don't want to mark the net->sent time here since
9468 * we use it for HB, and retransmissions cannot measure RTT
9470 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9472 chk->sent = SCTP_DATAGRAM_SENT;
9473 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9477 /* Clean up the fwd-tsn list */
9478 sctp_clean_up_ctl(stcb, asoc, so_locked);
9483 * Ok, it is just data retransmission we need to do or that and a
9484 * fwd-tsn with it all.
9486 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9487 return (SCTP_RETRAN_DONE);
9489 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9490 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9491 /* not yet open, resend the cookie and that is it */
9494 #ifdef SCTP_AUDITING_ENABLED
9495 sctp_auditing(20, inp, stcb, NULL);
9497 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9498 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9499 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9500 /* No, not sent to this net or not ready for rtx */
9503 if (chk->data == NULL) {
9504 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9505 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9508 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9509 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9510 /* Gak, we have exceeded max unlucky retran, abort! */
9511 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9513 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9514 atomic_add_int(&stcb->asoc.refcnt, 1);
9515 sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
9516 SCTP_TCB_LOCK(stcb);
9517 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9518 return (SCTP_RETRAN_EXIT);
9520 /* pick up the net */
9522 switch (net->ro._l_addr.sa.sa_family) {
9525 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9530 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9539 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9540 /* No room in peers rwnd */
9543 tsn = asoc->last_acked_seq + 1;
9544 if (tsn == chk->rec.data.TSN_seq) {
9546 * we make a special exception for this
9547 * case. The peer has no rwnd but is missing
9548 * the lowest chunk.. which is probably what
9549 * is holding up the rwnd.
9551 goto one_chunk_around;
9556 if (asoc->peers_rwnd < mtu) {
9558 if ((asoc->peers_rwnd == 0) &&
9559 (asoc->total_flight == 0)) {
9560 chk->window_probe = 1;
9561 chk->whoTo->window_probe = 1;
9564 #ifdef SCTP_AUDITING_ENABLED
9565 sctp_audit_log(0xC3, 2);
9569 net->fast_retran_ip = 0;
9570 if (chk->rec.data.doing_fast_retransmit == 0) {
9572 * if no FR is in progress, skip destinations that have
9573 * flight_size > cwnd.
9575 if (net->flight_size >= net->cwnd) {
9580 * Mark the destination net to have FR recovery
9584 net->fast_retran_ip = 1;
9588 * if no AUTH is yet included and this chunk requires it,
9589 * make sure to account for it. We don't apply the size
9590 * until the AUTH chunk is actually added below in case
9591 * there is no room for this chunk.
9593 if (data_auth_reqd && (auth == NULL)) {
9594 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9598 if ((chk->send_size <= (mtu - dmtu)) ||
9599 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9600 /* ok we will add this one */
9601 if (data_auth_reqd) {
9603 m = sctp_add_auth_chunk(m,
9609 auth_keyid = chk->auth_keyid;
9611 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9612 } else if (override_ok) {
9613 auth_keyid = chk->auth_keyid;
9615 } else if (chk->auth_keyid != auth_keyid) {
9616 /* different keyid, so done bundling */
9620 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9622 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9625 /* Do clear IP_DF ? */
9626 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9629 /* update our MTU size */
9630 if (mtu > (chk->send_size + dmtu))
9631 mtu -= (chk->send_size + dmtu);
9634 data_list[bundle_at++] = chk;
9635 if (one_chunk && (asoc->total_flight <= 0)) {
9636 SCTP_STAT_INCR(sctps_windowprobed);
9639 if (one_chunk == 0) {
9641 * now, are there any more chunks forward of chk to pick
9644 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9645 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9646 /* Nope, not for retran */
9649 if (fwd->whoTo != net) {
9650 /* Nope, not the net in question */
9653 if (data_auth_reqd && (auth == NULL)) {
9654 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9657 if (fwd->send_size <= (mtu - dmtu)) {
9658 if (data_auth_reqd) {
9660 m = sctp_add_auth_chunk(m,
9666 auth_keyid = fwd->auth_keyid;
9668 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9669 } else if (override_ok) {
9670 auth_keyid = fwd->auth_keyid;
9672 } else if (fwd->auth_keyid != auth_keyid) {
9680 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9682 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9685 /* Do clear IP_DF ? */
9686 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9689 /* update our MTU size */
9690 if (mtu > (fwd->send_size + dmtu))
9691 mtu -= (fwd->send_size + dmtu);
9694 data_list[bundle_at++] = fwd;
9695 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9699 /* can't fit so we are done */
9704 /* Is there something to send for this destination? */
9707 * No matter if we fail or succeed we should start a
9708 * timer. A failure is like a lost IP packet :-)
9710 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9712 * no timer running on this destination
9715 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9718 /* Now lets send it, if there is anything to send :> */
9719 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9720 (struct sockaddr *)&net->ro._l_addr, m,
9721 auth_offset, auth, auth_keyid,
9722 no_fragmentflg, 0, 0,
9723 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9727 /* error, we could not output */
9728 SCTP_STAT_INCR(sctps_lowlevelerr);
9736 * We don't want to mark the net->sent time here
9737 * since we use it for HB, and retransmissions cannot
9740 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9742 /* For auto-close */
9744 if (*now_filled == 0) {
9745 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9746 *now = asoc->time_last_sent;
9749 asoc->time_last_sent = *now;
9751 *cnt_out += bundle_at;
9752 #ifdef SCTP_AUDITING_ENABLED
9753 sctp_audit_log(0xC4, bundle_at);
9756 tsns_sent = data_list[0]->rec.data.TSN_seq;
9758 for (i = 0; i < bundle_at; i++) {
9759 SCTP_STAT_INCR(sctps_sendretransdata);
9760 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9762 * When we have a revoked data, and we
9763 * retransmit it, then we clear the revoked
9764 * flag since this flag dictates if we
9765 * subtracted from the flight size
9767 if (data_list[i]->rec.data.chunk_was_revoked) {
9768 /* Deflate the cwnd */
9769 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9770 data_list[i]->rec.data.chunk_was_revoked = 0;
9772 data_list[i]->snd_count++;
9773 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9774 /* record the time */
9775 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9776 if (data_list[i]->book_size_scale) {
9778 * need to double the book size on
9781 data_list[i]->book_size_scale = 0;
9783 * Since we double the booksize, we
9784 * must also double the output queue
9785 * size, since this gets shrunk when
9786 * we free by this amount.
9788 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9789 data_list[i]->book_size *= 2;
9793 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9794 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9795 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9797 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9798 (uint32_t) (data_list[i]->send_size +
9799 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9801 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9802 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9803 data_list[i]->whoTo->flight_size,
9804 data_list[i]->book_size,
9805 (uintptr_t) data_list[i]->whoTo,
9806 data_list[i]->rec.data.TSN_seq);
9808 sctp_flight_size_increase(data_list[i]);
9809 sctp_total_flight_increase(stcb, data_list[i]);
9810 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9811 /* SWS sender side engages */
9812 asoc->peers_rwnd = 0;
9815 (data_list[i]->rec.data.doing_fast_retransmit)) {
9816 SCTP_STAT_INCR(sctps_sendfastretrans);
9817 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9818 (tmr_started == 0)) {
9820 * ok we just fast-retrans'd
9821 * the lowest TSN, i.e. the
9822 * first on the list. In
9823 * this case we want to give
9824 * some more time to get a
9825 * SACK back without a
9828 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9829 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9830 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9834 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9835 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9837 #ifdef SCTP_AUDITING_ENABLED
9838 sctp_auditing(21, inp, stcb, NULL);
9844 if (asoc->sent_queue_retran_cnt <= 0) {
9845 /* all done we have no more to retran */
9846 asoc->sent_queue_retran_cnt = 0;
9850 /* No more room in rwnd */
9853 /* stop the for loop here. we sent out a packet */
9860 sctp_timer_validation(struct sctp_inpcb *inp,
9861 struct sctp_tcb *stcb,
9862 struct sctp_association *asoc)
9864 struct sctp_nets *net;
9866 /* Validate that a timer is running somewhere */
9867 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9868 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9869 /* Here is a timer */
9873 SCTP_TCB_LOCK_ASSERT(stcb);
9874 /* Gak, we did not have a timer somewhere */
9875 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9876 if (asoc->alternate) {
9877 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9879 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9885 sctp_chunk_output(struct sctp_inpcb *inp,
9886 struct sctp_tcb *stcb,
9889 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9895 * Ok this is the generic chunk service queue. We must do the
9897 * - See if there are retransmits pending, if so we must
9899 * - Service the stream queue that is next, moving any
9900 * message (note I must get a complete message i.e.
9901 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9903 * - Check to see if the cwnd/rwnd allows any output, if so we
9904 * go ahead and formulate and send the low level chunks, making sure
9905 * to combine any control in the control chunk queue also.
9907 struct sctp_association *asoc;
9908 struct sctp_nets *net;
9909 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
9910 unsigned int burst_cnt = 0;
9914 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9917 unsigned int tot_frs = 0;
9920 /* The Nagle algorithm is only applied when handling a send call. */
9921 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9922 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9930 SCTP_TCB_LOCK_ASSERT(stcb);
9932 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9934 if ((un_sent <= 0) &&
9935 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9936 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9937 (asoc->sent_queue_retran_cnt == 0)) {
9938 /* Nothing to do unless there is something to be sent left */
9942 * Do we have something to send (data or control) AND a sack timer
9943 * running? If so, piggy-back the sack.
9945 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9946 sctp_send_sack(stcb, so_locked);
9947 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
9949 while (asoc->sent_queue_retran_cnt) {
9951 * Ok, it is retransmission time only, we send out only ONE
9952 * packet with a single call off to the retran code.
9954 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9956 * Special hook for handling cookies discarded
9957 * by the peer that carried data. Send cookie-ack only
9958 * and then the next call will get the retran's.
9960 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9962 &now, &now_filled, frag_point, so_locked);
9964 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9965 /* if its not from a HB then do it */
9967 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9973 * it's from any other place, we don't allow retran
9974 * output (only control)
9979 /* Can't send anymore */
9981 * now let's push out control by calling med-level
9982 * output once. This assures that we WILL send HB's
9985 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9987 &now, &now_filled, frag_point, so_locked);
9988 #ifdef SCTP_AUDITING_ENABLED
9989 sctp_auditing(8, inp, stcb, NULL);
9991 sctp_timer_validation(inp, stcb, asoc);
9996 * The count was off.. retran is not happening so do
9997 * the normal retransmission.
9999 #ifdef SCTP_AUDITING_ENABLED
10000 sctp_auditing(9, inp, stcb, NULL);
10002 if (ret == SCTP_RETRAN_EXIT) {
10007 if (from_where == SCTP_OUTPUT_FROM_T3) {
10008 /* Only one transmission allowed out of a timeout */
10009 #ifdef SCTP_AUDITING_ENABLED
10010 sctp_auditing(10, inp, stcb, NULL);
10012 /* Push out any control */
10013 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10014 &now, &now_filled, frag_point, so_locked);
10017 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10018 /* Hit FR burst limit */
10021 if ((num_out == 0) && (ret == 0)) {
10022 /* No more retrans to send */
10026 #ifdef SCTP_AUDITING_ENABLED
10027 sctp_auditing(12, inp, stcb, NULL);
10029 /* Check for bad destinations, if they exist move chunks around. */
10030 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10031 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10033 * if possible move things off of this address we
10034 * still may send below due to the dormant state but
10035 * we try to find an alternate address to send to
10036 * and if we have one we move all queued data on the
10037 * out wheel to this alternate address.
10039 if (net->ref_count > 1)
10040 sctp_move_chunks_from_net(stcb, net);
10043 * if ((asoc->sat_network) || (net->addr_is_local))
10044 * { burst_limit = asoc->max_burst *
10045 * SCTP_SAT_NETWORK_BURST_INCR; }
10047 if (asoc->max_burst > 0) {
10048 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10049 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10051 * JRS - Use the congestion
10052 * control given in the
10053 * congestion control module
10055 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10056 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10057 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10059 SCTP_STAT_INCR(sctps_maxburstqueued);
10061 net->fast_retran_ip = 0;
10063 if (net->flight_size == 0) {
10065 * Should be decaying the
10077 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10078 &reason_code, 0, from_where,
10079 &now, &now_filled, frag_point, so_locked);
10081 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10082 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10083 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10085 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10086 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10087 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10091 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10093 tot_out += num_out;
10095 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10096 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10097 if (num_out == 0) {
10098 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10103 * When the Nagle algorithm is used, look at how
10104 * much is unsent, then if it's smaller than an MTU
10105 * and we have data in flight we stop, except if we
10106 * are handling a fragmented user message.
10108 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10109 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
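/*
 * The unsent estimate adds one DATA chunk header per entry still
 * sitting on the stream queues, so the Nagle check below accounts for
 * the framing overhead those messages would add once bundled.
 */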
10110 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10111 (stcb->asoc.total_flight > 0) &&
10112 ((stcb->asoc.locked_on_sending == NULL) ||
10113 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
10117 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10118 TAILQ_EMPTY(&asoc->send_queue) &&
10119 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
10120 /* Nothing left to send */
10123 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10124 /* Nothing left to send */
10127 } while (num_out &&
10128 ((asoc->max_burst == 0) ||
10129 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10130 (burst_cnt < asoc->max_burst)));
10132 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10133 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10134 SCTP_STAT_INCR(sctps_maxburstqueued);
10135 asoc->burst_limit_applied = 1;
10136 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10137 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10140 asoc->burst_limit_applied = 0;
10143 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10144 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10146 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10150 * Now we need to clean up the control chunk chain if an ECNE is on
10151 * it. It must be marked as UNSENT again so the next call will continue
10152 * to send it until such time that we get a CWR, to remove it.
10154 if (stcb->asoc.ecn_echo_cnt_onq)
10155 sctp_fix_ecn_echo(asoc);
10162 struct sctp_inpcb *inp,
10164 struct sockaddr *addr,
10165 struct mbuf *control,
10170 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10173 if (inp->sctp_socket == NULL) {
10174 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10177 return (sctp_sosend(inp->sctp_socket,
10179 (struct uio *)NULL,
10187 send_forward_tsn(struct sctp_tcb *stcb,
10188 struct sctp_association *asoc)
10190 struct sctp_tmit_chunk *chk;
10191 struct sctp_forward_tsn_chunk *fwdtsn;
10192 uint32_t advance_peer_ack_point;
10194 SCTP_TCB_LOCK_ASSERT(stcb);
10195 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10196 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10197 /* mark it as unsent */
10198 chk->sent = SCTP_DATAGRAM_UNSENT;
10199 chk->snd_count = 0;
10200 /* Do we correct its output location? */
10202 sctp_free_remote_addr(chk->whoTo);
10205 goto sctp_fill_in_rest;
10208 /* Ok if we reach here we must build one */
10209 sctp_alloc_a_chunk(stcb, chk);
10213 asoc->fwd_tsn_cnt++;
10214 chk->copy_by_ref = 0;
10215 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10216 chk->rec.chunk_id.can_take_data = 0;
10219 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
10220 if (chk->data == NULL) {
10221 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10224 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10225 chk->sent = SCTP_DATAGRAM_UNSENT;
10226 chk->snd_count = 0;
10227 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10228 asoc->ctrl_queue_cnt++;
10231 * Here we go through and fill out the part that deals with
10232 * stream/seq of the ones we skip.
10234 SCTP_BUF_LEN(chk->data) = 0;
10236 struct sctp_tmit_chunk *at, *tp1, *last;
10237 struct sctp_strseq *strseq;
10238 unsigned int cnt_of_space, i, ovh;
10239 unsigned int space_needed;
10240 unsigned int cnt_of_skipped = 0;
10242 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10243 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
10244 /* no more to look at */
10247 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10248 /* We don't report these */
10253 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10254 (cnt_of_skipped * sizeof(struct sctp_strseq)));
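/*
 * The chunk needs one sctp_strseq entry per skipped TSN on top of the
 * fixed header. If that does not fit in the mbuf's trailing space
 * (itself clamped to the smallest path MTU below), cnt_of_skipped is
 * reduced and the advanced peer ack point is lowered accordingly.
 */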
10256 cnt_of_space = M_TRAILINGSPACE(chk->data);
10258 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10259 ovh = SCTP_MIN_OVERHEAD;
10261 ovh = SCTP_MIN_V4_OVERHEAD;
10263 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10264 /* trim to a mtu size */
10265 cnt_of_space = asoc->smallest_mtu - ovh;
10267 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10268 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10269 0xff, 0, cnt_of_skipped,
10270 asoc->advanced_peer_ack_point);
10273 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10274 if (cnt_of_space < space_needed) {
10276 * ok we must trim down the chunk by lowering the
10277 * advance peer ack point.
10279 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10280 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10281 0xff, 0xff, cnt_of_space,
10284 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10285 cnt_of_skipped /= sizeof(struct sctp_strseq);
10287 * Go through and find the TSN that will be the one
10290 at = TAILQ_FIRST(&asoc->sent_queue);
10292 for (i = 0; i < cnt_of_skipped; i++) {
10293 tp1 = TAILQ_NEXT(at, sctp_next);
10300 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10301 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10302 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10303 asoc->advanced_peer_ack_point);
10307 * last now points to last one I can report, update
10311 advance_peer_ack_point = last->rec.data.TSN_seq;
10312 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10313 cnt_of_skipped * sizeof(struct sctp_strseq);
10315 chk->send_size = space_needed;
10316 /* Setup the chunk */
10317 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10318 fwdtsn->ch.chunk_length = htons(chk->send_size);
10319 fwdtsn->ch.chunk_flags = 0;
10320 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10321 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10322 SCTP_BUF_LEN(chk->data) = chk->send_size;
10325 * Move pointer to after the fwdtsn and transfer to the
10328 strseq = (struct sctp_strseq *)fwdtsn;
10330 * Now populate the strseq list. This is done blindly
10331 * without pulling out duplicate stream info. This is
10332 * inefficient but won't harm the process since the peer will
10333 * look at these in sequence and will thus release anything.
10334 * It could mean we exceed the PMTU and chop off some that
10335 * we could have included.. but this is unlikely (aka 1432/4
10336 * would mean 300+ stream seq's would have to be reported in
10337 * one FWD-TSN). With a bit of work we can later FIX this to
10338 * optimize and pull out duplicates.. but it does add more
10339 * overhead. So for now... not!
10341 at = TAILQ_FIRST(&asoc->sent_queue);
10342 for (i = 0; i < cnt_of_skipped; i++) {
10343 tp1 = TAILQ_NEXT(at, sctp_next);
10346 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10347 /* We don't report these */
10352 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10353 at->rec.data.fwd_tsn_cnt = 0;
10355 strseq->stream = ntohs(at->rec.data.stream_number);
10356 strseq->sequence = ntohs(at->rec.data.stream_seq);
10365 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10366 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10372 * Queue up a SACK or NR-SACK in the control queue.
10373 * We must first check to see if a SACK or NR-SACK is
10374 * somehow on the control queue.
10375 * If so, we will take and remove the old one.
10377 struct sctp_association *asoc;
10378 struct sctp_tmit_chunk *chk, *a_chk;
10379 struct sctp_sack_chunk *sack;
10380 struct sctp_nr_sack_chunk *nr_sack;
10381 struct sctp_gap_ack_block *gap_descriptor;
10382 struct sack_track *selector;
10387 int limit_reached = 0;
10388 unsigned int i, siz, j;
10389 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10392 uint32_t highest_tsn;
10397 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
10398 (stcb->asoc.peer_supports_nr_sack == 1)) {
10399 type = SCTP_NR_SELECTIVE_ACK;
10401 type = SCTP_SELECTIVE_ACK;
10404 asoc = &stcb->asoc;
10405 SCTP_TCB_LOCK_ASSERT(stcb);
10406 if (asoc->last_data_chunk_from == NULL) {
10407 /* Hmm we never received anything */
10410 sctp_slide_mapping_arrays(stcb);
10411 sctp_set_rwnd(stcb, asoc);
10412 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10413 if (chk->rec.chunk_id.id == type) {
10414 /* Hmm, found a sack already on queue, remove it */
10415 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10416 asoc->ctrl_queue_cnt--;
10419 sctp_m_freem(a_chk->data);
10420 a_chk->data = NULL;
10422 if (a_chk->whoTo) {
10423 sctp_free_remote_addr(a_chk->whoTo);
10424 a_chk->whoTo = NULL;
10429 if (a_chk == NULL) {
10430 sctp_alloc_a_chunk(stcb, a_chk);
10431 if (a_chk == NULL) {
10432 /* No memory so we drop the idea, and set a timer */
10433 if (stcb->asoc.delayed_ack) {
10434 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10435 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10436 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10437 stcb->sctp_ep, stcb, NULL);
10439 stcb->asoc.send_sack = 1;
10443 a_chk->copy_by_ref = 0;
10444 a_chk->rec.chunk_id.id = type;
10445 a_chk->rec.chunk_id.can_take_data = 1;
10447 /* Clear our pkt counts */
10448 asoc->data_pkts_seen = 0;
10450 a_chk->asoc = asoc;
10451 a_chk->snd_count = 0;
10452 a_chk->send_size = 0; /* fill in later */
10453 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10454 a_chk->whoTo = NULL;
10456 if ((asoc->numduptsns) ||
10457 (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
10459 * Ok, we have some duplicates or the destination for the
10460 * sack is unreachable, let's see if we can select an
10461 * alternate to asoc->last_data_chunk_from
10463 if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
10464 (asoc->used_alt_onsack > asoc->numnets)) {
10465 /* We used an alt last time, don't use one this time */
10466 a_chk->whoTo = NULL;
10468 asoc->used_alt_onsack++;
10469 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10471 if (a_chk->whoTo == NULL) {
10472 /* Nope, no alternate */
10473 a_chk->whoTo = asoc->last_data_chunk_from;
10474 asoc->used_alt_onsack = 0;
10478 * No duplicates so we use the last place we received data
10481 asoc->used_alt_onsack = 0;
10482 a_chk->whoTo = asoc->last_data_chunk_from;
10484 if (a_chk->whoTo) {
10485 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10487 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10488 highest_tsn = asoc->highest_tsn_inside_map;
10490 highest_tsn = asoc->highest_tsn_inside_nr_map;
10492 if (highest_tsn == asoc->cumulative_tsn) {
10494 if (type == SCTP_SELECTIVE_ACK) {
10495 space_req = sizeof(struct sctp_sack_chunk);
10497 space_req = sizeof(struct sctp_nr_sack_chunk);
10500 /* gaps get a cluster */
10501 space_req = MCLBYTES;
10503 /* Ok now let's formulate an MBUF with our sack */
10504 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
10505 if ((a_chk->data == NULL) ||
10506 (a_chk->whoTo == NULL)) {
10507 /* rats, no mbuf memory */
10509 /* was a problem with the destination */
10510 sctp_m_freem(a_chk->data);
10511 a_chk->data = NULL;
10513 sctp_free_a_chunk(stcb, a_chk, so_locked);
10514 /* sa_ignore NO_NULL_CHK */
10515 if (stcb->asoc.delayed_ack) {
10516 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10517 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10518 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10519 stcb->sctp_ep, stcb, NULL);
10521 stcb->asoc.send_sack = 1;
10525 /* ok, let's go through and fill it in */
10526 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10527 space = M_TRAILINGSPACE(a_chk->data);
10528 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10529 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10531 limit = mtod(a_chk->data, caddr_t);
10536 if ((asoc->sctp_cmt_on_off > 0) &&
10537 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10539 * CMT DAC algorithm: If 2 packets (i.e., binary 10) have been
10540 * received, then set the high bit to 1, else 0. Reset
10543 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10544 asoc->cmt_dac_pkts_rcvd = 0;
10546 #ifdef SCTP_ASOCLOG_OF_TSNS
10547 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10548 stcb->asoc.cumack_log_atsnt++;
10549 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10550 stcb->asoc.cumack_log_atsnt = 0;
10553 /* reset the reader's interpretation */
10554 stcb->freed_by_sorcv_sincelast = 0;
10556 if (type == SCTP_SELECTIVE_ACK) {
10557 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10559 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10560 if (highest_tsn > asoc->mapping_array_base_tsn) {
10561 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10563 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10567 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10568 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10569 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10570 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10572 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10576 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10579 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10581 if (((type == SCTP_SELECTIVE_ACK) &&
10582 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10583 ((type == SCTP_NR_SELECTIVE_ACK) &&
10584 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10585 /* we have a gap .. maybe */
10586 for (i = 0; i < siz; i++) {
10587 tsn_map = asoc->mapping_array[i];
10588 if (type == SCTP_SELECTIVE_ACK) {
10589 tsn_map |= asoc->nr_mapping_array[i];
10593 * Clear all bits corresponding to TSNs
10594 * smaller or equal to the cumulative TSN.
10596 tsn_map &= (~0 << (1 - offset));
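/*
 * Each byte of the mapping array indexes the precomputed sack_array
 * table, which lists up to four gap-ack blocks for that bit pattern
 * plus flags telling whether its edges can merge with the neighboring
 * bytes; gap blocks are thus emitted by table lookup instead of
 * scanning individual bits.
 */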
10598 selector = &sack_array[tsn_map];
10599 if (mergeable && selector->right_edge) {
10601 * Backup, left and right edges were ok to
10607 if (selector->num_entries == 0)
10610 for (j = 0; j < selector->num_entries; j++) {
10611 if (mergeable && selector->right_edge) {
10613 * do a merge by NOT setting
10619 * no merge, set the left
10623 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10625 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10628 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10634 if (selector->left_edge) {
10638 if (limit_reached) {
10639 /* Reached the limit stop */
10645 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10646 (limit_reached == 0)) {
10650 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10651 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10653 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10656 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10659 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10661 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10662 /* we have a gap .. maybe */
10663 for (i = 0; i < siz; i++) {
10664 tsn_map = asoc->nr_mapping_array[i];
10667 * Clear all bits corresponding to
10668 * TSNs smaller or equal to the
10671 tsn_map &= (~0 << (1 - offset));
10673 selector = &sack_array[tsn_map];
10674 if (mergeable && selector->right_edge) {
10676 * Backup, left and right edges were
10679 num_nr_gap_blocks--;
10682 if (selector->num_entries == 0)
10685 for (j = 0; j < selector->num_entries; j++) {
10686 if (mergeable && selector->right_edge) {
10688 * do a merge by NOT
10695 * no merge, set the
10699 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10701 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10702 num_nr_gap_blocks++;
10704 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10710 if (selector->left_edge) {
10714 if (limit_reached) {
10715 /* Reached the limit stop */
10722 /* now we must add any dups we are going to report. */
10723 if ((limit_reached == 0) && (asoc->numduptsns)) {
10724 dup = (uint32_t *) gap_descriptor;
10725 for (i = 0; i < asoc->numduptsns; i++) {
10726 *dup = htonl(asoc->dup_tsns[i]);
10729 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10734 asoc->numduptsns = 0;
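/*
 * Duplicate TSNs ride as raw 32-bit values appended directly
 * behind the last gap block, for as long as they still fit below
 * limit; once reported, the saved list is discarded.
 */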
10737 * now that the chunk is prepared queue it to the control chunk
10740 if (type == SCTP_SELECTIVE_ACK) {
10741 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
10742 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10743 num_dups * sizeof(int32_t);
10744 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10745 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10746 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10747 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10748 sack->sack.num_dup_tsns = htons(num_dups);
10749 sack->ch.chunk_type = type;
10750 sack->ch.chunk_flags = flags;
10751 sack->ch.chunk_length = htons(a_chk->send_size);
10753 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
10754 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10755 num_dups * sizeof(int32_t);
10756 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10757 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10758 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10759 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10760 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10761 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10762 nr_sack->nr_sack.reserved = 0;
10763 nr_sack->ch.chunk_type = type;
10764 nr_sack->ch.chunk_flags = flags;
10765 nr_sack->ch.chunk_length = htons(a_chk->send_size);
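/*
 * Either way the chunk size works out to the fixed (NR-)SACK
 * header plus one 4-byte gap block per gap reported plus 4 bytes
 * per duplicate TSN; e.g. a plain SACK carrying two gap blocks and
 * one duplicate occupies sizeof(struct sctp_sack_chunk) +
 * 2 * sizeof(struct sctp_gap_ack_block) + sizeof(int32_t) bytes
 * (28 bytes with the usual 16-byte SACK header).
 */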
10767 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10768 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10769 asoc->ctrl_queue_cnt++;
10770 asoc->send_sack = 0;
10771 SCTP_STAT_INCR(sctps_sendsacks);
10776 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10777 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10782 struct mbuf *m_abort;
10783 struct mbuf *m_out = NULL, *m_end = NULL;
10784 struct sctp_abort_chunk *abort = NULL;
10786 uint32_t auth_offset = 0;
10787 struct sctp_auth_chunk *auth = NULL;
10788 struct sctp_nets *net;
10791 * Add an AUTH chunk, if this chunk requires it, and save the offset
10792 * into the chain for AUTH
10794 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10795 stcb->asoc.peer_auth_chunks)) {
10796 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
10797 stcb, SCTP_ABORT_ASSOCIATION);
10798 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10800 SCTP_TCB_LOCK_ASSERT(stcb);
10801 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
10802 if (m_abort == NULL) {
10805 sctp_m_freem(m_out);
10808 /* link in any error */
10809 SCTP_BUF_NEXT(m_abort) = operr;
10816 sz += SCTP_BUF_LEN(n);
10817 n = SCTP_BUF_NEXT(n);
10820 SCTP_BUF_LEN(m_abort) = sizeof(*abort);
10821 if (m_out == NULL) {
10822 /* NO Auth chunk prepended, so reserve space in front */
10823 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10826 /* Put AUTH chunk at the front of the chain */
10827 SCTP_BUF_NEXT(m_end) = m_abort;
10829 if (stcb->asoc.alternate) {
10830 net = stcb->asoc.alternate;
10832 net = stcb->asoc.primary_destination;
10834 /* fill in the ABORT chunk */
10835 abort = mtod(m_abort, struct sctp_abort_chunk *);
10836 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10837 abort->ch.chunk_flags = 0;
10838 abort->ch.chunk_length = htons(sizeof(*abort) + sz);
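/*
 * sz was summed over the operr chain above, so the advertised
 * length covers the ABORT chunk header plus every chained error
 * cause; the causes themselves travel in the mbufs linked behind
 * m_abort.
 */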
10840 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10841 (struct sockaddr *)&net->ro._l_addr,
10842 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10843 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10844 stcb->asoc.primary_destination->port, NULL,
10847 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10851 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10852 struct sctp_nets *net,
10855 /* formulate and SEND a SHUTDOWN-COMPLETE */
10856 struct mbuf *m_shutdown_comp;
10857 struct sctp_shutdown_complete_chunk *shutdown_complete;
10861 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
10862 if (m_shutdown_comp == NULL) {
10866 if (reflect_vtag) {
10867 flags = SCTP_HAD_NO_TCB;
10868 vtag = stcb->asoc.my_vtag;
10871 vtag = stcb->asoc.peer_vtag;
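/*
 * With reflect_vtag set we send our own tag and also set the T bit
 * (SCTP_HAD_NO_TCB) in the chunk flags, telling the peer that the
 * verification tag was reflected from the packet that triggered
 * this SHUTDOWN-COMPLETE rather than being the tag it normally
 * expects.
 */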
10873 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10874 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10875 shutdown_complete->ch.chunk_flags = flags;
10876 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10877 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10878 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10879 (struct sockaddr *)&net->ro._l_addr,
10880 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
10881 stcb->sctp_ep->sctp_lport, stcb->rport,
10885 SCTP_SO_NOT_LOCKED);
10886 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10891 sctp_send_resp_msg(struct mbuf *m, struct sctphdr *sh, uint32_t vtag,
10892 uint8_t type, struct mbuf *cause,
10893 uint8_t use_mflowid, uint32_t mflowid,
10894 uint32_t vrf_id, uint16_t port)
10896 struct mbuf *o_pak;
10898 struct sctphdr *shout;
10899 struct sctp_chunkhdr *ch;
10901 struct udphdr *udp;
10902 int len, cause_len, padding_len, ret;
10906 struct ip *iph_out;
10910 struct ip6_hdr *ip6, *ip6_out;
10914 /* Compute the length of the cause and add final padding. */
10916 if (cause != NULL) {
10917 struct mbuf *m_at, *m_last = NULL;
10919 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
10920 if (SCTP_BUF_NEXT(m_at) == NULL)
10922 cause_len += SCTP_BUF_LEN(m_at);
10924 padding_len = cause_len % 4;
10925 if (padding_len != 0) {
10926 padding_len = 4 - padding_len;
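/*
 * Error causes must be padded out to a 4-byte boundary, so e.g. a
 * 6-byte cause gets 2 bytes of padding appended to its last mbuf,
 * while a cause whose length is already a multiple of 4 needs none.
 */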
10928 if (padding_len != 0) {
10929 if (sctp_add_pad_tombuf(m_last, padding_len)) {
10930 sctp_m_freem(cause);
10937 /* Get an mbuf for the header. */
10938 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
10939 iph = mtod(m, struct ip *);
10940 switch (iph->ip_v) {
10943 len += sizeof(struct ip);
10947 case IPV6_VERSION >> 4:
10948 len += sizeof(struct ip6_hdr);
10955 len += sizeof(struct udphdr);
10957 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
10958 if (mout == NULL) {
10960 sctp_m_freem(cause);
10964 SCTP_BUF_RESV_UF(mout, max_linkhdr);
10965 SCTP_BUF_LEN(mout) = len;
10966 SCTP_BUF_NEXT(mout) = cause;
10967 if (use_mflowid != 0) {
10968 mout->m_pkthdr.flowid = mflowid;
10969 mout->m_flags |= M_FLOWID;
10977 switch (iph->ip_v) {
10980 iph_out = mtod(mout, struct ip *);
10981 iph_out->ip_v = IPVERSION;
10982 iph_out->ip_hl = (sizeof(struct ip) >> 2);
10983 iph_out->ip_tos = 0;
10984 iph_out->ip_id = ip_newid();
10985 iph_out->ip_off = 0;
10986 iph_out->ip_ttl = MODULE_GLOBAL(ip_defttl);
10988 iph_out->ip_p = IPPROTO_UDP;
10990 iph_out->ip_p = IPPROTO_SCTP;
10992 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
10993 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
10994 iph_out->ip_sum = 0;
10995 len = sizeof(struct ip);
10996 shout = (struct sctphdr *)((caddr_t)iph_out + len);
11000 case IPV6_VERSION >> 4:
11001 ip6 = (struct ip6_hdr *)iph;
11002 ip6_out = mtod(mout, struct ip6_hdr *);
11003 ip6_out->ip6_flow = htonl(0x60000000);
11004 if (V_ip6_auto_flowlabel) {
11005 ip6_out->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11007 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11009 ip6_out->ip6_nxt = IPPROTO_UDP;
11011 ip6_out->ip6_nxt = IPPROTO_SCTP;
11013 ip6_out->ip6_src = ip6->ip6_dst;
11014 ip6_out->ip6_dst = ip6->ip6_src;
11015 len = sizeof(struct ip6_hdr);
11016 shout = (struct sctphdr *)((caddr_t)ip6_out + len);
11021 shout = mtod(mout, struct sctphdr *);
11025 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11026 sctp_m_freem(mout);
11029 udp = (struct udphdr *)shout;
11030 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11031 udp->uh_dport = port;
11033 udp->uh_ulen = htons(sizeof(struct udphdr) +
11034 sizeof(struct sctphdr) +
11035 sizeof(struct sctp_chunkhdr) +
11036 cause_len + padding_len);
11037 len += sizeof(struct udphdr);
11038 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
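/*
 * UDP encapsulation: when port is non-zero a UDP header is placed
 * between the IP header and the SCTP common header.  Its source
 * port is the locally configured tunneling port, its destination
 * port is the encapsulation port handed in by the caller, and
 * uh_ulen covers everything from the UDP header through the padded
 * cause.
 */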
11042 shout->src_port = sh->dest_port;
11043 shout->dest_port = sh->src_port;
11044 shout->checksum = 0;
11046 shout->v_tag = htonl(vtag);
11048 shout->v_tag = sh->v_tag;
11050 len += sizeof(struct sctphdr);
11051 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11052 ch->chunk_type = type;
11054 ch->chunk_flags = 0;
11056 ch->chunk_flags = SCTP_HAD_NO_TCB;
11058 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
11059 len += sizeof(struct sctp_chunkhdr);
11060 len += cause_len + padding_len;
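/*
 * Note that ch->chunk_length deliberately leaves out the final
 * padding while the running len does include it: the chunk length
 * field never counts padding, but the packet on the wire carries it.
 */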
11062 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11063 sctp_m_freem(mout);
11066 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11068 if (iph_out != NULL) {
11069 /* zap the stack pointer to the route */
11070 bzero(&ro, sizeof(sctp_route_t));
11073 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11078 iph_out->ip_len = len;
11079 #ifdef SCTP_PACKET_LOGGING
11080 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11081 sctp_packet_log(mout, len);
11085 #if defined(SCTP_WITH_NO_CSUM)
11086 SCTP_STAT_INCR(sctps_sendnocrc);
11088 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11089 SCTP_STAT_INCR(sctps_sendswcrc);
11092 SCTP_ENABLE_UDP_CSUM(o_pak);
11095 #if defined(SCTP_WITH_NO_CSUM)
11096 SCTP_STAT_INCR(sctps_sendnocrc);
11098 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11099 mout->m_pkthdr.csum_data = 0;
11100 SCTP_STAT_INCR(sctps_sendhwcrc);
11103 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11104 /* Free the route if we got one back */
11111 if (ip6_out != NULL) {
11112 ip6_out->ip6_plen = len - sizeof(struct ip6_hdr);
11113 #ifdef SCTP_PACKET_LOGGING
11114 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11115 sctp_packet_log(mout, len);
11119 #if defined(SCTP_WITH_NO_CSUM)
11120 SCTP_STAT_INCR(sctps_sendnocrc);
11122 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11123 SCTP_STAT_INCR(sctps_sendswcrc);
11125 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11126 udp->uh_sum = 0xffff;
11129 #if defined(SCTP_WITH_NO_CSUM)
11130 SCTP_STAT_INCR(sctps_sendnocrc);
11132 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11133 mout->m_pkthdr.csum_data = 0;
11134 SCTP_STAT_INCR(sctps_sendhwcrc);
11137 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11140 SCTP_STAT_INCR(sctps_sendpackets);
11141 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11142 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11147 sctp_send_shutdown_complete2(struct mbuf *m, struct sctphdr *sh,
11148 uint8_t use_mflowid, uint32_t mflowid,
11149 uint32_t vrf_id, uint16_t port)
11151 sctp_send_resp_msg(m, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11152 use_mflowid, mflowid,
11157 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11158 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11163 struct sctp_tmit_chunk *chk;
11164 struct sctp_heartbeat_chunk *hb;
11165 struct timeval now;
11167 SCTP_TCB_LOCK_ASSERT(stcb);
11171 (void)SCTP_GETTIME_TIMEVAL(&now);
11172 switch (net->ro._l_addr.sa.sa_family) {
11184 sctp_alloc_a_chunk(stcb, chk);
11186 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11189 chk->copy_by_ref = 0;
11190 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11191 chk->rec.chunk_id.can_take_data = 1;
11192 chk->asoc = &stcb->asoc;
11193 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11195 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11196 if (chk->data == NULL) {
11197 sctp_free_a_chunk(stcb, chk, so_locked);
11200 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11201 SCTP_BUF_LEN(chk->data) = chk->send_size;
11202 chk->sent = SCTP_DATAGRAM_UNSENT;
11203 chk->snd_count = 0;
11205 atomic_add_int(&chk->whoTo->ref_count, 1);
11206 /* Now we have an mbuf that we can fill in with the details */
11207 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11208 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11209 /* fill out chunk header */
11210 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11211 hb->ch.chunk_flags = 0;
11212 hb->ch.chunk_length = htons(chk->send_size);
11213 /* Fill out hb parameter */
11214 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11215 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11216 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11217 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
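/*
 * The heartbeat info parameter is echoed back unchanged in the
 * HEARTBEAT-ACK, so storing the current time here lets us compute
 * an RTT sample for this path when the ACK returns.
 */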
11218 /* Did our user request this one, put it in */
11219 hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
11220 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11221 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11223 * we only take from the entropy pool if the address is not
11226 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11227 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11229 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11230 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11232 switch (net->ro._l_addr.sa.sa_family) {
11235 memcpy(hb->heartbeat.hb_info.address,
11236 &net->ro._l_addr.sin.sin_addr,
11237 sizeof(net->ro._l_addr.sin.sin_addr));
11242 memcpy(hb->heartbeat.hb_info.address,
11243 &net->ro._l_addr.sin6.sin6_addr,
11244 sizeof(net->ro._l_addr.sin6.sin6_addr));
11251 net->hb_responded = 0;
11252 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11253 stcb->asoc.ctrl_queue_cnt++;
11254 SCTP_STAT_INCR(sctps_sendheartbeat);
11259 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11262 struct sctp_association *asoc;
11263 struct sctp_ecne_chunk *ecne;
11264 struct sctp_tmit_chunk *chk;
11269 asoc = &stcb->asoc;
11270 SCTP_TCB_LOCK_ASSERT(stcb);
11271 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11272 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11273 /* found a previous ECN_ECHO; update it if needed */
11274 uint32_t cnt, ctsn;
11276 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11277 ctsn = ntohl(ecne->tsn);
11278 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11279 ecne->tsn = htonl(high_tsn);
11280 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11282 cnt = ntohl(ecne->num_pkts_since_cwr);
11284 ecne->num_pkts_since_cwr = htonl(cnt);
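/*
 * An ECN-Echo already queued to this destination is updated in
 * place: its TSN is raised when a higher marked TSN has been seen
 * and its packets-since-CWR count is refreshed, so at most one
 * ECNE per destination should sit on the control queue.
 */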
11288 /* nope, could not find one to update, so we must build one */
11289 sctp_alloc_a_chunk(stcb, chk);
11293 chk->copy_by_ref = 0;
11294 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11295 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11296 chk->rec.chunk_id.can_take_data = 0;
11297 chk->asoc = &stcb->asoc;
11298 chk->send_size = sizeof(struct sctp_ecne_chunk);
11299 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11300 if (chk->data == NULL) {
11301 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11304 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11305 SCTP_BUF_LEN(chk->data) = chk->send_size;
11306 chk->sent = SCTP_DATAGRAM_UNSENT;
11307 chk->snd_count = 0;
11309 atomic_add_int(&chk->whoTo->ref_count, 1);
11311 stcb->asoc.ecn_echo_cnt_onq++;
11312 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11313 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11314 ecne->ch.chunk_flags = 0;
11315 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11316 ecne->tsn = htonl(high_tsn);
11317 ecne->num_pkts_since_cwr = htonl(1);
11318 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11319 asoc->ctrl_queue_cnt++;
11323 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11324 struct mbuf *m, int iphlen, int bad_crc)
11326 struct sctp_association *asoc;
11327 struct sctp_pktdrop_chunk *drp;
11328 struct sctp_tmit_chunk *chk;
11335 struct ip6_hdr *ip6h;
11338 int fullsz = 0, extra = 0;
11341 struct sctp_chunkhdr *ch, chunk_buf;
11342 unsigned int chk_length;
11347 asoc = &stcb->asoc;
11348 SCTP_TCB_LOCK_ASSERT(stcb);
11349 if (asoc->peer_supports_pktdrop == 0) {
11351 * peer must declare support before I send one.
11355 if (stcb->sctp_socket == NULL) {
11358 sctp_alloc_a_chunk(stcb, chk);
11362 chk->copy_by_ref = 0;
11363 iph = mtod(m, struct ip *);
11365 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11368 switch (iph->ip_v) {
11372 len = chk->send_size = iph->ip_len;
11376 case IPV6_VERSION >> 4:
11378 ip6h = mtod(m, struct ip6_hdr *);
11379 len = chk->send_size = ntohs(ip6h->ip6_plen);
11385 /* Validate that we do not have an ABORT in here. */
11386 offset = iphlen + sizeof(struct sctphdr);
11387 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11388 sizeof(*ch), (uint8_t *) & chunk_buf);
11389 while (ch != NULL) {
11390 chk_length = ntohs(ch->chunk_length);
11391 if (chk_length < sizeof(*ch)) {
11392 /* break to abort land */
11395 switch (ch->chunk_type) {
11396 case SCTP_PACKET_DROPPED:
11397 case SCTP_ABORT_ASSOCIATION:
11398 case SCTP_INITIATION_ACK:
11400 * We don't respond with a PKT-DROP to an ABORT
11401 * or PKT-DROP. We also do not respond to an
11402 * INIT-ACK, because we can't know if the initiation
11403 * tag is correct or not.
11405 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11410 offset += SCTP_SIZE32(chk_length);
11411 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11412 sizeof(*ch), (uint8_t *) & chunk_buf);
11415 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11416 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11418 * only send 1 MTU's worth; trim off the excess at the end.
11420 fullsz = len - extra;
11421 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
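/*
 * The echoed packet is capped at roughly one MTU: the
 * pre-truncation length is remembered in fullsz and later reported
 * in trunc_len together with the SCTP_PACKET_TRUNCATED flag, while
 * only the leading portion of the dropped packet is copied into
 * the PKT-DROP report.
 */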
11424 chk->asoc = &stcb->asoc;
11425 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11426 if (chk->data == NULL) {
11428 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11431 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11432 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11434 sctp_m_freem(chk->data);
11438 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11439 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11440 chk->book_size_scale = 0;
11442 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11443 drp->trunc_len = htons(fullsz);
11445 * Len was already adjusted above to the size minus overhead; now take
11446 * the pkt_drop chunk itself out of it as well.
11448 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
11449 len = chk->send_size;
11451 /* no truncation needed */
11452 drp->ch.chunk_flags = 0;
11453 drp->trunc_len = htons(0);
11456 drp->ch.chunk_flags |= SCTP_BADCRC;
11458 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11459 SCTP_BUF_LEN(chk->data) = chk->send_size;
11460 chk->sent = SCTP_DATAGRAM_UNSENT;
11461 chk->snd_count = 0;
11463 /* we should hit here */
11465 atomic_add_int(&chk->whoTo->ref_count, 1);
11469 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11470 chk->rec.chunk_id.can_take_data = 1;
11471 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11472 drp->ch.chunk_length = htons(chk->send_size);
11473 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11477 drp->bottle_bw = htonl(spc);
11478 if (asoc->my_rwnd) {
11479 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11480 asoc->size_on_all_streams +
11481 asoc->my_rwnd_control_len +
11482 stcb->sctp_socket->so_rcv.sb_cc);
11485 * If my rwnd is 0, possibly from mbuf depletion as well as
11486 * space used, tell the peer there is NO space aka onq == bw
11488 drp->current_onq = htonl(spc);
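/*
 * bottle_bw advertises our receive socket buffer limit and
 * current_onq how much of it is already consumed (reassembly
 * queue, per-stream queues, control overhead and data sitting in
 * so_rcv); when rwnd is zero we simply report onq == bw so the
 * peer sees no space at all.
 */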
11492 m_copydata(m, iphlen, len, (caddr_t)datap);
11493 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11494 asoc->ctrl_queue_cnt++;
11498 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11500 struct sctp_association *asoc;
11501 struct sctp_cwr_chunk *cwr;
11502 struct sctp_tmit_chunk *chk;
11504 asoc = &stcb->asoc;
11505 SCTP_TCB_LOCK_ASSERT(stcb);
11509 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11510 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11512 * found a previous CWR queued to the same
11513 * destination; update it if needed
11517 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11518 ctsn = ntohl(cwr->tsn);
11519 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11520 cwr->tsn = htonl(high_tsn);
11522 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11523 /* Make sure override is carried */
11524 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11529 sctp_alloc_a_chunk(stcb, chk);
11533 chk->copy_by_ref = 0;
11534 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11535 chk->rec.chunk_id.can_take_data = 1;
11536 chk->asoc = &stcb->asoc;
11537 chk->send_size = sizeof(struct sctp_cwr_chunk);
11538 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11539 if (chk->data == NULL) {
11540 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11543 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11544 SCTP_BUF_LEN(chk->data) = chk->send_size;
11545 chk->sent = SCTP_DATAGRAM_UNSENT;
11546 chk->snd_count = 0;
11548 atomic_add_int(&chk->whoTo->ref_count, 1);
11549 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11550 cwr->ch.chunk_type = SCTP_ECN_CWR;
11551 cwr->ch.chunk_flags = override;
11552 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11553 cwr->tsn = htonl(high_tsn);
11554 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11555 asoc->ctrl_queue_cnt++;
11559 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11560 int number_entries, uint16_t * list,
11561 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11563 int len, old_len, i;
11564 struct sctp_stream_reset_out_request *req_out;
11565 struct sctp_chunkhdr *ch;
11567 ch = mtod(chk->data, struct sctp_chunkhdr *);
11570 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11572 /* get to new offset for the param. */
11573 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11574 /* now how long will this param be? */
11575 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11576 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11577 req_out->ph.param_length = htons(len);
11578 req_out->request_seq = htonl(seq);
11579 req_out->response_seq = htonl(resp_seq);
11580 req_out->send_reset_at_tsn = htonl(last_sent);
11581 if (number_entries) {
11582 for (i = 0; i < number_entries; i++) {
11583 req_out->list_of_streams[i] = htons(list[i]);
11586 if (SCTP_SIZE32(len) > len) {
11588 * Need to worry about the pad we may end up adding to the
11589 * end. This is easy since the struct is either aligned to 4
11590 * bytes or 2 bytes off.
11592 req_out->list_of_streams[number_entries] = 0;
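/*
 * With an odd number of 16-bit stream ids the parameter ends 2
 * bytes short of a 4-byte boundary, so one extra zeroed entry is
 * written as padding; ph.param_length above stays at the unpadded
 * length while chk->send_size below is rounded up by SCTP_SIZE32().
 */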
11594 /* now fix the chunk length */
11595 ch->chunk_length = htons(len + old_len);
11596 chk->book_size = len + old_len;
11597 chk->book_size_scale = 0;
11598 chk->send_size = SCTP_SIZE32(chk->book_size);
11599 SCTP_BUF_LEN(chk->data) = chk->send_size;
11605 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11606 int number_entries, uint16_t * list,
11609 int len, old_len, i;
11610 struct sctp_stream_reset_in_request *req_in;
11611 struct sctp_chunkhdr *ch;
11613 ch = mtod(chk->data, struct sctp_chunkhdr *);
11616 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11618 /* get to new offset for the param. */
11619 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11620 /* now how long will this param be? */
11621 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11622 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11623 req_in->ph.param_length = htons(len);
11624 req_in->request_seq = htonl(seq);
11625 if (number_entries) {
11626 for (i = 0; i < number_entries; i++) {
11627 req_in->list_of_streams[i] = htons(list[i]);
11630 if (SCTP_SIZE32(len) > len) {
11632 * Need to worry about the pad we may end up adding to the
11633 * end. This is easy since the struct is either aligned to 4
11634 * bytes or 2 bytes off.
11636 req_in->list_of_streams[number_entries] = 0;
11638 /* now fix the chunk length */
11639 ch->chunk_length = htons(len + old_len);
11640 chk->book_size = len + old_len;
11641 chk->book_size_scale = 0;
11642 chk->send_size = SCTP_SIZE32(chk->book_size);
11643 SCTP_BUF_LEN(chk->data) = chk->send_size;
11649 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11653 struct sctp_stream_reset_tsn_request *req_tsn;
11654 struct sctp_chunkhdr *ch;
11656 ch = mtod(chk->data, struct sctp_chunkhdr *);
11659 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11661 /* get to new offset for the param. */
11662 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11663 /* now how long will this param be? */
11664 len = sizeof(struct sctp_stream_reset_tsn_request);
11665 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11666 req_tsn->ph.param_length = htons(len);
11667 req_tsn->request_seq = htonl(seq);
11669 /* now fix the chunk length */
11670 ch->chunk_length = htons(len + old_len);
11671 chk->send_size = len + old_len;
11672 chk->book_size = SCTP_SIZE32(chk->send_size);
11673 chk->book_size_scale = 0;
11674 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11679 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11680 uint32_t resp_seq, uint32_t result)
11683 struct sctp_stream_reset_response *resp;
11684 struct sctp_chunkhdr *ch;
11686 ch = mtod(chk->data, struct sctp_chunkhdr *);
11689 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11691 /* get to new offset for the param. */
11692 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11693 /* now how long will this param be? */
11694 len = sizeof(struct sctp_stream_reset_response);
11695 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11696 resp->ph.param_length = htons(len);
11697 resp->response_seq = htonl(resp_seq);
11698 resp->result = htonl(result);
11700 /* now fix the chunk length */
11701 ch->chunk_length = htons(len + old_len);
11702 chk->book_size = len + old_len;
11703 chk->book_size_scale = 0;
11704 chk->send_size = SCTP_SIZE32(chk->book_size);
11705 SCTP_BUF_LEN(chk->data) = chk->send_size;
11712 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11713 uint32_t resp_seq, uint32_t result,
11714 uint32_t send_una, uint32_t recv_next)
11717 struct sctp_stream_reset_response_tsn *resp;
11718 struct sctp_chunkhdr *ch;
11720 ch = mtod(chk->data, struct sctp_chunkhdr *);
11723 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11725 /* get to new offset for the param. */
11726 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11727 /* now how long will this param be? */
11728 len = sizeof(struct sctp_stream_reset_response_tsn);
11729 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11730 resp->ph.param_length = htons(len);
11731 resp->response_seq = htonl(resp_seq);
11732 resp->result = htonl(result);
11733 resp->senders_next_tsn = htonl(send_una);
11734 resp->receivers_next_tsn = htonl(recv_next);
11736 /* now fix the chunk length */
11737 ch->chunk_length = htons(len + old_len);
11738 chk->book_size = len + old_len;
11739 chk->send_size = SCTP_SIZE32(chk->book_size);
11740 chk->book_size_scale = 0;
11741 SCTP_BUF_LEN(chk->data) = chk->send_size;
11746 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11751 struct sctp_chunkhdr *ch;
11752 struct sctp_stream_reset_add_strm *addstr;
11754 ch = mtod(chk->data, struct sctp_chunkhdr *);
11755 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11757 /* get to new offset for the param. */
11758 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11759 /* now how long will this param be? */
11760 len = sizeof(struct sctp_stream_reset_add_strm);
11763 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11764 addstr->ph.param_length = htons(len);
11765 addstr->request_seq = htonl(seq);
11766 addstr->number_of_streams = htons(adding);
11767 addstr->reserved = 0;
11769 /* now fix the chunk length */
11770 ch->chunk_length = htons(len + old_len);
11771 chk->send_size = len + old_len;
11772 chk->book_size = SCTP_SIZE32(chk->send_size);
11773 chk->book_size_scale = 0;
11774 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11779 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
11784 struct sctp_chunkhdr *ch;
11785 struct sctp_stream_reset_add_strm *addstr;
11787 ch = mtod(chk->data, struct sctp_chunkhdr *);
11788 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11790 /* get to new offset for the param. */
11791 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11792 /* now how long will this param be? */
11793 len = sizeof(struct sctp_stream_reset_add_strm);
11795 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
11796 addstr->ph.param_length = htons(len);
11797 addstr->request_seq = htonl(seq);
11798 addstr->number_of_streams = htons(adding);
11799 addstr->reserved = 0;
11801 /* now fix the chunk length */
11802 ch->chunk_length = htons(len + old_len);
11803 chk->send_size = len + old_len;
11804 chk->book_size = SCTP_SIZE32(chk->send_size);
11805 chk->book_size_scale = 0;
11806 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11813 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11814 int number_entries, uint16_t * list,
11815 uint8_t send_out_req,
11816 uint8_t send_in_req,
11817 uint8_t send_tsn_req,
11818 uint8_t add_stream,
11820 uint16_t adding_i, uint8_t peer_asked
11824 struct sctp_association *asoc;
11825 struct sctp_tmit_chunk *chk;
11826 struct sctp_chunkhdr *ch;
11829 asoc = &stcb->asoc;
11830 if (asoc->stream_reset_outstanding) {
11832 * Already one pending, must get ACK back to clear the flag.
11834 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11837 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11838 (add_stream == 0)) {
11839 /* nothing to do */
11840 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11843 if (send_tsn_req && (send_out_req || send_in_req)) {
11844 /* error, can't do that */
11845 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11848 sctp_alloc_a_chunk(stcb, chk);
11850 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11853 chk->copy_by_ref = 0;
11854 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11855 chk->rec.chunk_id.can_take_data = 0;
11856 chk->asoc = &stcb->asoc;
11857 chk->book_size = sizeof(struct sctp_chunkhdr);
11858 chk->send_size = SCTP_SIZE32(chk->book_size);
11859 chk->book_size_scale = 0;
11861 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11862 if (chk->data == NULL) {
11863 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11864 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11867 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11869 /* setup chunk parameters */
11870 chk->sent = SCTP_DATAGRAM_UNSENT;
11871 chk->snd_count = 0;
11872 if (stcb->asoc.alternate) {
11873 chk->whoTo = stcb->asoc.alternate;
11875 chk->whoTo = stcb->asoc.primary_destination;
11877 atomic_add_int(&chk->whoTo->ref_count, 1);
11878 ch = mtod(chk->data, struct sctp_chunkhdr *);
11879 ch->chunk_type = SCTP_STREAM_RESET;
11880 ch->chunk_flags = 0;
11881 ch->chunk_length = htons(chk->book_size);
11882 SCTP_BUF_LEN(chk->data) = chk->send_size;
11884 seq = stcb->asoc.str_reset_seq_out;
11885 if (send_out_req) {
11886 sctp_add_stream_reset_out(chk, number_entries, list,
11887 seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
11888 asoc->stream_reset_out_is_outstanding = 1;
11890 asoc->stream_reset_outstanding++;
11892 if ((add_stream & 1) &&
11893 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
11894 /* Need to allocate more */
11895 struct sctp_stream_out *oldstream;
11896 struct sctp_stream_queue_pending *sp, *nsp;
11899 oldstream = stcb->asoc.strmout;
11900 /* get some more */
11901 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
11902 ((stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out)),
11904 if (stcb->asoc.strmout == NULL) {
11907 stcb->asoc.strmout = oldstream;
11908 /* Turn off the bit */
11909 x = add_stream & 0xfe;
11914 * Ok now we proceed with copying the old out stuff and
11915 * initializing the new stuff.
11917 SCTP_TCB_SEND_LOCK(stcb);
11918 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
11919 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11920 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11921 stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent;
11922 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
11923 stcb->asoc.strmout[i].stream_no = i;
11924 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
11925 /* now anything on those queues? */
11926 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
11927 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
11928 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
11930 /* Now move assoc pointers too */
11931 if (stcb->asoc.last_out_stream == &oldstream[i]) {
11932 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
11934 if (stcb->asoc.locked_on_sending == &oldstream[i]) {
11935 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
11938 /* now the new streams */
11939 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
11940 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
11941 stcb->asoc.strmout[i].next_sequence_sent = 0x0;
11942 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11943 stcb->asoc.strmout[i].stream_no = i;
11944 stcb->asoc.strmout[i].last_msg_incomplete = 0;
11945 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
11947 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
11948 SCTP_FREE(oldstream, SCTP_M_STRMO);
11949 SCTP_TCB_SEND_UNLOCK(stcb);
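/*
 * Growing the outbound stream array: allocate a larger strmout[],
 * and, under the send lock, move each existing stream's queue and
 * sequence state over, repoint last_out_stream/locked_on_sending
 * if they referenced the old array, initialize the freshly added
 * streams, and only then free the old array.
 */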
11952 if ((add_stream & 1) && (adding_o > 0)) {
11953 asoc->strm_pending_add_size = adding_o;
11954 asoc->peer_req_out = peer_asked;
11955 sctp_add_an_out_stream(chk, seq, adding_o);
11957 asoc->stream_reset_outstanding++;
11959 if ((add_stream & 2) && (adding_i > 0)) {
11960 sctp_add_an_in_stream(chk, seq, adding_i);
11962 asoc->stream_reset_outstanding++;
11965 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11967 asoc->stream_reset_outstanding++;
11969 if (send_tsn_req) {
11970 sctp_add_stream_reset_tsn(chk, seq);
11971 asoc->stream_reset_outstanding++;
11973 asoc->str_reset = chk;
11974 /* insert the chunk for sending */
11975 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11978 asoc->ctrl_queue_cnt++;
11979 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
11984 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
11985 struct mbuf *cause,
11986 uint8_t use_mflowid, uint32_t mflowid,
11987 uint32_t vrf_id, uint16_t port)
11989 /* Don't respond to an ABORT with an ABORT. */
11990 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11992 sctp_m_freem(cause);
11995 sctp_send_resp_msg(m, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
11996 use_mflowid, mflowid,
12002 sctp_send_operr_to(struct mbuf *m, struct sctphdr *sh, uint32_t vtag,
12003 struct mbuf *cause,
12004 uint8_t use_mflowid, uint32_t mflowid,
12005 uint32_t vrf_id, uint16_t port)
12007 sctp_send_resp_msg(m, sh, vtag, SCTP_OPERATION_ERROR, cause,
12008 use_mflowid, mflowid,
12013 static struct mbuf *
12014 sctp_copy_resume(struct uio *uio,
12016 int user_marks_eor,
12019 struct mbuf **new_tail)
12023 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12024 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12026 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12029 *sndout = m_length(m, NULL);
12030 *new_tail = m_last(m);
12036 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12043 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12045 if (sp->data == NULL) {
12046 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12049 sp->tail_mbuf = m_last(sp->data);
12055 static struct sctp_stream_queue_pending *
12056 sctp_copy_it_in(struct sctp_tcb *stcb,
12057 struct sctp_association *asoc,
12058 struct sctp_sndrcvinfo *srcv,
12060 struct sctp_nets *net,
12062 int user_marks_eor,
12066 * This routine must be very careful in its work. Protocol
12067 * processing is up and running so care must be taken to spl...()
12068 * when you need to do something that may affect the stcb/asoc. The
12069 * sb is locked however. When data is copied the protocol processing
12070 * should be enabled since this is a slower operation...
12072 struct sctp_stream_queue_pending *sp = NULL;
12076 /* Now can we send this? */
12077 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12078 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12079 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12080 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12081 /* got data while shutting down */
12082 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12083 *error = ECONNRESET;
12086 sctp_alloc_a_strmoq(stcb, sp);
12088 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12093 sp->sender_all_done = 0;
12094 sp->sinfo_flags = srcv->sinfo_flags;
12095 sp->timetolive = srcv->sinfo_timetolive;
12096 sp->ppid = srcv->sinfo_ppid;
12097 sp->context = srcv->sinfo_context;
12099 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12101 sp->stream = srcv->sinfo_stream;
12102 sp->length = min(uio->uio_resid, max_send_len);
12103 if ((sp->length == (uint32_t) uio->uio_resid) &&
12104 ((user_marks_eor == 0) ||
12105 (srcv->sinfo_flags & SCTP_EOF) ||
12106 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12107 sp->msg_is_complete = 1;
12109 sp->msg_is_complete = 0;
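/*
 * The message counts as complete only when this copy consumes the
 * entire uio and either explicit EOR marking is off or the caller
 * flagged SCTP_EOF/SCTP_EOR; otherwise the remainder is expected
 * on a later send call for the same stream.
 */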
12111 sp->sender_all_done = 0;
12112 sp->some_taken = 0;
12113 sp->put_last_out = 0;
12114 resv_in_first = sizeof(struct sctp_data_chunk);
12115 sp->data = sp->tail_mbuf = NULL;
12116 if (sp->length == 0) {
12120 if (srcv->sinfo_keynumber_valid) {
12121 sp->auth_keyid = srcv->sinfo_keynumber;
12123 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12125 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12126 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12127 sp->holds_key_ref = 1;
12129 *error = sctp_copy_one(sp, uio, resv_in_first);
12132 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12135 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12137 atomic_add_int(&sp->net->ref_count, 1);
12141 sctp_set_prsctp_policy(sp);
12149 sctp_sosend(struct socket *so,
12150 struct sockaddr *addr,
12153 struct mbuf *control,
12158 int error, use_sndinfo = 0;
12159 struct sctp_sndrcvinfo sndrcvninfo;
12160 struct sockaddr *addr_to_use;
12162 #if defined(INET) && defined(INET6)
12163 struct sockaddr_in sin;
12168 /* process cmsg snd/rcv info (maybe an assoc-id) */
12169 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12170 sizeof(sndrcvninfo))) {
12175 addr_to_use = addr;
12176 #if defined(INET) && defined(INET6)
12177 if ((addr) && (addr->sa_family == AF_INET6)) {
12178 struct sockaddr_in6 *sin6;
12180 sin6 = (struct sockaddr_in6 *)addr;
12181 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12182 in6_sin6_2_sin(&sin, sin6);
12183 addr_to_use = (struct sockaddr *)&sin;
12187 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12190 use_sndinfo ? &sndrcvninfo : NULL
12198 sctp_lower_sosend(struct socket *so,
12199 struct sockaddr *addr,
12201 struct mbuf *i_pak,
12202 struct mbuf *control,
12204 struct sctp_sndrcvinfo *srcv
12209 unsigned int sndlen = 0, max_len;
12211 struct mbuf *top = NULL;
12212 int queue_only = 0, queue_only_for_init = 0;
12213 int free_cnt_applied = 0;
12215 int now_filled = 0;
12216 unsigned int inqueue_bytes = 0;
12217 struct sctp_block_entry be;
12218 struct sctp_inpcb *inp;
12219 struct sctp_tcb *stcb = NULL;
12220 struct timeval now;
12221 struct sctp_nets *net;
12222 struct sctp_association *asoc;
12223 struct sctp_inpcb *t_inp;
12224 int user_marks_eor;
12225 int create_lock_applied = 0;
12226 int nagle_applies = 0;
12227 int some_on_control = 0;
12228 int got_all_of_the_send = 0;
12229 int hold_tcblock = 0;
12230 int non_blocking = 0;
12231 uint32_t local_add_more, local_soresv = 0;
12233 uint16_t sinfo_flags;
12234 sctp_assoc_t sinfo_assoc_id;
12241 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12243 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12246 SCTP_RELEASE_PKT(i_pak);
12250 if ((uio == NULL) && (i_pak == NULL)) {
12251 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12254 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12255 atomic_add_int(&inp->total_sends, 1);
12257 if (uio->uio_resid < 0) {
12258 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12261 sndlen = uio->uio_resid;
12263 top = SCTP_HEADER_TO_CHAIN(i_pak);
12264 sndlen = SCTP_HEADER_LEN(i_pak);
12266 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12269 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12270 (inp->sctp_socket->so_qlimit)) {
12271 /* The listener can NOT send */
12272 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12277 * Pre-screen address: if one is given, the sin-len
12278 * must be set correctly!
12281 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12283 switch (raddr->sa.sa_family) {
12286 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12287 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12291 port = raddr->sin.sin_port;
12296 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12297 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12301 port = raddr->sin6.sin6_port;
12305 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12306 error = EAFNOSUPPORT;
12313 sinfo_flags = srcv->sinfo_flags;
12314 sinfo_assoc_id = srcv->sinfo_assoc_id;
12315 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12316 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12317 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12321 if (srcv->sinfo_flags)
12322 SCTP_STAT_INCR(sctps_sends_with_flags);
12324 sinfo_flags = inp->def_send.sinfo_flags;
12325 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12327 if (sinfo_flags & SCTP_SENDALL) {
12328 /* it's a sendall */
12329 error = sctp_sendall(inp, uio, top, srcv);
12333 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12334 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12338 /* now we must find the assoc */
12339 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12340 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12341 SCTP_INP_RLOCK(inp);
12342 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12344 SCTP_TCB_LOCK(stcb);
12347 SCTP_INP_RUNLOCK(inp);
12348 } else if (sinfo_assoc_id) {
12349 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
12352 * Since we did not use findep we must
12353 * increment it, and if we don't find a tcb
12356 SCTP_INP_WLOCK(inp);
12357 SCTP_INP_INCR_REF(inp);
12358 SCTP_INP_WUNLOCK(inp);
12359 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12360 if (stcb == NULL) {
12361 SCTP_INP_WLOCK(inp);
12362 SCTP_INP_DECR_REF(inp);
12363 SCTP_INP_WUNLOCK(inp);
12368 if ((stcb == NULL) && (addr)) {
12369 /* Possible implicit send? */
12370 SCTP_ASOC_CREATE_LOCK(inp);
12371 create_lock_applied = 1;
12372 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12373 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12374 /* Should I really unlock ? */
12375 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12380 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12381 (addr->sa_family == AF_INET6)) {
12382 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12386 SCTP_INP_WLOCK(inp);
12387 SCTP_INP_INCR_REF(inp);
12388 SCTP_INP_WUNLOCK(inp);
12389 /* With the lock applied look again */
12390 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12391 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12392 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12394 if (stcb == NULL) {
12395 SCTP_INP_WLOCK(inp);
12396 SCTP_INP_DECR_REF(inp);
12397 SCTP_INP_WUNLOCK(inp);
12404 if (t_inp != inp) {
12405 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12410 if (stcb == NULL) {
12411 if (addr == NULL) {
12412 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12416 /* We must go ahead and start the INIT process */
12419 if ((sinfo_flags & SCTP_ABORT) ||
12420 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12422 * User asks to abort a non-existent assoc,
12423 * or EOF a non-existent assoc with no data
12425 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12429 /* get an asoc/stcb struct */
12430 vrf_id = inp->def_vrf_id;
12432 if (create_lock_applied == 0) {
12433 panic("Error, should hold create lock and I don't?");
12436 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12439 if (stcb == NULL) {
12440 /* Error is set up for us in the call */
12443 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12444 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12446 * Set the connected flag so we can queue
12449 soisconnecting(so);
12452 if (create_lock_applied) {
12453 SCTP_ASOC_CREATE_UNLOCK(inp);
12454 create_lock_applied = 0;
12456 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12459 * Turn on queue only flag to prevent data from
12463 asoc = &stcb->asoc;
12464 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12465 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12467 /* initialize authentication params for the assoc */
12468 sctp_initialize_auth_params(inp, stcb);
12471 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12472 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
12478 /* out with the INIT */
12479 queue_only_for_init = 1;
12481 * we may want to dig in after this call and adjust the MTU
12482 * value. It defaulted to 1500 (constant) but the ro
12483 * structure may now have an update and thus we may need to
12484 * change it BEFORE we append the message.
12488 asoc = &stcb->asoc;
12490 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12491 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12493 net = sctp_findnet(stcb, addr);
12496 if ((net == NULL) ||
12497 ((port != 0) && (port != stcb->rport))) {
12498 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12503 if (stcb->asoc.alternate) {
12504 net = stcb->asoc.alternate;
12506 net = stcb->asoc.primary_destination;
12509 atomic_add_int(&stcb->total_sends, 1);
12510 /* Keep the stcb from being freed under our feet */
12511 atomic_add_int(&asoc->refcnt, 1);
12512 free_cnt_applied = 1;
12514 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12515 if (sndlen > asoc->smallest_mtu) {
12516 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12521 if (SCTP_SO_IS_NBIO(so)
12522 || (flags & MSG_NBIO)
12526 /* would we block? */
12527 if (non_blocking) {
12528 if (hold_tcblock == 0) {
12529 SCTP_TCB_LOCK(stcb);
12532 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
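/*
 * inqueue_bytes backs the per-chunk DATA header accounting out of
 * the total output queue size, so the comparison against the send
 * buffer limit is made (roughly) on payload bytes alone.
 */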
12533 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12534 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12535 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12536 if (sndlen > SCTP_SB_LIMIT_SND(so))
12539 error = EWOULDBLOCK;
12542 stcb->asoc.sb_send_resv += sndlen;
12543 SCTP_TCB_UNLOCK(stcb);
12546 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12548 local_soresv = sndlen;
12549 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12550 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12551 error = ECONNRESET;
12554 if (create_lock_applied) {
12555 SCTP_ASOC_CREATE_UNLOCK(inp);
12556 create_lock_applied = 0;
12558 if (asoc->stream_reset_outstanding) {
12560 * Can't queue any data while stream reset is underway.
12562 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
12566 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12567 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12570 /* we are now done with all control */
12572 sctp_m_freem(control);
12575 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12576 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12577 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12578 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12579 if (srcv->sinfo_flags & SCTP_ABORT) {
12582 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12583 error = ECONNRESET;
12587 /* Ok, we will attempt a msgsnd :> */
12589 p->td_ru.ru_msgsnd++;
12591 /* Are we aborting? */
12592 if (srcv->sinfo_flags & SCTP_ABORT) {
12594 int tot_demand, tot_out = 0, max_out;
12596 SCTP_STAT_INCR(sctps_sends_with_abort);
12597 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12598 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12599 /* It has to be up before we abort */
12600 /* how big is the user-initiated abort? */
12601 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12605 if (hold_tcblock) {
12606 SCTP_TCB_UNLOCK(stcb);
12610 struct mbuf *cntm = NULL;
12612 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAIT, 1, MT_DATA);
12614 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12615 tot_out += SCTP_BUF_LEN(cntm);
12619 /* Must fit in an MTU */
12621 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12622 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12624 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12628 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
12631 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12635 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12636 max_out -= sizeof(struct sctp_abort_msg);
12637 if (tot_out > max_out) {
12641 struct sctp_paramhdr *ph;
12643 /* now move forward the data pointer */
12644 ph = mtod(mm, struct sctp_paramhdr *);
12645 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12646 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
12648 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12650 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12653 * Here, if we can't get the user's data
12654 * we still abort; we just don't get to
12655 * send the user's note :-0
12662 SCTP_BUF_NEXT(mm) = top;
12666 if (hold_tcblock == 0) {
12667 SCTP_TCB_LOCK(stcb);
12669 atomic_add_int(&stcb->asoc.refcnt, -1);
12670 free_cnt_applied = 0;
12671 /* release this lock, otherwise we hang on ourselves */
12672 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
12673 /* now relock the stcb so everything is sane */
12677 * In this case top is already chained to mm; avoid a double
12678 * free, since we free it below if top != NULL and the driver
12679 * would free it after sending the packet out
12686 /* Calculate the maximum we can send */
12687 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12688 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12689 if (non_blocking) {
12690 /* we already checked for non-blocking above. */
12693 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12698 if (hold_tcblock) {
12699 SCTP_TCB_UNLOCK(stcb);
12702 /* Is the stream no. valid? */
12703 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12704 /* Invalid stream number */
12705 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12709 if (asoc->strmout == NULL) {
12710 /* huh? software error */
12711 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12715 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
12716 if ((user_marks_eor == 0) &&
12717 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
12718 /* It will NEVER fit */
12719 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12723 if ((uio == NULL) && user_marks_eor) {
12725 * We do not support eeor mode for
12726 * sending with mbuf chains (like sendfile).
12728 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12732 if (user_marks_eor) {
12733 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
12736 * For non-eeor the whole message must fit in
12737 * the socket send buffer.
12739 local_add_more = sndlen;
12742 if (non_blocking) {
12743 goto skip_preblock;
12745 if (((max_len <= local_add_more) &&
12746 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
12748 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12749 /* No room right now ! */
12750 SOCKBUF_LOCK(&so->so_snd);
12751 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12752 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
12753 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12754 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
12755 (unsigned int)SCTP_SB_LIMIT_SND(so),
12758 stcb->asoc.stream_queue_cnt,
12759 stcb->asoc.chunks_on_out_queue,
12760 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
12761 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12762 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
12765 stcb->block_entry = &be;
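/*
 * Registering be as our block entry before sleeping lets other
 * parts of the stack (for instance an association being torn
 * down) post an error into be.error and wake us; both so_error
 * and be.error are therefore checked once sbwait() returns.
 */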
12766 error = sbwait(&so->so_snd);
12767 stcb->block_entry = NULL;
12768 if (error || so->so_error || be.error) {
12771 error = so->so_error;
12776 SOCKBUF_UNLOCK(&so->so_snd);
12779 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12780 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
12781 asoc, stcb->asoc.total_output_queue_size);
12783 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12786 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12788 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12789 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12793 SOCKBUF_UNLOCK(&so->so_snd);
12796 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12800 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
12801 * case. NOTE: uio will be NULL when top/mbuf is passed
12804 if (srcv->sinfo_flags & SCTP_EOF) {
12805 got_all_of_the_send = 1;
12808 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12814 struct sctp_stream_queue_pending *sp;
12815 struct sctp_stream_out *strm;
12818 SCTP_TCB_SEND_LOCK(stcb);
12819 if ((asoc->stream_locked) &&
12820 (asoc->stream_locked_on != srcv->sinfo_stream)) {
12821 SCTP_TCB_SEND_UNLOCK(stcb);
12822 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12826 SCTP_TCB_SEND_UNLOCK(stcb);
12828 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
12829 if (strm->last_msg_incomplete == 0) {
12831 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
12832 if ((sp == NULL) || (error)) {
12835 SCTP_TCB_SEND_LOCK(stcb);
12836 if (sp->msg_is_complete) {
12837 strm->last_msg_incomplete = 0;
12838 asoc->stream_locked = 0;
12841 * Just got locked to this guy in case of an
12844 strm->last_msg_incomplete = 1;
12845 asoc->stream_locked = 1;
12846 asoc->stream_locked_on = srcv->sinfo_stream;
12847 sp->sender_all_done = 0;
12849 sctp_snd_sb_alloc(stcb, sp->length);
12850 atomic_add_int(&asoc->stream_queue_cnt, 1);
12851 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
12852 sp->strseq = strm->next_sequence_sent;
12853 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
12854 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
12855 (uintptr_t) stcb, sp->length,
12856 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
12858 strm->next_sequence_sent++;
12860 SCTP_STAT_INCR(sctps_sends_with_unord);
12862 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
12863 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
12864 SCTP_TCB_SEND_UNLOCK(stcb);
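		/*
		 * Continuation path: the previous send left the last message
		 * incomplete, so pick up the last pending entry on this
		 * stream's outqueue and keep appending to it.
		 */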
12866 SCTP_TCB_SEND_LOCK(stcb);
12867 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
12868 SCTP_TCB_SEND_UNLOCK(stcb);
12870 /* ???? Huh ??? last msg is gone */
12872 panic("Warning: Last msg marked incomplete, yet nothing left?");
12874 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
12875 strm->last_msg_incomplete = 0;
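		/*
		 * Main copy loop: move user data from the uio into mbuf
		 * chains, append them to 'sp', and block when the send
		 * buffer fills, until uio_resid reaches zero.
		 */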
12881 while (uio->uio_resid > 0) {
12882 /* How much room do we have? */
12883 struct mbuf *new_tail, *mm;
12885 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
12886 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
12890 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
12891 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
12892 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
12895 if (hold_tcblock) {
12896 SCTP_TCB_UNLOCK(stcb);
12899 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
12900 if ((mm == NULL) || error) {
12906 /* Update the mbuf and count */
12907 SCTP_TCB_SEND_LOCK(stcb);
12908 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12910					 * we need to get out. Peer probably aborted.
12914 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
12915 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12916 error = ECONNRESET;
12918 SCTP_TCB_SEND_UNLOCK(stcb);
12921 if (sp->tail_mbuf) {
12922 /* tack it to the end */
12923 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
12924 sp->tail_mbuf = new_tail;
12926 /* A stolen mbuf */
12928 sp->tail_mbuf = new_tail;
12930 sctp_snd_sb_alloc(stcb, sndout);
12931 atomic_add_int(&sp->length, sndout);
12934 /* Did we reach EOR? */
12935 if ((uio->uio_resid == 0) &&
12936 ((user_marks_eor == 0) ||
12937 (srcv->sinfo_flags & SCTP_EOF) ||
12938 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12939 sp->msg_is_complete = 1;
12941 sp->msg_is_complete = 0;
12943 SCTP_TCB_SEND_UNLOCK(stcb);
12945 if (uio->uio_resid == 0) {
12950 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
12952					 * This is ugly but we must assure locking order.
12955 if (hold_tcblock == 0) {
12956 SCTP_TCB_LOCK(stcb);
12959 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
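				/*
				 * PR-SCTP: expired chunks may have been
				 * pruned from the sent queue above; recompute
				 * the queued byte count and the room left in
				 * the send buffer before deciding to block.
				 */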
12960 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12961 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
12962 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12968 SCTP_TCB_UNLOCK(stcb);
12971 /* wait for space now */
12972 if (non_blocking) {
12973 /* Non-blocking io in place out */
12976 /* What about the INIT, send it maybe */
12977 if (queue_only_for_init) {
12978 if (hold_tcblock == 0) {
12979 SCTP_TCB_LOCK(stcb);
12982 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
12983 /* a collision took us forward? */
12986 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
12987 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12991 if ((net->flight_size > net->cwnd) &&
12992 (asoc->sctp_cmt_on_off == 0)) {
12993 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
12995 } else if (asoc->ifp_had_enobuf) {
12996 SCTP_STAT_INCR(sctps_ifnomemqueued);
12997 if (net->flight_size > (2 * net->mtu)) {
13000 asoc->ifp_had_enobuf = 0;
13002 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13003 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
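			/*
			 * un_sent approximates the bytes queued but not yet
			 * in flight, plus one data-chunk header of overhead
			 * per queued stream message; it feeds the Nagle test
			 * below.
			 */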
13004 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13005 (stcb->asoc.total_flight > 0) &&
13006 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13007 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13010 * Ok, Nagle is set on and we have data outstanding.
13011 * Don't send anything and let SACKs drive out the
13012				 * data unless we have a "full" segment to send.
13014 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13015 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13017 SCTP_STAT_INCR(sctps_naglequeued);
13020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13021 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13022 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13024 SCTP_STAT_INCR(sctps_naglesent);
13027 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13029 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13030 nagle_applies, un_sent);
13031 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13032 stcb->asoc.total_flight,
13033 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13035 if (queue_only_for_init)
13036 queue_only_for_init = 0;
13037 if ((queue_only == 0) && (nagle_applies == 0)) {
13039 * need to start chunk output
13040				 * before blocking. Note that if
13041 * a lock is already applied, then
13042 * the input via the net is happening
13043 * and I don't need to start output :-D
13045 if (hold_tcblock == 0) {
13046 if (SCTP_TCB_TRYLOCK(stcb)) {
13048 sctp_chunk_output(inp,
13050 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13053 sctp_chunk_output(inp,
13055 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13057 if (hold_tcblock == 1) {
13058 SCTP_TCB_UNLOCK(stcb);
13062 SOCKBUF_LOCK(&so->so_snd);
13064 * This is a bit strange, but I think it will
13065 * work. The total_output_queue_size is locked and
13066 * protected by the TCB_LOCK, which we just released.
13067			 * There is a race that can occur between releasing it
13068			 * above and acquiring the socket lock below, where sacks
13069 * come in but we have not put the SB_WAIT on the
13070 * so_snd buffer to get the wakeup. After the LOCK
13071 * is applied the sack_processing will also need to
13072 * LOCK the so->so_snd to do the actual sowwakeup(). So
13073 * once we have the socket buffer lock if we recheck the
13074 * size we KNOW we will get to sleep safely with the
13075 * wakeup flag in place.
13077 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
13078 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13079 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13080 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13081 asoc, uio->uio_resid);
13084 stcb->block_entry = &be;
13085 error = sbwait(&so->so_snd);
13086 stcb->block_entry = NULL;
13088 if (error || so->so_error || be.error) {
13091 error = so->so_error;
13096 SOCKBUF_UNLOCK(&so->so_snd);
13099 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13100 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13101 asoc, stcb->asoc.total_output_queue_size);
13104 SOCKBUF_UNLOCK(&so->so_snd);
13105 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13109 SCTP_TCB_SEND_LOCK(stcb);
13111 if (sp->msg_is_complete == 0) {
13112 strm->last_msg_incomplete = 1;
13113 asoc->stream_locked = 1;
13114 asoc->stream_locked_on = srcv->sinfo_stream;
13116 sp->sender_all_done = 1;
13117 strm->last_msg_incomplete = 0;
13118 asoc->stream_locked = 0;
13121 SCTP_PRINTF("Huh no sp TSNH?\n");
13122 strm->last_msg_incomplete = 0;
13123 asoc->stream_locked = 0;
13125 SCTP_TCB_SEND_UNLOCK(stcb);
13126 if (uio->uio_resid == 0) {
13127 got_all_of_the_send = 1;
13130 /* We send in a 0, since we do NOT have any locks */
13131 error = sctp_msg_append(stcb, net, top, srcv, 0);
13133 if (srcv->sinfo_flags & SCTP_EOF) {
13135			 * This should only happen for Panda in the mbuf
13136			 * send case, which does NOT yet support EEOR mode.
13137			 * Thus, we can just set this flag to do the proper EOF handling.
13140 got_all_of_the_send = 1;
13148 if ((srcv->sinfo_flags & SCTP_EOF) &&
13149 (got_all_of_the_send == 1)) {
13152 SCTP_STAT_INCR(sctps_sends_with_eof);
13154 if (hold_tcblock == 0) {
13155 SCTP_TCB_LOCK(stcb);
13158 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
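		/*
		 * SCTP_EOF and all user data accepted: if nothing remains
		 * queued, move straight to SHUTDOWN-SENT and send a SHUTDOWN;
		 * otherwise mark the association SHUTDOWN_PENDING so the
		 * shutdown proceeds once the queues drain.
		 */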
13159 if (TAILQ_EMPTY(&asoc->send_queue) &&
13160 TAILQ_EMPTY(&asoc->sent_queue) &&
13162 if (asoc->locked_on_sending) {
13165 /* there is nothing queued to send, so I'm done... */
13166 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13167 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13168 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13169 struct sctp_nets *netp;
13171 if (stcb->asoc.alternate) {
13172 netp = stcb->asoc.alternate;
13174 netp = stcb->asoc.primary_destination;
13176 /* only send SHUTDOWN the first time through */
13177 sctp_send_shutdown(stcb, netp);
13178 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
13179 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13181 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
13182 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
13183 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13185 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13186 asoc->primary_destination);
13190			 * we still got (or just got) data to send, so set SHUTDOWN_PENDING.
13194 * XXX sockets draft says that SCTP_EOF should be
13195			 * sent with no data. Currently, we will allow user
13196			 * data to be sent first and move to SHUTDOWN-PENDING.
13199 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13200 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13201 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13202 if (hold_tcblock == 0) {
13203 SCTP_TCB_LOCK(stcb);
13206 if (asoc->locked_on_sending) {
13207 /* Locked to send out the data */
13208 struct sctp_stream_queue_pending *sp;
13210 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
13212 if ((sp->length == 0) && (sp->msg_is_complete == 0))
13213 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
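			/*
			 * A zero-length, incomplete message means the user
			 * abandoned a partial send; with PARTIAL_MSG_LEFT set
			 * and both queues empty the association is aborted
			 * below rather than shut down cleanly.
			 */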
13216 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
13217 if (TAILQ_EMPTY(&asoc->send_queue) &&
13218 TAILQ_EMPTY(&asoc->sent_queue) &&
13219 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13221 if (free_cnt_applied) {
13222 atomic_add_int(&stcb->asoc.refcnt, -1);
13223 free_cnt_applied = 0;
13225 sctp_abort_an_association(stcb->sctp_ep, stcb,
13226 NULL, SCTP_SO_LOCKED);
13228					 * now relock the stcb so everything is sane.
13235 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13236 asoc->primary_destination);
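			/*
			 * Disable Nagle while shutdown is pending so any
			 * remaining queued data is pushed out promptly rather
			 * than waiting for a full segment.
			 */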
13237 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13242 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13243 some_on_control = 1;
13245 if (queue_only_for_init) {
13246 if (hold_tcblock == 0) {
13247 SCTP_TCB_LOCK(stcb);
13250 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13251 /* a collision took us forward? */
13254 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13255 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
13259 if ((net->flight_size > net->cwnd) &&
13260 (stcb->asoc.sctp_cmt_on_off == 0)) {
13261 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13263 } else if (asoc->ifp_had_enobuf) {
13264 SCTP_STAT_INCR(sctps_ifnomemqueued);
13265 if (net->flight_size > (2 * net->mtu)) {
13268 asoc->ifp_had_enobuf = 0;
13270 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13271 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
13272 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13273 (stcb->asoc.total_flight > 0) &&
13274 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13275 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13277 * Ok, Nagle is set on and we have data outstanding.
13278 * Don't send anything and let SACKs drive out the
13279		 * data unless we have a "full" segment to send.
13281 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13282 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13284 SCTP_STAT_INCR(sctps_naglequeued);
13287 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13288 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13289 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13291 SCTP_STAT_INCR(sctps_naglesent);
13294 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13295 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13296 nagle_applies, un_sent);
13297 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13298 stcb->asoc.total_flight,
13299 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13301 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13302 /* we can attempt to send too. */
13303 if (hold_tcblock == 0) {
13305			 * If there is activity recv'ing sacks no need to send.
13308 if (SCTP_TCB_TRYLOCK(stcb)) {
13309 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13313 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
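	/*
	 * If the peer has closed its window and nothing is in flight, a
	 * zero-window probe is sent in the branch below to keep the
	 * association moving.
	 */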
13315 } else if ((queue_only == 0) &&
13316 (stcb->asoc.peers_rwnd == 0) &&
13317 (stcb->asoc.total_flight == 0)) {
13318 /* We get to have a probe outstanding */
13319 if (hold_tcblock == 0) {
13321 SCTP_TCB_LOCK(stcb);
13323 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13324 } else if (some_on_control) {
13325 int num_out, reason, frag_point;
13327 /* Here we do control only */
13328 if (hold_tcblock == 0) {
13330 SCTP_TCB_LOCK(stcb);
13332 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13333 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13334 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13336 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13337 queue_only, stcb->asoc.peers_rwnd, un_sent,
13338 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13339 stcb->asoc.total_output_queue_size, error);
13344 if (local_soresv && stcb) {
13345 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13347 if (create_lock_applied) {
13348 SCTP_ASOC_CREATE_UNLOCK(inp);
13350 if ((stcb) && hold_tcblock) {
13351 SCTP_TCB_UNLOCK(stcb);
13353 if (stcb && free_cnt_applied) {
13354 atomic_add_int(&stcb->asoc.refcnt, -1);
13358 if (mtx_owned(&stcb->tcb_mtx)) {
13359 panic("Leaving with tcb mtx owned?");
13361 if (mtx_owned(&stcb->tcb_send_mtx)) {
13362 panic("Leaving with tcb send mtx owned?");
13368 sctp_validate_no_locks(inp);
13370 SCTP_PRINTF("Warning - inp is NULL so cant validate locks\n");
13377 sctp_m_freem(control);
13384 * generate an AUTHentication chunk, if required
13387 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13388 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
13389 struct sctp_tcb *stcb, uint8_t chunk)
13391 struct mbuf *m_auth;
13392 struct sctp_auth_chunk *auth;
13396 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13400 /* sysctl disabled auth? */
13401 if (SCTP_BASE_SYSCTL(sctp_auth_disable))
13404 /* peer doesn't do auth... */
13405 if (!stcb->asoc.peer_supports_auth) {
13408 /* does the requested chunk require auth? */
13409 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13412 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
13413 if (m_auth == NULL) {
13417 /* reserve some space if this will be the first mbuf */
13419 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13420 /* fill in the AUTH chunk details */
13421 auth = mtod(m_auth, struct sctp_auth_chunk *);
13422 bzero(auth, sizeof(*auth));
13423 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13424 auth->ch.chunk_flags = 0;
13425 chunk_len = sizeof(*auth) +
13426 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13427 auth->ch.chunk_length = htons(chunk_len);
13428 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13429 /* key id and hmac digest will be computed and filled in upon send */
13431 /* save the offset where the auth was inserted into the chain */
13433 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13434 *offset += SCTP_BUF_LEN(cn);
13437 /* update length and return pointer to the auth chunk */
13438 SCTP_BUF_LEN(m_auth) = chunk_len;
13439 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
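	/*
	 * m_auth is now linked into the outgoing chain; the HMAC itself is
	 * computed over the AUTH chunk and everything following it when the
	 * packet is actually transmitted.
	 */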
13440 if (auth_ret != NULL)
13448 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
13450 struct nd_prefix *pfx = NULL;
13451 struct nd_pfxrouter *pfxrtr = NULL;
13452 struct sockaddr_in6 gw6;
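	/*
	 * Return non-zero when the route's next hop is one of the routers
	 * advertising the prefix that covers src6, i.e. the chosen IPv6
	 * source address is actually reachable through that gateway.
	 */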
13454 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13457 /* get prefix entry of address */
13458 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13459 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13461 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13462 &src6->sin6_addr, &pfx->ndpr_mask))
13465 /* no prefix entry in the prefix list */
13467 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13468 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13471 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13472 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13474 /* search installed gateway from prefix entry */
13475 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13476 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13477 gw6.sin6_family = AF_INET6;
13478 gw6.sin6_len = sizeof(struct sockaddr_in6);
13479 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13480 sizeof(struct in6_addr));
13481 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13482 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13483 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13484 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13485 if (sctp_cmpaddr((struct sockaddr *)&gw6,
13486 ro->ro_rt->rt_gateway)) {
13487 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13491 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
13498 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
13501 struct sockaddr_in *sin, *mask;
13502 struct ifaddr *ifa;
13503 struct in_addr srcnetaddr, gwnetaddr;
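	/*
	 * Compare the network portion (under the interface netmask) of the
	 * candidate IPv4 source address with that of the route's gateway;
	 * they match when both sit on the same subnet.
	 */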
13505 if (ro == NULL || ro->ro_rt == NULL ||
13506 sifa->address.sa.sa_family != AF_INET) {
13509 ifa = (struct ifaddr *)sifa->ifa;
13510 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13511 sin = (struct sockaddr_in *)&sifa->address.sin;
13512 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13513 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13514 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13515 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13517 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13518 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13519 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13520 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13521 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13522 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {