/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <machine/in_cksum.h>
#define SCTP_MAX_GAPS_INARRAY 4
struct sack_track {
	uint8_t right_edge;	/* mergable on the right edge */
	uint8_t left_edge;	/* mergable on the left edge */
	uint8_t num_entries;
	uint8_t spare;
	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};

struct sack_track sack_array[256] = {
69 {0, 0, 0, 0, /* 0x00 */
76 {1, 0, 1, 0, /* 0x01 */
83 {0, 0, 1, 0, /* 0x02 */
90 {1, 0, 1, 0, /* 0x03 */
97 {0, 0, 1, 0, /* 0x04 */
104 {1, 0, 2, 0, /* 0x05 */
111 {0, 0, 1, 0, /* 0x06 */
118 {1, 0, 1, 0, /* 0x07 */
125 {0, 0, 1, 0, /* 0x08 */
132 {1, 0, 2, 0, /* 0x09 */
139 {0, 0, 2, 0, /* 0x0a */
146 {1, 0, 2, 0, /* 0x0b */
153 {0, 0, 1, 0, /* 0x0c */
160 {1, 0, 2, 0, /* 0x0d */
167 {0, 0, 1, 0, /* 0x0e */
174 {1, 0, 1, 0, /* 0x0f */
181 {0, 0, 1, 0, /* 0x10 */
188 {1, 0, 2, 0, /* 0x11 */
195 {0, 0, 2, 0, /* 0x12 */
202 {1, 0, 2, 0, /* 0x13 */
209 {0, 0, 2, 0, /* 0x14 */
216 {1, 0, 3, 0, /* 0x15 */
223 {0, 0, 2, 0, /* 0x16 */
230 {1, 0, 2, 0, /* 0x17 */
237 {0, 0, 1, 0, /* 0x18 */
244 {1, 0, 2, 0, /* 0x19 */
251 {0, 0, 2, 0, /* 0x1a */
258 {1, 0, 2, 0, /* 0x1b */
265 {0, 0, 1, 0, /* 0x1c */
272 {1, 0, 2, 0, /* 0x1d */
279 {0, 0, 1, 0, /* 0x1e */
286 {1, 0, 1, 0, /* 0x1f */
293 {0, 0, 1, 0, /* 0x20 */
300 {1, 0, 2, 0, /* 0x21 */
307 {0, 0, 2, 0, /* 0x22 */
314 {1, 0, 2, 0, /* 0x23 */
321 {0, 0, 2, 0, /* 0x24 */
328 {1, 0, 3, 0, /* 0x25 */
335 {0, 0, 2, 0, /* 0x26 */
342 {1, 0, 2, 0, /* 0x27 */
349 {0, 0, 2, 0, /* 0x28 */
356 {1, 0, 3, 0, /* 0x29 */
363 {0, 0, 3, 0, /* 0x2a */
370 {1, 0, 3, 0, /* 0x2b */
377 {0, 0, 2, 0, /* 0x2c */
384 {1, 0, 3, 0, /* 0x2d */
391 {0, 0, 2, 0, /* 0x2e */
398 {1, 0, 2, 0, /* 0x2f */
405 {0, 0, 1, 0, /* 0x30 */
412 {1, 0, 2, 0, /* 0x31 */
419 {0, 0, 2, 0, /* 0x32 */
426 {1, 0, 2, 0, /* 0x33 */
433 {0, 0, 2, 0, /* 0x34 */
440 {1, 0, 3, 0, /* 0x35 */
447 {0, 0, 2, 0, /* 0x36 */
454 {1, 0, 2, 0, /* 0x37 */
461 {0, 0, 1, 0, /* 0x38 */
468 {1, 0, 2, 0, /* 0x39 */
475 {0, 0, 2, 0, /* 0x3a */
482 {1, 0, 2, 0, /* 0x3b */
489 {0, 0, 1, 0, /* 0x3c */
496 {1, 0, 2, 0, /* 0x3d */
503 {0, 0, 1, 0, /* 0x3e */
510 {1, 0, 1, 0, /* 0x3f */
517 {0, 0, 1, 0, /* 0x40 */
524 {1, 0, 2, 0, /* 0x41 */
531 {0, 0, 2, 0, /* 0x42 */
538 {1, 0, 2, 0, /* 0x43 */
545 {0, 0, 2, 0, /* 0x44 */
552 {1, 0, 3, 0, /* 0x45 */
559 {0, 0, 2, 0, /* 0x46 */
566 {1, 0, 2, 0, /* 0x47 */
573 {0, 0, 2, 0, /* 0x48 */
580 {1, 0, 3, 0, /* 0x49 */
587 {0, 0, 3, 0, /* 0x4a */
594 {1, 0, 3, 0, /* 0x4b */
601 {0, 0, 2, 0, /* 0x4c */
608 {1, 0, 3, 0, /* 0x4d */
615 {0, 0, 2, 0, /* 0x4e */
622 {1, 0, 2, 0, /* 0x4f */
629 {0, 0, 2, 0, /* 0x50 */
636 {1, 0, 3, 0, /* 0x51 */
643 {0, 0, 3, 0, /* 0x52 */
650 {1, 0, 3, 0, /* 0x53 */
657 {0, 0, 3, 0, /* 0x54 */
664 {1, 0, 4, 0, /* 0x55 */
671 {0, 0, 3, 0, /* 0x56 */
678 {1, 0, 3, 0, /* 0x57 */
685 {0, 0, 2, 0, /* 0x58 */
692 {1, 0, 3, 0, /* 0x59 */
699 {0, 0, 3, 0, /* 0x5a */
706 {1, 0, 3, 0, /* 0x5b */
713 {0, 0, 2, 0, /* 0x5c */
720 {1, 0, 3, 0, /* 0x5d */
727 {0, 0, 2, 0, /* 0x5e */
734 {1, 0, 2, 0, /* 0x5f */
741 {0, 0, 1, 0, /* 0x60 */
748 {1, 0, 2, 0, /* 0x61 */
755 {0, 0, 2, 0, /* 0x62 */
762 {1, 0, 2, 0, /* 0x63 */
769 {0, 0, 2, 0, /* 0x64 */
776 {1, 0, 3, 0, /* 0x65 */
783 {0, 0, 2, 0, /* 0x66 */
790 {1, 0, 2, 0, /* 0x67 */
797 {0, 0, 2, 0, /* 0x68 */
804 {1, 0, 3, 0, /* 0x69 */
811 {0, 0, 3, 0, /* 0x6a */
818 {1, 0, 3, 0, /* 0x6b */
825 {0, 0, 2, 0, /* 0x6c */
832 {1, 0, 3, 0, /* 0x6d */
839 {0, 0, 2, 0, /* 0x6e */
846 {1, 0, 2, 0, /* 0x6f */
853 {0, 0, 1, 0, /* 0x70 */
860 {1, 0, 2, 0, /* 0x71 */
867 {0, 0, 2, 0, /* 0x72 */
874 {1, 0, 2, 0, /* 0x73 */
881 {0, 0, 2, 0, /* 0x74 */
888 {1, 0, 3, 0, /* 0x75 */
895 {0, 0, 2, 0, /* 0x76 */
902 {1, 0, 2, 0, /* 0x77 */
909 {0, 0, 1, 0, /* 0x78 */
916 {1, 0, 2, 0, /* 0x79 */
923 {0, 0, 2, 0, /* 0x7a */
930 {1, 0, 2, 0, /* 0x7b */
937 {0, 0, 1, 0, /* 0x7c */
944 {1, 0, 2, 0, /* 0x7d */
951 {0, 0, 1, 0, /* 0x7e */
958 {1, 0, 1, 0, /* 0x7f */
965 {0, 1, 1, 0, /* 0x80 */
972 {1, 1, 2, 0, /* 0x81 */
979 {0, 1, 2, 0, /* 0x82 */
986 {1, 1, 2, 0, /* 0x83 */
993 {0, 1, 2, 0, /* 0x84 */
1000 {1, 1, 3, 0, /* 0x85 */
1007 {0, 1, 2, 0, /* 0x86 */
1014 {1, 1, 2, 0, /* 0x87 */
1021 {0, 1, 2, 0, /* 0x88 */
1028 {1, 1, 3, 0, /* 0x89 */
1035 {0, 1, 3, 0, /* 0x8a */
1042 {1, 1, 3, 0, /* 0x8b */
1049 {0, 1, 2, 0, /* 0x8c */
1056 {1, 1, 3, 0, /* 0x8d */
1063 {0, 1, 2, 0, /* 0x8e */
1070 {1, 1, 2, 0, /* 0x8f */
1077 {0, 1, 2, 0, /* 0x90 */
1084 {1, 1, 3, 0, /* 0x91 */
1091 {0, 1, 3, 0, /* 0x92 */
1098 {1, 1, 3, 0, /* 0x93 */
1105 {0, 1, 3, 0, /* 0x94 */
1112 {1, 1, 4, 0, /* 0x95 */
1119 {0, 1, 3, 0, /* 0x96 */
1126 {1, 1, 3, 0, /* 0x97 */
1133 {0, 1, 2, 0, /* 0x98 */
1140 {1, 1, 3, 0, /* 0x99 */
1147 {0, 1, 3, 0, /* 0x9a */
1154 {1, 1, 3, 0, /* 0x9b */
1161 {0, 1, 2, 0, /* 0x9c */
1168 {1, 1, 3, 0, /* 0x9d */
1175 {0, 1, 2, 0, /* 0x9e */
1182 {1, 1, 2, 0, /* 0x9f */
1189 {0, 1, 2, 0, /* 0xa0 */
1196 {1, 1, 3, 0, /* 0xa1 */
1203 {0, 1, 3, 0, /* 0xa2 */
1210 {1, 1, 3, 0, /* 0xa3 */
1217 {0, 1, 3, 0, /* 0xa4 */
1224 {1, 1, 4, 0, /* 0xa5 */
1231 {0, 1, 3, 0, /* 0xa6 */
1238 {1, 1, 3, 0, /* 0xa7 */
1245 {0, 1, 3, 0, /* 0xa8 */
1252 {1, 1, 4, 0, /* 0xa9 */
1259 {0, 1, 4, 0, /* 0xaa */
1266 {1, 1, 4, 0, /* 0xab */
1273 {0, 1, 3, 0, /* 0xac */
1280 {1, 1, 4, 0, /* 0xad */
1287 {0, 1, 3, 0, /* 0xae */
1294 {1, 1, 3, 0, /* 0xaf */
1301 {0, 1, 2, 0, /* 0xb0 */
1308 {1, 1, 3, 0, /* 0xb1 */
1315 {0, 1, 3, 0, /* 0xb2 */
1322 {1, 1, 3, 0, /* 0xb3 */
1329 {0, 1, 3, 0, /* 0xb4 */
1336 {1, 1, 4, 0, /* 0xb5 */
1343 {0, 1, 3, 0, /* 0xb6 */
1350 {1, 1, 3, 0, /* 0xb7 */
1357 {0, 1, 2, 0, /* 0xb8 */
1364 {1, 1, 3, 0, /* 0xb9 */
1371 {0, 1, 3, 0, /* 0xba */
1378 {1, 1, 3, 0, /* 0xbb */
1385 {0, 1, 2, 0, /* 0xbc */
1392 {1, 1, 3, 0, /* 0xbd */
1399 {0, 1, 2, 0, /* 0xbe */
1406 {1, 1, 2, 0, /* 0xbf */
1413 {0, 1, 1, 0, /* 0xc0 */
1420 {1, 1, 2, 0, /* 0xc1 */
1427 {0, 1, 2, 0, /* 0xc2 */
1434 {1, 1, 2, 0, /* 0xc3 */
1441 {0, 1, 2, 0, /* 0xc4 */
1448 {1, 1, 3, 0, /* 0xc5 */
1455 {0, 1, 2, 0, /* 0xc6 */
1462 {1, 1, 2, 0, /* 0xc7 */
1469 {0, 1, 2, 0, /* 0xc8 */
1476 {1, 1, 3, 0, /* 0xc9 */
1483 {0, 1, 3, 0, /* 0xca */
1490 {1, 1, 3, 0, /* 0xcb */
1497 {0, 1, 2, 0, /* 0xcc */
1504 {1, 1, 3, 0, /* 0xcd */
1511 {0, 1, 2, 0, /* 0xce */
1518 {1, 1, 2, 0, /* 0xcf */
1525 {0, 1, 2, 0, /* 0xd0 */
1532 {1, 1, 3, 0, /* 0xd1 */
1539 {0, 1, 3, 0, /* 0xd2 */
1546 {1, 1, 3, 0, /* 0xd3 */
1553 {0, 1, 3, 0, /* 0xd4 */
1560 {1, 1, 4, 0, /* 0xd5 */
1567 {0, 1, 3, 0, /* 0xd6 */
1574 {1, 1, 3, 0, /* 0xd7 */
1581 {0, 1, 2, 0, /* 0xd8 */
1588 {1, 1, 3, 0, /* 0xd9 */
1595 {0, 1, 3, 0, /* 0xda */
1602 {1, 1, 3, 0, /* 0xdb */
1609 {0, 1, 2, 0, /* 0xdc */
1616 {1, 1, 3, 0, /* 0xdd */
1623 {0, 1, 2, 0, /* 0xde */
1630 {1, 1, 2, 0, /* 0xdf */
1637 {0, 1, 1, 0, /* 0xe0 */
1644 {1, 1, 2, 0, /* 0xe1 */
1651 {0, 1, 2, 0, /* 0xe2 */
1658 {1, 1, 2, 0, /* 0xe3 */
1665 {0, 1, 2, 0, /* 0xe4 */
1672 {1, 1, 3, 0, /* 0xe5 */
1679 {0, 1, 2, 0, /* 0xe6 */
1686 {1, 1, 2, 0, /* 0xe7 */
1693 {0, 1, 2, 0, /* 0xe8 */
1700 {1, 1, 3, 0, /* 0xe9 */
1707 {0, 1, 3, 0, /* 0xea */
1714 {1, 1, 3, 0, /* 0xeb */
1721 {0, 1, 2, 0, /* 0xec */
1728 {1, 1, 3, 0, /* 0xed */
1735 {0, 1, 2, 0, /* 0xee */
1742 {1, 1, 2, 0, /* 0xef */
1749 {0, 1, 1, 0, /* 0xf0 */
1756 {1, 1, 2, 0, /* 0xf1 */
1763 {0, 1, 2, 0, /* 0xf2 */
1770 {1, 1, 2, 0, /* 0xf3 */
1777 {0, 1, 2, 0, /* 0xf4 */
1784 {1, 1, 3, 0, /* 0xf5 */
1791 {0, 1, 2, 0, /* 0xf6 */
1798 {1, 1, 2, 0, /* 0xf7 */
1805 {0, 1, 1, 0, /* 0xf8 */
1812 {1, 1, 2, 0, /* 0xf9 */
1819 {0, 1, 2, 0, /* 0xfa */
1826 {1, 1, 2, 0, /* 0xfb */
1833 {0, 1, 1, 0, /* 0xfc */
1840 {1, 1, 2, 0, /* 0xfd */
1847 {0, 1, 1, 0, /* 0xfe */
	{1, 1, 1, 0,		/* 0xff */
		{{0, 7},
		{0, 0},
		{0, 0},
		{0, 0}
		}
	}
};
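/*
 * Illustrative sketch (compiled out, not live code): how one sack_array
 * entry relates to its index byte.  The table above precomputes, for every
 * possible byte of the receive mapping array, the gap runs it contains so
 * that SACK generation can use table lookups instead of per-bit loops.
 * From the initializers: right_edge is 1 when bit 0 of the byte is set,
 * left_edge is 1 when bit 7 is set (so a run can merge with the run in the
 * neighbouring byte), the third field counts runs of consecutive 1-bits,
 * and gaps[] holds the 0-based start/end bit of each run.  The start/end
 * member names are assumed to match struct sctp_gap_ack_block.
 */
#if 0	/* example only, never compiled */
static struct sack_track
sack_track_from_byte(uint8_t val)
{
	struct sack_track st;
	int bit, run = -1;

	memset(&st, 0, sizeof(st));
	st.right_edge = (val & 0x01) ? 1 : 0;
	st.left_edge = (val & 0x80) ? 1 : 0;
	for (bit = 0; bit < 8; bit++) {
		if (val & (1 << bit)) {
			if (run < 0) {
				/* a new run of set bits starts here */
				run = st.num_entries++;
				st.gaps[run].start = bit;
			}
			st.gaps[run].end = bit;
		} else {
			run = -1;	/* the current run (if any) has ended */
		}
	}
	return (st);
}
#endif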
static int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
    int ipv4_addr_legal,
    int ipv6_addr_legal,
    int loopback_scope,
    int ipv4_local_scope,
    int local_scope SCTP_UNUSED,	/* XXX */
    int site_scope, int do_update)
{
1874 if ((loopback_scope == 0) &&
1875 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
/* skip loopback if not in scope */
1881 switch (ifa->address.sa.sa_family) {
1884 if (ipv4_addr_legal) {
1885 struct sockaddr_in *sin;
1887 sin = (struct sockaddr_in *)&ifa->address.sin;
1888 if (sin->sin_addr.s_addr == 0) {
1889 /* not in scope , unspecified */
1892 if ((ipv4_local_scope == 0) &&
1893 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1894 /* private address not in scope */
1904 if (ipv6_addr_legal) {
1905 struct sockaddr_in6 *sin6;
1908 * Must update the flags, bummer, which means any
1909 * IFA locks must now be applied HERE <->
1912 sctp_gather_internal_ifa_flags(ifa);
1914 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1917 /* ok to use deprecated addresses? */
1918 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1919 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
/* skip unspecified addresses */
1923 if ( /* (local_scope == 0) && */
1924 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1927 if ((site_scope == 0) &&
1928 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1942 static struct mbuf *
1943 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t * len)
1945 struct sctp_paramhdr *parmh;
1949 switch (ifa->address.sa.sa_family) {
1952 plen = (uint16_t) sizeof(struct sctp_ipv4addr_param);
1957 plen = (uint16_t) sizeof(struct sctp_ipv6addr_param);
1963 if (M_TRAILINGSPACE(m) >= plen) {
1964 /* easy side we just drop it on the end */
1965 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1968 /* Need more space */
1970 while (SCTP_BUF_NEXT(mret) != NULL) {
1971 mret = SCTP_BUF_NEXT(mret);
1973 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_DONTWAIT, 1, MT_DATA);
1974 if (SCTP_BUF_NEXT(mret) == NULL) {
1975 /* We are hosed, can't add more addresses */
1978 mret = SCTP_BUF_NEXT(mret);
1979 parmh = mtod(mret, struct sctp_paramhdr *);
1981 /* now add the parameter */
1982 switch (ifa->address.sa.sa_family) {
1986 struct sctp_ipv4addr_param *ipv4p;
1987 struct sockaddr_in *sin;
1989 sin = (struct sockaddr_in *)&ifa->address.sin;
1990 ipv4p = (struct sctp_ipv4addr_param *)parmh;
1991 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
1992 parmh->param_length = htons(plen);
1993 ipv4p->addr = sin->sin_addr.s_addr;
1994 SCTP_BUF_LEN(mret) += plen;
2001 struct sctp_ipv6addr_param *ipv6p;
2002 struct sockaddr_in6 *sin6;
2004 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
2005 ipv6p = (struct sctp_ipv6addr_param *)parmh;
2006 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
2007 parmh->param_length = htons(plen);
2008 memcpy(ipv6p->addr, &sin6->sin6_addr,
2009 sizeof(ipv6p->addr));
2010 /* clear embedded scope in the address */
2011 in6_clearscope((struct in6_addr *)ipv6p->addr);
2012 SCTP_BUF_LEN(mret) += plen;
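/*
 * For reference (RFC 4960 parameter formats; illustrative, compiled out):
 * the address parameters appended above are simple TLVs.  An IPv4 Address
 * parameter is type 5 (SCTP_IPV4_ADDRESS) with a total length of 8 bytes
 * and an IPv6 Address parameter is type 6 (SCTP_IPV6_ADDRESS) with a total
 * length of 20 bytes; plen above is the sizeof() of the matching structure.
 * Field names below are illustrative only.
 */
#if 0	/* example only, never compiled */
struct example_ipv4_addr_param {
	uint16_t param_type;	/* htons(SCTP_IPV4_ADDRESS), i.e. 5 */
	uint16_t param_length;	/* htons(8) */
	uint32_t addr;		/* the address, network byte order */
};

struct example_ipv6_addr_param {
	uint16_t param_type;	/* htons(SCTP_IPV6_ADDRESS), i.e. 6 */
	uint16_t param_length;	/* htons(20) */
	uint8_t addr[16];	/* the address */
};
#endif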
2027 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2028 struct sctp_scoping *scope,
2029 struct mbuf *m_at, int cnt_inits_to,
2030 uint16_t * padding_len, uint16_t * chunk_len)
2032 struct sctp_vrf *vrf = NULL;
2033 int cnt, limit_out = 0, total_count;
2036 vrf_id = inp->def_vrf_id;
2037 SCTP_IPI_ADDR_RLOCK();
2038 vrf = sctp_find_vrf(vrf_id);
2040 SCTP_IPI_ADDR_RUNLOCK();
2043 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2044 struct sctp_ifa *sctp_ifap;
2045 struct sctp_ifn *sctp_ifnp;
2048 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2050 cnt = SCTP_ADDRESS_LIMIT;
2053 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2054 if ((scope->loopback_scope == 0) &&
2055 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2057 * Skip loopback devices if loopback_scope
2062 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2063 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2066 if (sctp_is_address_in_scope(sctp_ifap,
2067 scope->ipv4_addr_legal,
2068 scope->ipv6_addr_legal,
2069 scope->loopback_scope,
2070 scope->ipv4_local_scope,
2072 scope->site_scope, 1) == 0) {
2076 if (cnt > SCTP_ADDRESS_LIMIT) {
2080 if (cnt > SCTP_ADDRESS_LIMIT) {
2087 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2089 if ((scope->loopback_scope == 0) &&
2090 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2092 * Skip loopback devices if
2093 * loopback_scope not set
2097 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2098 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2101 if (sctp_is_address_in_scope(sctp_ifap,
2102 scope->ipv4_addr_legal,
2103 scope->ipv6_addr_legal,
2104 scope->loopback_scope,
2105 scope->ipv4_local_scope,
2107 scope->site_scope, 0) == 0) {
2110 if ((chunk_len != NULL) &&
2111 (padding_len != NULL) &&
2112 (*padding_len > 0)) {
2113 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2114 SCTP_BUF_LEN(m_at) += *padding_len;
2115 *chunk_len += *padding_len;
2118 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2129 if (total_count > SCTP_ADDRESS_LIMIT) {
2130 /* No more addresses */
2138 struct sctp_laddr *laddr;
2141 /* First, how many ? */
2142 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2143 if (laddr->ifa == NULL) {
2146 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
* Address being deleted by the system, don't
2152 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2154 * Address being deleted on this ep don't
2159 if (sctp_is_address_in_scope(laddr->ifa,
2160 scope->ipv4_addr_legal,
2161 scope->ipv6_addr_legal,
2162 scope->loopback_scope,
2163 scope->ipv4_local_scope,
2165 scope->site_scope, 1) == 0) {
2171 * To get through a NAT we only list addresses if we have
2172 * more than one. That way if you just bind a single address
2173 * we let the source of the init dictate our address.
2177 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2178 if (laddr->ifa == NULL) {
2181 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2184 if (sctp_is_address_in_scope(laddr->ifa,
2185 scope->ipv4_addr_legal,
2186 scope->ipv6_addr_legal,
2187 scope->loopback_scope,
2188 scope->ipv4_local_scope,
2190 scope->site_scope, 0) == 0) {
2193 if ((chunk_len != NULL) &&
2194 (padding_len != NULL) &&
2195 (*padding_len > 0)) {
2196 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2197 SCTP_BUF_LEN(m_at) += *padding_len;
2198 *chunk_len += *padding_len;
2201 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2203 if (cnt >= SCTP_ADDRESS_LIMIT) {
2209 SCTP_IPI_ADDR_RUNLOCK();
2213 static struct sctp_ifa *
2214 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2215 uint8_t dest_is_loop,
2216 uint8_t dest_is_priv,
2219 uint8_t dest_is_global = 0;
2221 /* dest_is_priv is true if destination is a private address */
2222 /* dest_is_loop is true if destination is a loopback addresses */
/*-
 * Here we determine if it's a preferred address. A preferred address
 * means it is the same scope or higher scope than the destination.
 * L = loopback, P = private, G = global
 * -----------------------------------------
 *    src    |  dest | result
 * -----------------------------------------
 *     L     |    L  |    yes
 * -----------------------------------------
 *     P     |    L  |    yes-v4 no-v6
 * -----------------------------------------
 *     G     |    L  |    yes-v4 no-v6
 * -----------------------------------------
 *     L     |    P  |    no
 * -----------------------------------------
 *     P     |    P  |    yes
 * -----------------------------------------
 *     G     |    P  |    no
 * -----------------------------------------
 *     L     |    G  |    no
 * -----------------------------------------
 *     P     |    G  |    no
 * -----------------------------------------
 *     G     |    G  |    yes
 * -----------------------------------------
 */
2251 if (ifa->address.sa.sa_family != fam) {
2252 /* forget mis-matched family */
2255 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2258 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2259 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2260 /* Ok the address may be ok */
2262 if (fam == AF_INET6) {
/* ok to use deprecated addresses? no, let's not! */
2264 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2265 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2268 if (ifa->src_is_priv && !ifa->src_is_loop) {
2270 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2274 if (ifa->src_is_glob) {
2276 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
 * Now that we know what is what, implement our table. This could in
 * theory be done slicker (it used to be), but this is
 * straightforward and easier to validate :-)
2287 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2288 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2289 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2290 dest_is_loop, dest_is_priv, dest_is_global);
2292 if ((ifa->src_is_loop) && (dest_is_priv)) {
2293 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2296 if ((ifa->src_is_glob) && (dest_is_priv)) {
2297 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2300 if ((ifa->src_is_loop) && (dest_is_global)) {
2301 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2304 if ((ifa->src_is_priv) && (dest_is_global)) {
2305 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2308 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
/* it's a preferred address */
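/*
 * Compact restatement (compiled out) of the rejection checks coded above:
 * a source is preferred only when its scope matches the destination's,
 * except that a loopback destination may also take private or global IPv4
 * sources (the yes-v4 rows in the table).
 */
#if 0	/* example only, never compiled */
static int
scope_is_preferred(uint8_t src_is_loop, uint8_t src_is_priv,
    uint8_t src_is_glob, uint8_t dest_is_priv, uint8_t dest_is_global)
{
	if (src_is_loop && (dest_is_priv || dest_is_global))
		return (0);
	if (src_is_glob && dest_is_priv)
		return (0);
	if (src_is_priv && dest_is_global)
		return (0);
	return (1);
}
#endif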
2313 static struct sctp_ifa *
2314 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2315 uint8_t dest_is_loop,
2316 uint8_t dest_is_priv,
2319 uint8_t dest_is_global = 0;
/*-
 * Here we determine if it's an acceptable address. An acceptable
 * address means it is the same scope or higher scope but we can
 * allow for NAT, which means it's ok to have a global dest and a
 * private src.
 *
 * L = loopback, P = private, G = global
 * -----------------------------------------
 *  src   |  dest  | result
 * -----------------------------------------
 *   L    |   L    |    yes
 * -----------------------------------------
 *   P    |   L    |    yes-v4 no-v6
 * -----------------------------------------
 *   G    |   L    |    yes
 * -----------------------------------------
 *   L    |   P    |    no
 * -----------------------------------------
 *   P    |   P    |    yes
 * -----------------------------------------
 *   G    |   P    |    yes - May not work
 * -----------------------------------------
 *   L    |   G    |    no
 * -----------------------------------------
 *   P    |   G    |    yes - May not work
 * -----------------------------------------
 *   G    |   G    |    yes
 * -----------------------------------------
 */
2351 if (ifa->address.sa.sa_family != fam) {
2352 /* forget non matching family */
2353 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2354 ifa->address.sa.sa_family, fam);
2357 /* Ok the address may be ok */
2358 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2359 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2360 dest_is_loop, dest_is_priv);
2361 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2365 if (fam == AF_INET6) {
2366 /* ok to use deprecated addresses? */
2367 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2370 if (ifa->src_is_priv) {
2371 /* Special case, linklocal to loop */
2378 * Now that we know what is what, implement our table. This could in
2379 * theory be done slicker (it used to be), but this is
2380 * straightforward and easier to validate :-)
2382 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2385 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2388 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2391 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2394 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
/* it's an acceptable address */
2400 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2402 struct sctp_laddr *laddr;
2405 /* There are no restrictions, no TCB :-) */
2408 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2409 if (laddr->ifa == NULL) {
2410 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2414 if (laddr->ifa == ifa) {
2415 /* Yes it is on the list */
2424 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2426 struct sctp_laddr *laddr;
2430 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2431 if (laddr->ifa == NULL) {
2432 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2436 if ((laddr->ifa == ifa) && laddr->action == 0)
2445 static struct sctp_ifa *
2446 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2449 int non_asoc_addr_ok,
2450 uint8_t dest_is_priv,
2451 uint8_t dest_is_loop,
2454 struct sctp_laddr *laddr, *starting_point;
2457 struct sctp_ifn *sctp_ifn;
2458 struct sctp_ifa *sctp_ifa, *sifa;
2459 struct sctp_vrf *vrf;
2462 vrf = sctp_find_vrf(vrf_id);
2466 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2467 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2468 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2470 * first question, is the ifn we will emit on in our list, if so, we
2471 * want such an address. Note that we first looked for a preferred
2475 /* is a preferred one on the interface we route out? */
2476 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2477 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2478 (non_asoc_addr_ok == 0))
2480 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2485 if (sctp_is_addr_in_ep(inp, sifa)) {
2486 atomic_add_int(&sifa->refcount, 1);
 * ok, now we need to find one on the list of the addresses. We
 * can't get one on the emitting interface so let's first find a
 * preferred one. If not that, then an acceptable one; otherwise... we
2497 starting_point = inp->next_addr_touse;
2499 if (inp->next_addr_touse == NULL) {
2500 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2503 for (laddr = inp->next_addr_touse; laddr;
2504 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2505 if (laddr->ifa == NULL) {
2506 /* address has been removed */
2509 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2510 /* address is being deleted */
2513 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2517 atomic_add_int(&sifa->refcount, 1);
2520 if (resettotop == 0) {
2521 inp->next_addr_touse = NULL;
2524 inp->next_addr_touse = starting_point;
2527 if (inp->next_addr_touse == NULL) {
2528 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2531 /* ok, what about an acceptable address in the inp */
2532 for (laddr = inp->next_addr_touse; laddr;
2533 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2534 if (laddr->ifa == NULL) {
2535 /* address has been removed */
2538 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2539 /* address is being deleted */
2542 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2546 atomic_add_int(&sifa->refcount, 1);
2549 if (resettotop == 0) {
2550 inp->next_addr_touse = NULL;
2551 goto once_again_too;
2554 * no address bound can be a source for the destination we are in
2562 static struct sctp_ifa *
2563 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2564 struct sctp_tcb *stcb,
2567 uint8_t dest_is_priv,
2568 uint8_t dest_is_loop,
2569 int non_asoc_addr_ok,
2572 struct sctp_laddr *laddr, *starting_point;
2574 struct sctp_ifn *sctp_ifn;
2575 struct sctp_ifa *sctp_ifa, *sifa;
2576 uint8_t start_at_beginning = 0;
2577 struct sctp_vrf *vrf;
2581 * first question, is the ifn we will emit on in our list, if so, we
2584 vrf = sctp_find_vrf(vrf_id);
2588 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2589 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2590 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2593 * first question, is the ifn we will emit on in our list? If so,
2594 * we want that one. First we look for a preferred. Second, we go
2595 * for an acceptable.
2598 /* first try for a preferred address on the ep */
2599 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2600 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2602 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2603 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2606 if (((non_asoc_addr_ok == 0) &&
2607 (sctp_is_addr_restricted(stcb, sifa))) ||
2608 (non_asoc_addr_ok &&
2609 (sctp_is_addr_restricted(stcb, sifa)) &&
2610 (!sctp_is_addr_pending(stcb, sifa)))) {
2611 /* on the no-no list */
2614 atomic_add_int(&sifa->refcount, 1);
2618 /* next try for an acceptable address on the ep */
2619 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2620 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2622 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2623 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2626 if (((non_asoc_addr_ok == 0) &&
2627 (sctp_is_addr_restricted(stcb, sifa))) ||
2628 (non_asoc_addr_ok &&
2629 (sctp_is_addr_restricted(stcb, sifa)) &&
2630 (!sctp_is_addr_pending(stcb, sifa)))) {
2631 /* on the no-no list */
2634 atomic_add_int(&sifa->refcount, 1);
 * if we can't find one like that then we must look at all addresses
 * bound to pick one: first a preferred one, then an acceptable one.
2644 starting_point = stcb->asoc.last_used_address;
2646 if (stcb->asoc.last_used_address == NULL) {
2647 start_at_beginning = 1;
2648 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2650 /* search beginning with the last used address */
2651 for (laddr = stcb->asoc.last_used_address; laddr;
2652 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2653 if (laddr->ifa == NULL) {
2654 /* address has been removed */
2657 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2658 /* address is being deleted */
2661 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2664 if (((non_asoc_addr_ok == 0) &&
2665 (sctp_is_addr_restricted(stcb, sifa))) ||
2666 (non_asoc_addr_ok &&
2667 (sctp_is_addr_restricted(stcb, sifa)) &&
2668 (!sctp_is_addr_pending(stcb, sifa)))) {
2669 /* on the no-no list */
2672 stcb->asoc.last_used_address = laddr;
2673 atomic_add_int(&sifa->refcount, 1);
2676 if (start_at_beginning == 0) {
2677 stcb->asoc.last_used_address = NULL;
2678 goto sctp_from_the_top;
2680 /* now try for any higher scope than the destination */
2681 stcb->asoc.last_used_address = starting_point;
2682 start_at_beginning = 0;
2684 if (stcb->asoc.last_used_address == NULL) {
2685 start_at_beginning = 1;
2686 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2688 /* search beginning with the last used address */
2689 for (laddr = stcb->asoc.last_used_address; laddr;
2690 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2691 if (laddr->ifa == NULL) {
2692 /* address has been removed */
2695 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2696 /* address is being deleted */
2699 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2703 if (((non_asoc_addr_ok == 0) &&
2704 (sctp_is_addr_restricted(stcb, sifa))) ||
2705 (non_asoc_addr_ok &&
2706 (sctp_is_addr_restricted(stcb, sifa)) &&
2707 (!sctp_is_addr_pending(stcb, sifa)))) {
2708 /* on the no-no list */
2711 stcb->asoc.last_used_address = laddr;
2712 atomic_add_int(&sifa->refcount, 1);
2715 if (start_at_beginning == 0) {
2716 stcb->asoc.last_used_address = NULL;
2717 goto sctp_from_the_top2;
2722 static struct sctp_ifa *
2723 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2724 struct sctp_tcb *stcb,
2725 int non_asoc_addr_ok,
2726 uint8_t dest_is_loop,
2727 uint8_t dest_is_priv,
2733 struct sctp_ifa *ifa, *sifa;
2734 int num_eligible_addr = 0;
2737 struct sockaddr_in6 sin6, lsa6;
2739 if (fam == AF_INET6) {
2740 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2741 (void)sa6_recoverscope(&sin6);
2744 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2745 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2746 (non_asoc_addr_ok == 0))
2748 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2753 if (fam == AF_INET6 &&
2755 sifa->src_is_loop && sifa->src_is_priv) {
2757 * don't allow fe80::1 to be a src on loop ::1, we
2758 * don't list it to the peer so we will get an
2763 if (fam == AF_INET6 &&
2764 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2765 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2767 * link-local <-> link-local must belong to the same
2770 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2771 (void)sa6_recoverscope(&lsa6);
2772 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
 * Check if the IPv6 address matches the next-hop. In the
 * mobile case, an old IPv6 address may not be deleted from the
 * interface. Then, the interface has previous and new
 * addresses. We should use the one corresponding to the
 * next-hop. (by micchie)
2786 if (stcb && fam == AF_INET6 &&
2787 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2788 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2795 /* Avoid topologically incorrect IPv4 address */
2796 if (stcb && fam == AF_INET &&
2797 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2798 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2804 if (sctp_is_address_in_scope(ifa,
2805 stcb->asoc.ipv4_addr_legal,
2806 stcb->asoc.ipv6_addr_legal,
2807 stcb->asoc.loopback_scope,
2808 stcb->asoc.ipv4_local_scope,
2809 stcb->asoc.local_scope,
2810 stcb->asoc.site_scope, 0) == 0) {
2813 if (((non_asoc_addr_ok == 0) &&
2814 (sctp_is_addr_restricted(stcb, sifa))) ||
2815 (non_asoc_addr_ok &&
2816 (sctp_is_addr_restricted(stcb, sifa)) &&
2817 (!sctp_is_addr_pending(stcb, sifa)))) {
2819 * It is restricted for some reason..
2820 * probably not yet added.
2825 if (num_eligible_addr >= addr_wanted) {
2828 num_eligible_addr++;
2835 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2836 struct sctp_tcb *stcb,
2837 int non_asoc_addr_ok,
2838 uint8_t dest_is_loop,
2839 uint8_t dest_is_priv,
2842 struct sctp_ifa *ifa, *sifa;
2843 int num_eligible_addr = 0;
2845 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2846 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2847 (non_asoc_addr_ok == 0)) {
2850 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2856 if (sctp_is_address_in_scope(ifa,
2857 stcb->asoc.ipv4_addr_legal,
2858 stcb->asoc.ipv6_addr_legal,
2859 stcb->asoc.loopback_scope,
2860 stcb->asoc.ipv4_local_scope,
2861 stcb->asoc.local_scope,
2862 stcb->asoc.site_scope, 0) == 0) {
2865 if (((non_asoc_addr_ok == 0) &&
2866 (sctp_is_addr_restricted(stcb, sifa))) ||
2867 (non_asoc_addr_ok &&
2868 (sctp_is_addr_restricted(stcb, sifa)) &&
2869 (!sctp_is_addr_pending(stcb, sifa)))) {
2871 * It is restricted for some reason..
2872 * probably not yet added.
2877 num_eligible_addr++;
2879 return (num_eligible_addr);
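/*
 * Overview of the bound-all source selection implemented below: Plan A
 * picks the n-th preferred address on the interface the route goes out,
 * rotating via net->indx_of_eligible_next_to_use.  Plan B looks for a
 * preferred address on any other interface.  Plan C settles for an
 * acceptable address on the emit interface.  Plan D settles for an
 * acceptable address on any interface.  If even that fails and the IPv4
 * local scope was off, the search is retried once with private addresses
 * allowed.
 */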
2882 static struct sctp_ifa *
2883 sctp_choose_boundall(struct sctp_tcb *stcb,
2884 struct sctp_nets *net,
2887 uint8_t dest_is_priv,
2888 uint8_t dest_is_loop,
2889 int non_asoc_addr_ok,
2892 int cur_addr_num = 0, num_preferred = 0;
2894 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2895 struct sctp_ifa *sctp_ifa, *sifa;
2897 struct sctp_vrf *vrf;
2905 * For boundall we can use any address in the association.
2906 * If non_asoc_addr_ok is set we can use any address (at least in
2907 * theory). So we look for preferred addresses first. If we find one,
2908 * we use it. Otherwise we next try to get an address on the
2909 * interface, which we should be able to do (unless non_asoc_addr_ok
2910 * is false and we are routed out that way). In these cases where we
2911 * can't use the address of the interface we go through all the
2912 * ifn's looking for an address we can use and fill that in. Punting
2913 * means we send back address 0, which will probably cause problems
2914 * actually since then IP will fill in the address of the route ifn,
2915 * which means we probably already rejected it.. i.e. here comes an
2918 vrf = sctp_find_vrf(vrf_id);
2922 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2923 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2924 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2925 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2926 if (sctp_ifn == NULL) {
2927 /* ?? We don't have this guy ?? */
2928 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2929 goto bound_all_plan_b;
2931 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
2932 ifn_index, sctp_ifn->ifn_name);
2935 cur_addr_num = net->indx_of_eligible_next_to_use;
2937 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
2942 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
2943 num_preferred, sctp_ifn->ifn_name);
2944 if (num_preferred == 0) {
2946 * no eligible addresses, we must use some other interface
2947 * address if we can find one.
2949 goto bound_all_plan_b;
2952 * Ok we have num_eligible_addr set with how many we can use, this
2953 * may vary from call to call due to addresses being deprecated
2956 if (cur_addr_num >= num_preferred) {
2960 * select the nth address from the list (where cur_addr_num is the
2961 * nth) and 0 is the first one, 1 is the second one etc...
2963 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
2965 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2966 dest_is_priv, cur_addr_num, fam, ro);
2968 /* if sctp_ifa is NULL something changed??, fall to plan b. */
2970 atomic_add_int(&sctp_ifa->refcount, 1);
2972 /* save off where the next one we will want */
2973 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
2978 * plan_b: Look at all interfaces and find a preferred address. If
2979 * no preferred fall through to plan_c.
2982 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
2983 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
2984 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
2985 sctp_ifn->ifn_name);
2986 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
2987 /* wrong base scope */
2988 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
2991 if ((sctp_ifn == looked_at) && looked_at) {
2992 /* already looked at this guy */
2993 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
2996 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
2997 dest_is_loop, dest_is_priv, fam);
2998 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2999 "Found ifn:%p %d preferred source addresses\n",
3000 ifn, num_preferred);
3001 if (num_preferred == 0) {
3002 /* None on this interface. */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3006 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3007 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3008 num_preferred, (void *)sctp_ifn, cur_addr_num);
3011 * Ok we have num_eligible_addr set with how many we can
3012 * use, this may vary from call to call due to addresses
3013 * being deprecated etc..
3015 if (cur_addr_num >= num_preferred) {
3018 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
3019 dest_is_priv, cur_addr_num, fam, ro);
3023 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3024 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3026 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3027 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3028 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3029 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3031 atomic_add_int(&sifa->refcount, 1);
3035 again_with_private_addresses_allowed:
3037 /* plan_c: do we have an acceptable address on the emit interface */
3039 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3040 if (emit_ifn == NULL) {
3041 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3044 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3045 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3046 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3047 (non_asoc_addr_ok == 0)) {
3048 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3051 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3054 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3058 if (sctp_is_address_in_scope(sifa,
3059 stcb->asoc.ipv4_addr_legal,
3060 stcb->asoc.ipv6_addr_legal,
3061 stcb->asoc.loopback_scope,
3062 stcb->asoc.ipv4_local_scope,
3063 stcb->asoc.local_scope,
3064 stcb->asoc.site_scope, 0) == 0) {
3065 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3069 if (((non_asoc_addr_ok == 0) &&
3070 (sctp_is_addr_restricted(stcb, sifa))) ||
3071 (non_asoc_addr_ok &&
3072 (sctp_is_addr_restricted(stcb, sifa)) &&
3073 (!sctp_is_addr_pending(stcb, sifa)))) {
3075 * It is restricted for some reason..
3076 * probably not yet added.
SCTPDBG(SCTP_DEBUG_OUTPUT2, "It's restricted\n");
3083 SCTP_PRINTF("Stcb is null - no print\n");
3085 atomic_add_int(&sifa->refcount, 1);
3090 * plan_d: We are in trouble. No preferred address on the emit
3091 * interface. And not even a preferred address on all interfaces. Go
3092 * out and see if we can find an acceptable address somewhere
3093 * amongst all interfaces.
3095 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3096 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3097 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3098 /* wrong base scope */
3101 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3102 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3103 (non_asoc_addr_ok == 0))
3105 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3111 if (sctp_is_address_in_scope(sifa,
3112 stcb->asoc.ipv4_addr_legal,
3113 stcb->asoc.ipv6_addr_legal,
3114 stcb->asoc.loopback_scope,
3115 stcb->asoc.ipv4_local_scope,
3116 stcb->asoc.local_scope,
3117 stcb->asoc.site_scope, 0) == 0) {
3121 if (((non_asoc_addr_ok == 0) &&
3122 (sctp_is_addr_restricted(stcb, sifa))) ||
3123 (non_asoc_addr_ok &&
3124 (sctp_is_addr_restricted(stcb, sifa)) &&
3125 (!sctp_is_addr_pending(stcb, sifa)))) {
3127 * It is restricted for some
3128 * reason.. probably not yet added.
3138 if ((retried == 0) && (stcb->asoc.ipv4_local_scope == 0)) {
3139 stcb->asoc.ipv4_local_scope = 1;
3141 goto again_with_private_addresses_allowed;
3142 } else if (retried == 1) {
3143 stcb->asoc.ipv4_local_scope = 0;
3150 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3151 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3152 /* wrong base scope */
3155 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3156 struct sctp_ifa *tmp_sifa;
3158 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3159 (non_asoc_addr_ok == 0))
3161 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3164 if (tmp_sifa == NULL) {
3167 if (tmp_sifa == sifa) {
3171 if (sctp_is_address_in_scope(tmp_sifa,
3172 stcb->asoc.ipv4_addr_legal,
3173 stcb->asoc.ipv6_addr_legal,
3174 stcb->asoc.loopback_scope,
3175 stcb->asoc.ipv4_local_scope,
3176 stcb->asoc.local_scope,
3177 stcb->asoc.site_scope, 0) == 0) {
3180 if (((non_asoc_addr_ok == 0) &&
3181 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3182 (non_asoc_addr_ok &&
3183 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3184 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3194 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3195 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3196 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3201 atomic_add_int(&sifa->refcount, 1);
3209 /* tcb may be NULL */
3211 sctp_source_address_selection(struct sctp_inpcb *inp,
3212 struct sctp_tcb *stcb,
3214 struct sctp_nets *net,
3215 int non_asoc_addr_ok, uint32_t vrf_id)
3217 struct sctp_ifa *answer;
3218 uint8_t dest_is_priv, dest_is_loop;
3222 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3226 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3231 * Rules: - Find the route if needed, cache if I can. - Look at
3232 * interface address in route, Is it in the bound list. If so we
3233 * have the best source. - If not we must rotate amongst the
3238 * Do we need to pay attention to scope. We can have a private address
3239 * or a global address we are sourcing or sending to. So if we draw
3241 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3243 * ------------------------------------------
3244 * source * dest * result
3245 * -----------------------------------------
3246 * <a> Private * Global * NAT
3247 * -----------------------------------------
3248 * <b> Private * Private * No problem
3249 * -----------------------------------------
3250 * <c> Global * Private * Huh, How will this work?
3251 * -----------------------------------------
3252 * <d> Global * Global * No Problem
3253 *------------------------------------------
3254 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3256 *------------------------------------------
3257 * source * dest * result
3258 * -----------------------------------------
3259 * <a> Linklocal * Global *
3260 * -----------------------------------------
3261 * <b> Linklocal * Linklocal * No problem
3262 * -----------------------------------------
3263 * <c> Global * Linklocal * Huh, How will this work?
3264 * -----------------------------------------
3265 * <d> Global * Global * No Problem
3266 *------------------------------------------
3267 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3269 * And then we add to that what happens if there are multiple addresses
3270 * assigned to an interface. Remember the ifa on a ifn is a linked
3271 * list of addresses. So one interface can have more than one IP
3272 * address. What happens if we have both a private and a global
3273 * address? Do we then use context of destination to sort out which
3274 * one is best? And what about NAT's sending P->G may get you a NAT
3275 * translation, or should you select the G thats on the interface in
3280 * - count the number of addresses on the interface.
3281 * - if it is one, no problem except case <c>.
3282 * For <a> we will assume a NAT out there.
3283 * - if there are more than one, then we need to worry about scope P
3284 * or G. We should prefer G -> G and P -> P if possible.
3285 * Then as a secondary fall back to mixed types G->P being a last
3287 * - The above all works for bound all, but bound specific we need to
3288 * use the same concept but instead only consider the bound
3289 * addresses. If the bound set is NOT assigned to the interface then
3290 * we must use rotation amongst the bound addresses..
3292 if (ro->ro_rt == NULL) {
3294 * Need a route to cache.
3296 SCTP_RTALLOC(ro, vrf_id);
3298 if (ro->ro_rt == NULL) {
3301 fam = ro->ro_dst.sa_family;
3302 dest_is_priv = dest_is_loop = 0;
3303 /* Setup our scopes for the destination */
3307 /* Scope based on outbound address */
3308 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3311 /* mark it as local */
3312 net->addr_is_local = 1;
3314 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3321 /* Scope based on outbound address */
3322 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3323 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3325 * If the address is a loopback address, which
3326 * consists of "::1" OR "fe80::1%lo0", we are
3327 * loopback scope. But we don't use dest_is_priv
3328 * (link local addresses).
3332 /* mark it as local */
3333 net->addr_is_local = 1;
3335 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3341 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3342 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3343 SCTP_IPI_ADDR_RLOCK();
3344 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3348 answer = sctp_choose_boundall(stcb, net, ro, vrf_id,
3349 dest_is_priv, dest_is_loop,
3350 non_asoc_addr_ok, fam);
3351 SCTP_IPI_ADDR_RUNLOCK();
3358 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3359 vrf_id, dest_is_priv,
3361 non_asoc_addr_ok, fam);
3363 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3368 SCTP_IPI_ADDR_RUNLOCK();
3373 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3376 int tlen, at, found;
3377 struct sctp_sndinfo sndinfo;
3378 struct sctp_prinfo prinfo;
3379 struct sctp_authinfo authinfo;
3381 tlen = SCTP_BUF_LEN(control);
3385 * Independent of how many mbufs, find the c_type inside the control
3386 * structure and copy out the data.
3389 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3390 /* There is not enough room for one more. */
3393 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3394 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
/* We don't have a complete CMSG header. */
3398 if (((int)cmh.cmsg_len + at) > tlen) {
3399 /* We don't have the complete CMSG. */
3402 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3403 ((c_type == cmh.cmsg_type) ||
3404 ((c_type == SCTP_SNDRCV) &&
3405 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3406 (cmh.cmsg_type == SCTP_PRINFO) ||
3407 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3408 if (c_type == cmh.cmsg_type) {
3409 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
3412 /* It is exactly what we want. Copy it out. */
3413 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data);
3416 struct sctp_sndrcvinfo *sndrcvinfo;
3418 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3420 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3423 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3425 switch (cmh.cmsg_type) {
3427 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
3430 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3431 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3432 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3433 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3434 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3435 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3438 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
3441 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3442 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3443 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3446 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
3449 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3450 sndrcvinfo->sinfo_keynumber_valid = 1;
3451 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3459 at += CMSG_ALIGN(cmh.cmsg_len);
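/*
 * Userland counterpart (illustrative, compiled out): how an application
 * typically builds the SCTP_SNDINFO ancillary data that sctp_find_cmsg()
 * walks above, using the standard CMSG macros.
 */
#if 0	/* example only, never compiled */
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct sctp_sndinfo sndinfo;
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDINFO;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
	memset(&sndinfo, 0, sizeof(sndinfo));
	sndinfo.snd_sid = 0;		/* stream number */
	sndinfo.snd_ppid = htonl(0);	/* payload protocol id */
	memcpy(CMSG_DATA(cmsg), &sndinfo, sizeof(sndinfo));
	/* msg.msg_iov / msg_iovlen would carry the payload for sendmsg(2) */
#endif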
3465 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3469 struct sctp_initmsg initmsg;
3472 struct sockaddr_in sin;
3476 struct sockaddr_in6 sin6;
3480 tlen = SCTP_BUF_LEN(control);
3483 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3484 /* There is not enough room for one more. */
3488 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3489 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
/* We don't have a complete CMSG header. */
3494 if (((int)cmh.cmsg_len + at) > tlen) {
3495 /* We don't have the complete CMSG. */
3499 if (cmh.cmsg_level == IPPROTO_SCTP) {
3500 switch (cmh.cmsg_type) {
3502 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
3506 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3507 if (initmsg.sinit_max_attempts)
3508 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3509 if (initmsg.sinit_num_ostreams)
3510 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3511 if (initmsg.sinit_max_instreams)
3512 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3513 if (initmsg.sinit_max_init_timeo)
3514 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3515 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3516 struct sctp_stream_out *tmp_str;
3519 /* Default is NOT correct */
3520 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3521 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3522 SCTP_TCB_UNLOCK(stcb);
3523 SCTP_MALLOC(tmp_str,
3524 struct sctp_stream_out *,
3525 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3527 SCTP_TCB_LOCK(stcb);
3528 if (tmp_str != NULL) {
3529 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3530 stcb->asoc.strmout = tmp_str;
3531 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3533 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3535 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3536 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3537 stcb->asoc.strmout[i].chunks_on_queues = 0;
3538 stcb->asoc.strmout[i].next_sequence_send = 0;
3539 stcb->asoc.strmout[i].stream_no = i;
3540 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3541 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
3546 case SCTP_DSTADDRV4:
3547 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3551 memset(&sin, 0, sizeof(struct sockaddr_in));
3552 sin.sin_family = AF_INET;
3553 sin.sin_len = sizeof(struct sockaddr_in);
3554 sin.sin_port = stcb->rport;
3555 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3556 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3557 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3558 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3562 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3563 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3570 case SCTP_DSTADDRV6:
3571 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3575 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3576 sin6.sin6_family = AF_INET6;
3577 sin6.sin6_len = sizeof(struct sockaddr_in6);
3578 sin6.sin6_port = stcb->rport;
3579 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3580 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3581 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3586 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3587 in6_sin6_2_sin(&sin, &sin6);
3588 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3589 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3590 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3594 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
3595 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3601 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
3602 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3612 at += CMSG_ALIGN(cmh.cmsg_len);
3617 static struct sctp_tcb *
3618 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3620 struct mbuf *control,
3621 struct sctp_nets **net_p,
3626 struct sctp_tcb *stcb;
3627 struct sockaddr *addr;
3630 struct sockaddr_in sin;
3634 struct sockaddr_in6 sin6;
3638 tlen = SCTP_BUF_LEN(control);
3641 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3642 /* There is not enough room for one more. */
3646 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3647 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
/* We don't have a complete CMSG header. */
3652 if (((int)cmh.cmsg_len + at) > tlen) {
3653 /* We don't have the complete CMSG. */
3657 if (cmh.cmsg_level == IPPROTO_SCTP) {
3658 switch (cmh.cmsg_type) {
3660 case SCTP_DSTADDRV4:
3661 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3665 memset(&sin, 0, sizeof(struct sockaddr_in));
3666 sin.sin_family = AF_INET;
3667 sin.sin_len = sizeof(struct sockaddr_in);
3668 sin.sin_port = port;
3669 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3670 addr = (struct sockaddr *)&sin;
3674 case SCTP_DSTADDRV6:
3675 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3679 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3680 sin6.sin6_family = AF_INET6;
3681 sin6.sin6_len = sizeof(struct sockaddr_in6);
3682 sin6.sin6_port = port;
3683 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3685 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3686 in6_sin6_2_sin(&sin, &sin6);
3687 addr = (struct sockaddr *)&sin;
3690 addr = (struct sockaddr *)&sin6;
3698 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3704 at += CMSG_ALIGN(cmh.cmsg_len);
3709 static struct mbuf *
3710 sctp_add_cookie(struct mbuf *init, int init_offset,
3711 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
3713 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3714 struct sctp_state_cookie *stc;
3715 struct sctp_paramhdr *ph;
3721 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3722 sizeof(struct sctp_paramhdr)), 0,
3723 M_DONTWAIT, 1, MT_DATA);
3727 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
3728 if (copy_init == NULL) {
3732 #ifdef SCTP_MBUF_LOGGING
3733 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3736 for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
3737 if (SCTP_BUF_IS_EXTENDED(mat)) {
3738 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3743 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3745 if (copy_initack == NULL) {
3747 sctp_m_freem(copy_init);
3750 #ifdef SCTP_MBUF_LOGGING
3751 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3754 for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
3755 if (SCTP_BUF_IS_EXTENDED(mat)) {
3756 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3761 /* easy side we just drop it on the end */
3762 ph = mtod(mret, struct sctp_paramhdr *);
3763 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3764 sizeof(struct sctp_paramhdr);
3765 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3766 sizeof(struct sctp_paramhdr));
3767 ph->param_type = htons(SCTP_STATE_COOKIE);
3768 ph->param_length = 0; /* fill in at the end */
3769 /* Fill in the stc cookie data */
3770 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3772 /* tack the INIT and then the INIT-ACK onto the chain */
3774 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3775 cookie_sz += SCTP_BUF_LEN(m_at);
3776 if (SCTP_BUF_NEXT(m_at) == NULL) {
3777 SCTP_BUF_NEXT(m_at) = copy_init;
3781 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3782 cookie_sz += SCTP_BUF_LEN(m_at);
3783 if (SCTP_BUF_NEXT(m_at) == NULL) {
3784 SCTP_BUF_NEXT(m_at) = copy_initack;
3788 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3789 cookie_sz += SCTP_BUF_LEN(m_at);
3790 if (SCTP_BUF_NEXT(m_at) == NULL) {
3794 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
3796 /* no space, so free the entire chain */
3800 SCTP_BUF_LEN(sig) = 0;
3801 SCTP_BUF_NEXT(m_at) = sig;
3803 foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
3804 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3806 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3807 cookie_sz += SCTP_SIGNATURE_SIZE;
3808 ph->param_length = htons(cookie_sz);
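/*
 * Resulting layout of the STATE-COOKIE parameter assembled above, for
 * reference: a struct sctp_paramhdr (type SCTP_STATE_COOKIE, length filled
 * in last), the struct sctp_state_cookie copied from stc_in, a copy of the
 * peer's INIT chunk, a copy of our INIT-ACK chunk, and finally
 * SCTP_SIGNATURE_SIZE bytes which are zeroed here and later overwritten
 * with the HMAC signature.
 */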
3814 sctp_get_ect(struct sctp_tcb *stcb)
3816 if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
3817 return (SCTP_ECT0_BIT);
3823 #if defined(INET) || defined(INET6)
3825 sctp_handle_no_route(struct sctp_tcb *stcb,
3826 struct sctp_nets *net,
3829 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3832 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3833 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3834 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3835 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3836 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
3837 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3841 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3842 net->dest_state &= ~SCTP_ADDR_PF;
3846 if (net == stcb->asoc.primary_destination) {
3847 /* need a new primary */
3848 struct sctp_nets *alt;
3850 alt = sctp_find_alternate_net(stcb, net, 0);
3852 if (stcb->asoc.alternate) {
3853 sctp_free_remote_addr(stcb->asoc.alternate);
3855 stcb->asoc.alternate = alt;
3856 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3857 if (net->ro._s_addr) {
3858 sctp_free_ifa(net->ro._s_addr);
3859 net->ro._s_addr = NULL;
3861 net->src_addr_selected = 0;
3871 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3872 struct sctp_tcb *stcb, /* may be NULL */
3873 struct sctp_nets *net,
3874 struct sockaddr *to,
3876 uint32_t auth_offset,
3877 struct sctp_auth_chunk *auth,
3878 uint16_t auth_keyid,
3879 int nofragment_flag,
3886 union sctp_sockstore *over_addr,
3887 uint8_t use_mflowid, uint32_t mflowid,
3888 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3889 int so_locked SCTP_UNUSED
3894 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3897 * Given an mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3898 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3899 * - fill in the HMAC digest of any AUTH chunk in the packet.
3900 * - calculate and fill in the SCTP checksum.
3901 * - prepend an IP address header.
3902 * - if boundall use INADDR_ANY.
3903 * - if boundspecific do source address selection.
3904 * - set the fragmentation option for IPv4.
3905 * - On return from IP output, check/adjust mtu size of output
3906 * interface and smallest_mtu size as well.
3908 /* Will need ifdefs around this */
3910 struct sctphdr *sctphdr;
3915 #if defined(INET) || defined(INET6)
3917 sctp_route_t *ro = NULL;
3918 struct udphdr *udp = NULL;
3923 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3924 struct socket *so = NULL;
3928 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3929 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
3934 vrf_id = stcb->asoc.vrf_id;
3936 vrf_id = inp->def_vrf_id;
3939 /* fill in the HMAC digest for any AUTH chunk in the packet */
3940 if ((auth != NULL) && (stcb != NULL)) {
3941 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
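/*
 * DSCP selection: a value set on the destination overrides the
 * association default, which in turn overrides the endpoint default.
 */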
3944 tos_value = net->dscp;
3946 tos_value = stcb->asoc.default_dscp;
3948 tos_value = inp->sctp_ep.default_dscp;
3951 switch (to->sa_family) {
3955 struct ip *ip = NULL;
3956 sctp_route_t iproute;
3959 len = sizeof(struct ip) + sizeof(struct sctphdr);
3961 len += sizeof(struct udphdr);
3963 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
3966 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3969 SCTP_ALIGN_TO_END(newm, len);
3970 SCTP_BUF_LEN(newm) = len;
3971 SCTP_BUF_NEXT(newm) = m;
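/*
 * Stamp the outgoing packet with a flow id (either the cached
 * per-destination id or the one supplied by the caller) so that
 * multi-queue interfaces keep this flow on a single transmit queue.
 */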
3975 if (net->flowidset == 0) {
3976 panic("Flow ID not set");
3979 m->m_pkthdr.flowid = net->flowid;
3980 m->m_flags |= M_FLOWID;
3982 if (use_mflowid != 0) {
3983 m->m_pkthdr.flowid = mflowid;
3984 m->m_flags |= M_FLOWID;
3987 packet_length = sctp_calculate_len(m);
3988 ip = mtod(m, struct ip *);
3989 ip->ip_v = IPVERSION;
3990 ip->ip_hl = (sizeof(struct ip) >> 2);
3991 if (tos_value == 0) {
3993 * In particular, this means that it is not set
3994 * at the SCTP layer, so use the value from the IP layer.
3997 tos_value = inp->ip_inp.inp.inp_ip_tos;
4001 tos_value |= sctp_get_ect(stcb);
4003 if ((nofragment_flag) && (port == 0)) {
4008 /* FreeBSD has a function for ip_id's */
4009 ip->ip_id = ip_newid();
4011 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4012 ip->ip_len = packet_length;
4013 ip->ip_tos = tos_value;
4015 ip->ip_p = IPPROTO_UDP;
4017 ip->ip_p = IPPROTO_SCTP;
4022 memset(&iproute, 0, sizeof(iproute));
4023 memcpy(&ro->ro_dst, to, to->sa_len);
4025 ro = (sctp_route_t *) & net->ro;
4027 /* Now the address selection part */
4028 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4030 /* call the routine to select the src address */
4031 if (net && out_of_asoc_ok == 0) {
4032 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4033 sctp_free_ifa(net->ro._s_addr);
4034 net->ro._s_addr = NULL;
4035 net->src_addr_selected = 0;
4041 if (net->src_addr_selected == 0) {
4042 /* Cache the source address */
4043 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4046 net->src_addr_selected = 1;
4048 if (net->ro._s_addr == NULL) {
4049 /* No route to host */
4050 net->src_addr_selected = 0;
4051 sctp_handle_no_route(stcb, net, so_locked);
4052 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4054 return (EHOSTUNREACH);
4056 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4058 if (over_addr == NULL) {
4059 struct sctp_ifa *_lsrc;
4061 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4065 if (_lsrc == NULL) {
4066 sctp_handle_no_route(stcb, net, so_locked);
4067 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4069 return (EHOSTUNREACH);
4071 ip->ip_src = _lsrc->address.sin.sin_addr;
4072 sctp_free_ifa(_lsrc);
4074 ip->ip_src = over_addr->sin.sin_addr;
4075 SCTP_RTALLOC(ro, vrf_id);
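/*
 * UDP encapsulation: when a remote UDP port is in use, a UDP header
 * goes between the IP header and the SCTP common header.  Bail out if
 * no local tunneling port has been configured.
 */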
4079 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4080 sctp_handle_no_route(stcb, net, so_locked);
4081 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4083 return (EHOSTUNREACH);
4085 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4086 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4087 udp->uh_dport = port;
4088 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
4090 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4094 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4096 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4099 sctphdr->src_port = src_port;
4100 sctphdr->dest_port = dest_port;
4101 sctphdr->v_tag = v_tag;
4102 sctphdr->checksum = 0;
4105 * If source address selection fails and we find no
4106 * route then the ip_output should fail as well with
4107 * a NO_ROUTE_TO_HOST type error. We probably should
4108 * catch that somewhere and abort the association
4109 * right away (assuming this is an INIT being sent).
4111 if (ro->ro_rt == NULL) {
4113 * src addr selection failed to find a route
4114 * (or valid source addr), so we can't get
4115 * there from here (yet)!
4117 sctp_handle_no_route(stcb, net, so_locked);
4118 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4120 return (EHOSTUNREACH);
4122 if (ro != &iproute) {
4123 memcpy(&iproute, ro, sizeof(*ro));
4125 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4126 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4127 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4128 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
4129 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4132 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4133 /* failed to prepend data, give up */
4134 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4138 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
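/*
 * CRC32c handling: skipped entirely when built with SCTP_WITH_NO_CSUM,
 * computed in software when the packet is UDP encapsulated, and
 * otherwise delegated to checksum offload via CSUM_SCTP.
 */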
4140 #if defined(SCTP_WITH_NO_CSUM)
4141 SCTP_STAT_INCR(sctps_sendnocrc);
4143 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4144 SCTP_STAT_INCR(sctps_sendswcrc);
4147 SCTP_ENABLE_UDP_CSUM(o_pak);
4150 #if defined(SCTP_WITH_NO_CSUM)
4151 SCTP_STAT_INCR(sctps_sendnocrc);
4153 m->m_pkthdr.csum_flags = CSUM_SCTP;
4154 m->m_pkthdr.csum_data = 0;
4155 SCTP_STAT_INCR(sctps_sendhwcrc);
4158 #ifdef SCTP_PACKET_LOGGING
4159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4160 sctp_packet_log(o_pak);
4162 /* send it out. table id is taken from stcb */
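/*
 * If sctp_output_unlocked is enabled, release the socket lock around
 * the call into IP output; on the way back, take a temporary refcount
 * on the association and drop the TCB lock so the socket lock can be
 * re-acquired in the proper order before re-locking the TCB.
 */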
4163 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4164 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4165 so = SCTP_INP_SO(inp);
4166 SCTP_SOCKET_UNLOCK(so, 0);
4169 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4170 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4171 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4172 atomic_add_int(&stcb->asoc.refcnt, 1);
4173 SCTP_TCB_UNLOCK(stcb);
4174 SCTP_SOCKET_LOCK(so, 0);
4175 SCTP_TCB_LOCK(stcb);
4176 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4179 SCTP_STAT_INCR(sctps_sendpackets);
4180 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4182 SCTP_STAT_INCR(sctps_senderrors);
4184 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4186 /* free temporary routes */
4190 * PMTU check versus smallest asoc MTU goes here.
4193 if ((ro->ro_rt != NULL) &&
4194 (net->ro._s_addr)) {
4197 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4199 mtu -= sizeof(struct udphdr);
4201 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4202 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4205 } else if (ro->ro_rt == NULL) {
4206 /* route was freed */
4207 if (net->ro._s_addr &&
4208 net->src_addr_selected) {
4209 sctp_free_ifa(net->ro._s_addr);
4210 net->ro._s_addr = NULL;
4212 net->src_addr_selected = 0;
4221 uint32_t flowlabel, flowinfo;
4222 struct ip6_hdr *ip6h;
4223 struct route_in6 ip6route;
4225 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4227 struct sockaddr_in6 lsa6_storage;
4229 u_short prev_port = 0;
4233 flowlabel = net->flowlabel;
4235 flowlabel = stcb->asoc.default_flowlabel;
4237 flowlabel = inp->sctp_ep.default_flowlabel;
4239 if (flowlabel == 0) {
4241 * In particular, this means that it is not set
4242 * at the SCTP layer, so use the value from the IP layer.
4245 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4247 flowlabel &= 0x000fffff;
4248 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4250 len += sizeof(struct udphdr);
4252 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
4255 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4258 SCTP_ALIGN_TO_END(newm, len);
4259 SCTP_BUF_LEN(newm) = len;
4260 SCTP_BUF_NEXT(newm) = m;
4264 if (net->flowidset == 0) {
4265 panic("Flow ID not set");
4268 m->m_pkthdr.flowid = net->flowid;
4269 m->m_flags |= M_FLOWID;
4271 if (use_mflowid != 0) {
4272 m->m_pkthdr.flowid = mflowid;
4273 m->m_flags |= M_FLOWID;
4276 packet_length = sctp_calculate_len(m);
4278 ip6h = mtod(m, struct ip6_hdr *);
4279 /* protect *sin6 from overwrite */
4280 sin6 = (struct sockaddr_in6 *)to;
4284 /* KAME hack: embed scopeid */
4285 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4286 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4290 memset(&ip6route, 0, sizeof(ip6route));
4291 ro = (sctp_route_t *) & ip6route;
4292 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4294 ro = (sctp_route_t *) & net->ro;
4297 * We assume here that inp_flow is in host byte
4298 * order within the TCB!
4300 if (tos_value == 0) {
4302 * In particular, this means that it is not set
4303 * at the SCTP layer, so use the value from the IP layer.
4306 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4310 tos_value |= sctp_get_ect(stcb);
4314 flowinfo |= tos_value;
4316 flowinfo |= flowlabel;
4317 ip6h->ip6_flow = htonl(flowinfo);
4319 ip6h->ip6_nxt = IPPROTO_UDP;
4321 ip6h->ip6_nxt = IPPROTO_SCTP;
4323 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4324 ip6h->ip6_dst = sin6->sin6_addr;
4327 * Add SRC address selection here: we can only reuse
4328 * to a limited degree the kame src-addr-sel, since
4329 * we can try their selection but it may not be bound.
4332 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4333 lsa6_tmp.sin6_family = AF_INET6;
4334 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4336 if (net && out_of_asoc_ok == 0) {
4337 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4338 sctp_free_ifa(net->ro._s_addr);
4339 net->ro._s_addr = NULL;
4340 net->src_addr_selected = 0;
4346 if (net->src_addr_selected == 0) {
4347 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4348 /* KAME hack: embed scopeid */
4349 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4350 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4353 /* Cache the source address */
4354 net->ro._s_addr = sctp_source_address_selection(inp,
4360 (void)sa6_recoverscope(sin6);
4361 net->src_addr_selected = 1;
4363 if (net->ro._s_addr == NULL) {
4364 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4365 net->src_addr_selected = 0;
4366 sctp_handle_no_route(stcb, net, so_locked);
4367 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4369 return (EHOSTUNREACH);
4371 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4373 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4374 /* KAME hack: embed scopeid */
4375 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4376 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4379 if (over_addr == NULL) {
4380 struct sctp_ifa *_lsrc;
4382 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4386 if (_lsrc == NULL) {
4387 sctp_handle_no_route(stcb, net, so_locked);
4388 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4390 return (EHOSTUNREACH);
4392 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4393 sctp_free_ifa(_lsrc);
4395 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4396 SCTP_RTALLOC(ro, vrf_id);
4398 (void)sa6_recoverscope(sin6);
4400 lsa6->sin6_port = inp->sctp_lport;
4402 if (ro->ro_rt == NULL) {
4404 * src addr selection failed to find a route
4405 * (or valid source addr), so we can't get there from here!
4408 sctp_handle_no_route(stcb, net, so_locked);
4409 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4411 return (EHOSTUNREACH);
4414 * XXX: sa6 may not have a valid sin6_scope_id in
4415 * the non-SCOPEDROUTING case.
4417 bzero(&lsa6_storage, sizeof(lsa6_storage));
4418 lsa6_storage.sin6_family = AF_INET6;
4419 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4420 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4421 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4422 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4427 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4428 lsa6_storage.sin6_port = inp->sctp_lport;
4429 lsa6 = &lsa6_storage;
4430 ip6h->ip6_src = lsa6->sin6_addr;
4433 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4434 sctp_handle_no_route(stcb, net, so_locked);
4435 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4437 return (EHOSTUNREACH);
4439 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4440 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4441 udp->uh_dport = port;
4442 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4444 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4446 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4449 sctphdr->src_port = src_port;
4450 sctphdr->dest_port = dest_port;
4451 sctphdr->v_tag = v_tag;
4452 sctphdr->checksum = 0;
4455 * We set the hop limit now since there is a good
4456 * chance that our ro pointer is now filled
4458 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4459 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4462 /* Copy to be sure something bad is not happening */
4463 sin6->sin6_addr = ip6h->ip6_dst;
4464 lsa6->sin6_addr = ip6h->ip6_src;
4467 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4468 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4469 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4470 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4471 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4473 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4475 * preserve the port and scope for link-local sends
4478 prev_scope = sin6->sin6_scope_id;
4479 prev_port = sin6->sin6_port;
4481 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4482 /* failed to prepend data, give up */
4484 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4487 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4489 #if defined(SCTP_WITH_NO_CSUM)
4490 SCTP_STAT_INCR(sctps_sendnocrc);
4492 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4493 SCTP_STAT_INCR(sctps_sendswcrc);
4495 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4496 udp->uh_sum = 0xffff;
4499 #if defined(SCTP_WITH_NO_CSUM)
4500 SCTP_STAT_INCR(sctps_sendnocrc);
4502 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4503 m->m_pkthdr.csum_data = 0;
4504 SCTP_STAT_INCR(sctps_sendhwcrc);
4507 /* send it out. table id is taken from stcb */
4508 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4509 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4510 so = SCTP_INP_SO(inp);
4511 SCTP_SOCKET_UNLOCK(so, 0);
4514 #ifdef SCTP_PACKET_LOGGING
4515 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4516 sctp_packet_log(o_pak);
4518 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4519 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4520 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4521 atomic_add_int(&stcb->asoc.refcnt, 1);
4522 SCTP_TCB_UNLOCK(stcb);
4523 SCTP_SOCKET_LOCK(so, 0);
4524 SCTP_TCB_LOCK(stcb);
4525 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4529 /* for link local this must be done */
4530 sin6->sin6_scope_id = prev_scope;
4531 sin6->sin6_port = prev_port;
4533 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4534 SCTP_STAT_INCR(sctps_sendpackets);
4535 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4537 SCTP_STAT_INCR(sctps_senderrors);
4540 /* Now if we had a temp route, free it */
4544 * PMTU check versus smallest asoc MTU goes here.
4547 if (ro->ro_rt == NULL) {
4548 /* Route was freed */
4549 if (net->ro._s_addr &&
4550 net->src_addr_selected) {
4551 sctp_free_ifa(net->ro._s_addr);
4552 net->ro._s_addr = NULL;
4554 net->src_addr_selected = 0;
4556 if ((ro->ro_rt != NULL) &&
4557 (net->ro._s_addr)) {
4560 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4562 (stcb->asoc.smallest_mtu > mtu)) {
4563 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4566 net->mtu -= sizeof(struct udphdr);
4570 if (ND_IFINFO(ifp)->linkmtu &&
4571 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4572 sctp_mtu_size_reset(inp,
4574 ND_IFINFO(ifp)->linkmtu);
4582 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4583 ((struct sockaddr *)to)->sa_family);
4585 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
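/*
 * Build and send an INIT chunk on the primary destination: the fixed
 * INIT header, the supported address types, optional parameters
 * (adaptation layer indication, NAT friendliness, cookie preservative,
 * ECN, PR-SCTP, supported extensions and the AUTH parameters), then our
 * local addresses, finally handing the chain to
 * sctp_lowlevel_chunk_output() while the INIT timer runs.
 */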
4592 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4593 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4598 struct sctp_scoping scp;
4600 struct sctp_nets *net;
4601 struct sctp_init_chunk *init;
4602 struct sctp_supported_addr_param *sup_addr;
4603 struct sctp_adaptation_layer_indication *ali;
4604 struct sctp_supported_chunk_types_param *pr_supported;
4605 struct sctp_paramhdr *ph;
4606 int cnt_inits_to = 0;
4608 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4610 /* INITs always go to the primary (and usually the ONLY) address */
4611 net = stcb->asoc.primary_destination;
4613 net = TAILQ_FIRST(&stcb->asoc.nets);
4618 /* we confirm any address we send an INIT to */
4619 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4620 (void)sctp_set_primary_addr(stcb, NULL, net);
4622 /* we confirm any address we send an INIT to */
4623 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4625 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4627 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4629 * special hook: if we are sending to a link-local address it will not
4630 * show up in our private address count.
4632 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4636 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4637 /* This case should not happen */
4638 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4641 /* start the INIT timer */
4642 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4644 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
4646 /* No memory, INIT timer will re-attempt. */
4647 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4650 chunk_len = (uint16_t) sizeof(struct sctp_init_chunk);
4653 * assume peer supports asconf in order to be able to queue local
4654 * address changes while an INIT is in flight and before the assoc is established.
4657 stcb->asoc.peer_supports_asconf = 1;
4658 /* Now let's put the chunk header in place */
4659 init = mtod(m, struct sctp_init_chunk *);
4660 /* now the chunk header */
4661 init->ch.chunk_type = SCTP_INITIATION;
4662 init->ch.chunk_flags = 0;
4663 /* fill in later from mbuf we build */
4664 init->ch.chunk_length = 0;
4665 /* place in my tag */
4666 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4667 /* set up some of the credits. */
4668 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4669 SCTP_MINIMAL_RWND));
4670 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4671 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4672 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4674 #if defined(INET) || defined(INET6)
4675 /* now the address restriction */
4676 /* XXX Should we take the address family of the socket into account? */
4677 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4678 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4681 /* we support 2 types: IPv4/IPv6 */
4682 parameter_len = (uint16_t) (sizeof(struct sctp_paramhdr) + 2 * sizeof(uint16_t));
4683 sup_addr->ph.param_length = htons(parameter_len);
4684 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4685 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
4688 /* we support 1 type: IPv6 */
4689 parameter_len = (uint16_t) (sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
4690 sup_addr->ph.param_length = htons(parameter_len);
4691 sup_addr->addr_type[0] = htons(SCTP_IPV6_ADDRESS);
4692 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4693 padding_len = (uint16_t) sizeof(uint16_t);
4696 /* we support 1 type: IPv4 */
4697 parameter_len = (uint16_t) (sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
4698 sup_addr->ph.param_length = htons(parameter_len);
4699 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4700 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4701 padding_len = (uint16_t) sizeof(uint16_t);
4703 chunk_len += parameter_len;
4705 /* Adaptation layer indication parameter */
4706 /* XXX: Should we include this always? */
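/*
 * Every parameter must start on a 32-bit boundary, so any padding owed
 * from the previous parameter is written out before the next one is
 * appended.
 */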
4707 if (padding_len > 0) {
4708 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4709 chunk_len += padding_len;
4712 parameter_len = (uint16_t) sizeof(struct sctp_adaptation_layer_indication);
4713 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4714 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4715 ali->ph.param_length = htons(parameter_len);
4716 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4717 chunk_len += parameter_len;
4719 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4720 /* Add NAT friendly parameter. */
4721 if (padding_len > 0) {
4722 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4723 chunk_len += padding_len;
4726 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4727 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4728 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4729 ph->param_length = htons(parameter_len);
4730 chunk_len += parameter_len;
4732 /* now any cookie time extensions */
4733 if (stcb->asoc.cookie_preserve_req) {
4734 struct sctp_cookie_perserve_param *cookie_preserve;
4736 if (padding_len > 0) {
4737 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4738 chunk_len += padding_len;
4741 parameter_len = (uint16_t) sizeof(struct sctp_cookie_perserve_param);
4742 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4743 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4744 cookie_preserve->ph.param_length = htons(parameter_len);
4745 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4746 stcb->asoc.cookie_preserve_req = 0;
4747 chunk_len += parameter_len;
4750 if (stcb->asoc.ecn_allowed == 1) {
4751 if (padding_len > 0) {
4752 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4753 chunk_len += padding_len;
4756 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4757 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4758 ph->param_type = htons(SCTP_ECN_CAPABLE);
4759 ph->param_length = htons(parameter_len);
4760 chunk_len += parameter_len;
4762 /* And now tell the peer we do support PR-SCTP. */
4763 if (padding_len > 0) {
4764 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4765 chunk_len += padding_len;
4768 parameter_len = (uint16_t) sizeof(struct sctp_paramhdr);
4769 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4770 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4771 ph->param_length = htons(parameter_len);
4772 chunk_len += parameter_len;
4774 /* And now tell the peer we do all the extensions */
4775 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4776 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4778 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4779 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4780 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4781 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4782 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4783 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4784 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4786 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
4787 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4789 parameter_len = (uint16_t) sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4790 pr_supported->ph.param_length = htons(parameter_len);
4791 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4792 chunk_len += parameter_len;
4794 /* add authentication parameters */
4795 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4796 /* attach RANDOM parameter, if available */
4797 if (stcb->asoc.authinfo.random != NULL) {
4798 struct sctp_auth_random *randp;
4800 if (padding_len > 0) {
4801 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4802 chunk_len += padding_len;
4805 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4806 parameter_len = (uint16_t) sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4807 /* random key already contains the header */
4808 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4809 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4810 chunk_len += parameter_len;
4812 /* add HMAC_ALGO parameter */
4813 if ((stcb->asoc.local_hmacs != NULL) &&
4814 (stcb->asoc.local_hmacs->num_algo > 0)) {
4815 struct sctp_auth_hmac_algo *hmacs;
4817 if (padding_len > 0) {
4818 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4819 chunk_len += padding_len;
4822 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4823 parameter_len = (uint16_t) (sizeof(struct sctp_auth_hmac_algo) +
4824 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4825 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4826 hmacs->ph.param_length = htons(parameter_len);
4827 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *) hmacs->hmac_ids);
4828 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4829 chunk_len += parameter_len;
4831 /* add CHUNKS parameter */
4832 if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) {
4833 struct sctp_auth_chunk_list *chunks;
4835 if (padding_len > 0) {
4836 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4837 chunk_len += padding_len;
4840 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4841 parameter_len = (uint16_t) (sizeof(struct sctp_auth_chunk_list) +
4842 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
4843 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4844 chunks->ph.param_length = htons(parameter_len);
4845 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
4846 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4847 chunk_len += parameter_len;
4850 SCTP_BUF_LEN(m) = chunk_len;
4852 /* now the addresses */
4854 * To optimize this we could put the scoping stuff into a structure
4855 * and remove the individual uint8's from the assoc structure. Then
4856 * we could just sifa in the address within the stcb. But for now
4857 * this is a quick hack to get the address stuff teased apart.
4859 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
4860 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
4861 scp.loopback_scope = stcb->asoc.loopback_scope;
4862 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
4863 scp.local_scope = stcb->asoc.local_scope;
4864 scp.site_scope = stcb->asoc.site_scope;
4865 sctp_add_addresses_to_i_ia(inp, stcb, &scp, m, cnt_inits_to, &padding_len, &chunk_len);
4867 init->ch.chunk_length = htons(chunk_len);
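/*
 * If the last parameter left the chunk short of a 32-bit boundary,
 * append the remaining padding to the last mbuf in the chain.
 */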
4868 if (padding_len > 0) {
4869 struct mbuf *m_at, *mp_last;
4872 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4873 if (SCTP_BUF_NEXT(m_at) == NULL)
4876 if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) {
4881 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4882 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4883 (struct sockaddr *)&net->ro._l_addr,
4884 m, 0, NULL, 0, 0, 0, 0,
4885 inp->sctp_lport, stcb->rport, htonl(0),
4889 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4890 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4891 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4895 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4896 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4899 * Given an mbuf containing an INIT or INIT-ACK with param_offset
4900 * equal to the beginning of the parameters, i.e. (iphlen +
4901 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4902 * end of the mbuf and verify that all parameters are known.
4904 * For unknown parameters, build and return an mbuf with
4905 * UNRECOGNIZED_PARAMETER errors. If the parameter flags indicate to stop
4906 * processing this chunk, stop and set *abort_processing to 1.
4908 * By having param_offset pre-set to where the parameters begin, it is
4909 * hoped that this routine may be reused in the future by new features.
4912 struct sctp_paramhdr *phdr, params;
4914 struct mbuf *mat, *op_err;
4915 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4916 int at, limit, pad_needed;
4917 uint16_t ptype, plen, padded_size;
4920 *abort_processing = 0;
4923 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4926 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4927 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
4928 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4929 ptype = ntohs(phdr->param_type);
4930 plen = ntohs(phdr->param_length);
4931 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4932 /* whacked parameter */
4933 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4936 limit -= SCTP_SIZE32(plen);
4938 * All parameters for all chunks that we know/understand are
4939 * listed here. We process them in other places and take the
4940 * appropriate stop action per the upper bits. However, this
4941 * is the generic routine processors can call to get back
4942 * an operr to either incorporate (init-ack) or send.
4944 padded_size = SCTP_SIZE32(plen);
4946 /* Params with variable size */
4947 case SCTP_HEARTBEAT_INFO:
4948 case SCTP_STATE_COOKIE:
4949 case SCTP_UNRECOG_PARAM:
4950 case SCTP_ERROR_CAUSE_IND:
4954 /* Params with variable size within a range */
4955 case SCTP_CHUNK_LIST:
4956 case SCTP_SUPPORTED_CHUNK_EXT:
4957 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4958 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4963 case SCTP_SUPPORTED_ADDRTYPE:
4964 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4965 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4971 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4972 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4977 case SCTP_SET_PRIM_ADDR:
4978 case SCTP_DEL_IP_ADDRESS:
4979 case SCTP_ADD_IP_ADDRESS:
4980 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4981 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4982 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4987 /* Params with a fixed size */
4988 case SCTP_IPV4_ADDRESS:
4989 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4990 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4995 case SCTP_IPV6_ADDRESS:
4996 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4997 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5002 case SCTP_COOKIE_PRESERVE:
5003 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5004 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5009 case SCTP_HAS_NAT_SUPPORT:
5012 case SCTP_PRSCTP_SUPPORTED:
5014 if (padded_size != sizeof(struct sctp_paramhdr)) {
5015 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5020 case SCTP_ECN_CAPABLE:
5021 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
5022 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5027 case SCTP_ULP_ADAPTATION:
5028 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5029 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5034 case SCTP_SUCCESS_REPORT:
5035 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5036 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5041 case SCTP_HOSTNAME_ADDRESS:
5043 /* We can NOT handle HOST NAME addresses!! */
5046 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5047 *abort_processing = 1;
5048 if (op_err == NULL) {
5049 /* Ok, need to try to get an mbuf */
5051 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5053 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5056 l_len += sizeof(struct sctp_paramhdr);
5057 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5059 SCTP_BUF_LEN(op_err) = 0;
5061 * pre-reserve space for the IP
5062 * and SCTP headers and the chunk header
5066 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5068 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5070 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5071 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5075 /* If we have space */
5076 struct sctp_paramhdr s;
5079 uint32_t cpthis = 0;
5081 pad_needed = 4 - (err_at % 4);
5082 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5083 err_at += pad_needed;
5085 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5086 s.param_length = htons(sizeof(s) + plen);
5087 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5088 err_at += sizeof(s);
5089 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5091 sctp_m_freem(op_err);
5093 * we are out of memory but
5094 * we still need to have a
5095 * look at what to do (the
5096 * system is in trouble though).
5101 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5108 * we do not recognize the parameter; figure out what we do.
5111 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5112 if ((ptype & 0x4000) == 0x4000) {
5113 /* Report bit is set?? */
5114 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5115 if (op_err == NULL) {
5118 /* Ok need to try to get an mbuf */
5120 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5122 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5125 l_len += sizeof(struct sctp_paramhdr);
5126 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5128 SCTP_BUF_LEN(op_err) = 0;
5130 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5132 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5134 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5135 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5139 /* If we have space */
5140 struct sctp_paramhdr s;
5143 uint32_t cpthis = 0;
5145 pad_needed = 4 - (err_at % 4);
5146 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5147 err_at += pad_needed;
5149 s.param_type = htons(SCTP_UNRECOG_PARAM);
5150 s.param_length = htons(sizeof(s) + plen);
5151 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5152 err_at += sizeof(s);
5153 if (plen > sizeof(tempbuf)) {
5154 plen = sizeof(tempbuf);
5156 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5158 sctp_m_freem(op_err);
5160 * we are out of memory but
5161 * we still need to have a
5162 * look at what to do (the
5163 * system is in trouble though).
5167 goto more_processing;
5169 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5174 if ((ptype & 0x8000) == 0x0000) {
5175 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5178 /* skip this chunk and continue processing */
5179 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5180 at += SCTP_SIZE32(plen);
5185 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5189 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5190 *abort_processing = 1;
5191 if ((op_err == NULL) && phdr) {
5195 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5197 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5199 l_len += (2 * sizeof(struct sctp_paramhdr));
5200 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5202 SCTP_BUF_LEN(op_err) = 0;
5204 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5206 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5208 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5209 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5212 if ((op_err) && phdr) {
5213 struct sctp_paramhdr s;
5216 uint32_t cpthis = 0;
5218 pad_needed = 4 - (err_at % 4);
5219 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5220 err_at += pad_needed;
5222 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5223 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5224 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5225 err_at += sizeof(s);
5226 /* Only copy back the p-hdr that caused the issue */
5227 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5233 sctp_are_there_new_addresses(struct sctp_association *asoc,
5234 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5237 * Given an INIT packet, look through the packet to verify that there
5238 * are NO new addresses. As we go through the parameters, add reports
5239 * of any unrecognized parameters that require an error. Also we
5240 * must return (1) to drop the packet if we see an unrecognized
5241 * parameter that tells us to drop the chunk.
5243 struct sockaddr *sa_touse;
5244 struct sockaddr *sa;
5245 struct sctp_paramhdr *phdr, params;
5246 uint16_t ptype, plen;
5248 struct sctp_nets *net;
5251 struct sockaddr_in sin4, *sa4;
5255 struct sockaddr_in6 sin6, *sa6;
5260 memset(&sin4, 0, sizeof(sin4));
5261 sin4.sin_family = AF_INET;
5262 sin4.sin_len = sizeof(sin4);
5265 memset(&sin6, 0, sizeof(sin6));
5266 sin6.sin6_family = AF_INET6;
5267 sin6.sin6_len = sizeof(sin6);
5269 /* First what about the src address of the pkt ? */
5271 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5272 sa = (struct sockaddr *)&net->ro._l_addr;
5273 if (sa->sa_family == src->sa_family) {
5275 if (sa->sa_family == AF_INET) {
5276 struct sockaddr_in *src4;
5278 sa4 = (struct sockaddr_in *)sa;
5279 src4 = (struct sockaddr_in *)src;
5280 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5287 if (sa->sa_family == AF_INET6) {
5288 struct sockaddr_in6 *src6;
5290 sa6 = (struct sockaddr_in6 *)sa;
5291 src6 = (struct sockaddr_in6 *)src;
5292 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5301 /* New address added! No need to look further. */
5304 /* Ok so far, let's munge through the rest of the packet */
5305 offset += sizeof(struct sctp_init_chunk);
5306 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5309 ptype = ntohs(phdr->param_type);
5310 plen = ntohs(phdr->param_length);
5313 case SCTP_IPV4_ADDRESS:
5315 struct sctp_ipv4addr_param *p4, p4_buf;
5317 phdr = sctp_get_next_param(in_initpkt, offset,
5318 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5319 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5323 p4 = (struct sctp_ipv4addr_param *)phdr;
5324 sin4.sin_addr.s_addr = p4->addr;
5325 sa_touse = (struct sockaddr *)&sin4;
5330 case SCTP_IPV6_ADDRESS:
5332 struct sctp_ipv6addr_param *p6, p6_buf;
5334 phdr = sctp_get_next_param(in_initpkt, offset,
5335 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5336 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5340 p6 = (struct sctp_ipv6addr_param *)phdr;
5341 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5343 sa_touse = (struct sockaddr *)&sin6;
5352 /* ok, sa_touse points to one to check */
5354 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5355 sa = (struct sockaddr *)&net->ro._l_addr;
5356 if (sa->sa_family != sa_touse->sa_family) {
5360 if (sa->sa_family == AF_INET) {
5361 sa4 = (struct sockaddr_in *)sa;
5362 if (sa4->sin_addr.s_addr ==
5363 sin4.sin_addr.s_addr) {
5370 if (sa->sa_family == AF_INET6) {
5371 sa6 = (struct sockaddr_in6 *)sa;
5372 if (SCTP6_ARE_ADDR_EQUAL(
5381 /* New addr added! no need to look further */
5385 offset += SCTP_SIZE32(plen);
5386 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5392 * Given an mbuf chain that was sent into us containing an INIT, build an
5393 * INIT-ACK with a COOKIE and send it back. We assume that the in_initpkt has done
5394 * a pullup to include the IPv6/IPv4 header, SCTP header and the initial part of the INIT
5395 * message (i.e. the struct sctp_init_msg).
5398 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5399 struct mbuf *init_pkt, int iphlen, int offset,
5400 struct sockaddr *src, struct sockaddr *dst,
5401 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5402 uint8_t use_mflowid, uint32_t mflowid,
5403 uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5405 struct sctp_association *asoc;
5406 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
5407 struct sctp_init_ack_chunk *initack;
5408 struct sctp_adaptation_layer_indication *ali;
5409 struct sctp_ecn_supported_param *ecn;
5410 struct sctp_prsctp_supported_param *prsctp;
5411 struct sctp_supported_chunk_types_param *pr_supported;
5412 union sctp_sockstore *over_addr;
5415 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5416 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5417 struct sockaddr_in *sin;
5421 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5422 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5423 struct sockaddr_in6 *sin6;
5426 struct sockaddr *to;
5427 struct sctp_state_cookie stc;
5428 struct sctp_nets *net = NULL;
5429 uint8_t *signature = NULL;
5430 int cnt_inits_to = 0;
5431 uint16_t his_limit, i_want;
5432 int abort_flag, padval;
5435 int nat_friendly = 0;
5444 if ((asoc != NULL) &&
5445 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5446 (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) {
5447 /* new addresses, out of here in non-cookie-wait states */
5449 * Send an ABORT; we don't add the new address error clause,
5450 * though we even set the T bit and copy in the 0 tag. This
5451 * looks no different than if no listener was present.
5453 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, NULL,
5454 use_mflowid, mflowid,
5459 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5460 (offset + sizeof(struct sctp_init_chunk)),
5461 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5464 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5465 init_chk->init.initiate_tag, op_err,
5466 use_mflowid, mflowid,
5470 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
5472 /* No memory, INIT timer will re-attempt. */
5474 sctp_m_freem(op_err);
5477 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5479 /* the time I built the cookie */
5480 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5482 /* populate any tie tags */
5484 /* unlock before tag selections */
5485 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5486 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5487 stc.cookie_life = asoc->cookie_life;
5488 net = asoc->primary_destination;
5490 stc.tie_tag_my_vtag = 0;
5491 stc.tie_tag_peer_vtag = 0;
5492 /* life I will award this cookie */
5493 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5496 /* copy in the ports for later check */
5497 stc.myport = sh->dest_port;
5498 stc.peerport = sh->src_port;
5501 * If we wanted to honor cookie life extensions, we would add to
5502 * stc.cookie_life. For now we should NOT honor any extension
5504 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5505 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5506 struct inpcb *in_inp;
5508 /* It's a V6 socket */
5509 in_inp = (struct inpcb *)inp;
5510 stc.ipv6_addr_legal = 1;
5511 /* Now look at the binding flag to see if V4 will be legal */
5512 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
5513 stc.ipv4_addr_legal = 1;
5515 /* V4 addresses are NOT legal on the association */
5516 stc.ipv4_addr_legal = 0;
5519 /* It's a V4 socket, no V6 */
5520 stc.ipv4_addr_legal = 1;
5521 stc.ipv6_addr_legal = 0;
5524 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5531 switch (dst->sa_family) {
5535 /* lookup address */
5536 stc.address[0] = src4->sin_addr.s_addr;
5540 stc.addr_type = SCTP_IPV4_ADDRESS;
5541 /* local from address */
5542 stc.laddress[0] = dst4->sin_addr.s_addr;
5543 stc.laddress[1] = 0;
5544 stc.laddress[2] = 0;
5545 stc.laddress[3] = 0;
5546 stc.laddr_type = SCTP_IPV4_ADDRESS;
5547 /* scope_id is only for v6 */
5549 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5550 if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) {
5555 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5556 /* Must use the address in this case */
5557 if (sctp_is_address_on_local_host(src, vrf_id)) {
5558 stc.loopback_scope = 1;
5561 stc.local_scope = 0;
5569 stc.addr_type = SCTP_IPV6_ADDRESS;
5570 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5571 stc.scope_id = in6_getscope(&src6->sin6_addr);
5572 if (sctp_is_address_on_local_host(src, vrf_id)) {
5573 stc.loopback_scope = 1;
5574 stc.local_scope = 0;
5577 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) {
5579 * If the new destination is a
5580 * LINK_LOCAL address, we must have
5581 * both site and local scope in common. Don't
5582 * set local scope though, since we
5583 * must depend on the source to be
5584 * added implicitly. We cannot
5585 * assume, just because we share one
5586 * link, that all links are common.
5588 stc.local_scope = 0;
5592 * we start counting for the private
5593 * address stuff at 1, since the
5594 * link local we source from won't
5595 * show up in our scoped count.
5599 * pull out the scope_id from the incoming address
5602 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) {
5604 * If the new destination is
5605 * SITE_LOCAL then we must have site scope in common.
5610 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5611 stc.laddr_type = SCTP_IPV6_ADDRESS;
5621 /* set the scope per the existing tcb */
5624 struct sctp_nets *lnet;
5628 stc.loopback_scope = asoc->loopback_scope;
5629 stc.ipv4_scope = asoc->ipv4_local_scope;
5630 stc.site_scope = asoc->site_scope;
5631 stc.local_scope = asoc->local_scope;
5633 /* Why do we not consider IPv4 LL addresses? */
5634 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5635 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5636 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5638 * if we have a LL address, start counting at 1.
5646 /* use the net pointer */
5647 to = (struct sockaddr *)&net->ro._l_addr;
5648 switch (to->sa_family) {
5651 sin = (struct sockaddr_in *)to;
5652 stc.address[0] = sin->sin_addr.s_addr;
5656 stc.addr_type = SCTP_IPV4_ADDRESS;
5657 if (net->src_addr_selected == 0) {
5659 * strange case here, the INIT should have
5660 * done the selection.
5662 net->ro._s_addr = sctp_source_address_selection(inp,
5663 stcb, (sctp_route_t *) & net->ro,
5665 if (net->ro._s_addr == NULL)
5668 net->src_addr_selected = 1;
5671 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5672 stc.laddress[1] = 0;
5673 stc.laddress[2] = 0;
5674 stc.laddress[3] = 0;
5675 stc.laddr_type = SCTP_IPV4_ADDRESS;
5676 /* scope_id is only for v6 */
5682 sin6 = (struct sockaddr_in6 *)to;
5683 memcpy(&stc.address, &sin6->sin6_addr,
5684 sizeof(struct in6_addr));
5685 stc.addr_type = SCTP_IPV6_ADDRESS;
5686 stc.scope_id = sin6->sin6_scope_id;
5687 if (net->src_addr_selected == 0) {
5689 * strange case here, the INIT should have
5690 * done the selection.
5692 net->ro._s_addr = sctp_source_address_selection(inp,
5693 stcb, (sctp_route_t *) & net->ro,
5695 if (net->ro._s_addr == NULL)
5698 net->src_addr_selected = 1;
5700 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5701 sizeof(struct in6_addr));
5702 stc.laddr_type = SCTP_IPV6_ADDRESS;
5707 /* Now let's put the SCTP header in place */
5708 initack = mtod(m, struct sctp_init_ack_chunk *);
5709 /* Save it off for quick ref */
5710 stc.peers_vtag = init_chk->init.initiate_tag;
5712 memcpy(stc.identification, SCTP_VERSION_STRING,
5713 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5714 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5715 /* now the chunk header */
5716 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5717 initack->ch.chunk_flags = 0;
5718 /* fill in later from mbuf we build */
5719 initack->ch.chunk_length = 0;
5720 /* place in my tag */
5721 if ((asoc != NULL) &&
5722 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5723 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5724 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5725 /* re-use the v-tags and init-seq here */
5726 initack->init.initiate_tag = htonl(asoc->my_vtag);
5727 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5729 uint32_t vtag, itsn;
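/*
 * Selecting a fresh vtag takes global locks, so drop the INP read
 * lock (if the caller holds it) and the TCB lock first, keeping a
 * refcount on the INP and the association so they cannot go away;
 * both locks are re-acquired below.
 */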
5731 if (hold_inp_lock) {
5732 SCTP_INP_INCR_REF(inp);
5733 SCTP_INP_RUNLOCK(inp);
5736 atomic_add_int(&asoc->refcnt, 1);
5737 SCTP_TCB_UNLOCK(stcb);
5739 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5740 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5742 * Got a duplicate vtag from a peer behind a
5743 * NAT; make sure we don't use it.
5747 initack->init.initiate_tag = htonl(vtag);
5748 /* get a TSN to use too */
5749 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5750 initack->init.initial_tsn = htonl(itsn);
5751 SCTP_TCB_LOCK(stcb);
5752 atomic_add_int(&asoc->refcnt, -1);
5754 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5755 initack->init.initiate_tag = htonl(vtag);
5756 /* get a TSN to use too */
5757 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5759 if (hold_inp_lock) {
5760 SCTP_INP_RLOCK(inp);
5761 SCTP_INP_DECR_REF(inp);
5764 /* save away my tag to the stc */
5765 stc.my_vtag = initack->init.initiate_tag;
5767 /* set up some of the credits. */
5768 so = inp->sctp_socket;
5770 /* memory problem */
5774 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5776 /* set what I want */
5777 his_limit = ntohs(init_chk->init.num_inbound_streams);
5778 /* choose what I want */
5780 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5781 i_want = asoc->streamoutcnt;
5783 i_want = inp->sctp_ep.pre_open_stream_count;
5786 i_want = inp->sctp_ep.pre_open_stream_count;
5788 if (his_limit < i_want) {
5789 /* I Want more :< */
5790 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5792 /* I can have what I want :> */
5793 initack->init.num_outbound_streams = htons(i_want);
5795 /* tell him his limit. */
5796 initack->init.num_inbound_streams =
5797 htons(inp->sctp_ep.max_open_streams_intome);
5799 /* adaptation layer indication parameter */
5800 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5801 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5802 ali->ph.param_length = htons(sizeof(*ali));
5803 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5804 SCTP_BUF_LEN(m) += sizeof(*ali);
5805 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5808 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
5809 (inp->sctp_ecn_enable == 1)) {
5810 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5811 ecn->ph.param_length = htons(sizeof(*ecn));
5812 SCTP_BUF_LEN(m) += sizeof(*ecn);
5814 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5817 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5819 /* And now tell the peer we do pr-sctp */
5820 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5821 prsctp->ph.param_length = htons(sizeof(*prsctp));
5822 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5824 /* Add NAT friendly parameter */
5825 struct sctp_paramhdr *ph;
5827 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5828 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5829 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5830 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5832 /* And now tell the peer we do all the extensions */
5833 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5834 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5836 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5837 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5838 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5839 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5840 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5841 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5842 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5843 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5844 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5845 p_len = sizeof(*pr_supported) + num_ext;
5846 pr_supported->ph.param_length = htons(p_len);
5847 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5848 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5850 /* add authentication parameters */
5851 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5852 struct sctp_auth_random *randp;
5853 struct sctp_auth_hmac_algo *hmacs;
5854 struct sctp_auth_chunk_list *chunks;
5855 uint16_t random_len;
5857 /* generate and add RANDOM parameter */
5858 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5859 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5860 randp->ph.param_type = htons(SCTP_RANDOM);
5861 p_len = sizeof(*randp) + random_len;
5862 randp->ph.param_length = htons(p_len);
5863 SCTP_READ_RANDOM(randp->random_data, random_len);
5864 /* zero out any padding required */
5865 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5866 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5868 /* add HMAC_ALGO parameter */
5869 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5870 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5871 (uint8_t *) hmacs->hmac_ids);
5873 p_len += sizeof(*hmacs);
5874 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5875 hmacs->ph.param_length = htons(p_len);
5876 /* zero out any padding required */
5877 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5878 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5880 /* add CHUNKS parameter */
5881 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5882 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5883 chunks->chunk_types);
5885 p_len += sizeof(*chunks);
5886 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5887 chunks->ph.param_length = htons(p_len);
5888 /* zero out any padding required */
5889 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5890 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5894 /* now the addresses */
5896 struct sctp_scoping scp;
5899 * To optimize this we could put the scoping stuff into a
5900 * structure and remove the individual uint8's from the stc
5901 * structure. Then we could just sifa in the address within
5902 * the stc.. but for now this is a quick hack to get the
5903 * address stuff teased apart.
5905 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5906 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5907 scp.loopback_scope = stc.loopback_scope;
5908 scp.ipv4_local_scope = stc.ipv4_scope;
5909 scp.local_scope = stc.local_scope;
5910 scp.site_scope = stc.site_scope;
5911 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL);
5914 /* tack on the operational error if present */
5923 llen += SCTP_BUF_LEN(ol);
5924 ol = SCTP_BUF_NEXT(ol);
5927 /* must add a pad to the param */
5928 uint32_t cpthis = 0;
5931 padlen = 4 - (llen % 4);
5932 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
5934 while (SCTP_BUF_NEXT(m_at) != NULL) {
5935 m_at = SCTP_BUF_NEXT(m_at);
5937 SCTP_BUF_NEXT(m_at) = op_err;
5938 while (SCTP_BUF_NEXT(m_at) != NULL) {
5939 m_at = SCTP_BUF_NEXT(m_at);
5942 /* pre-calculate the size and update pkt header and chunk header */
5944 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5945 p_len += SCTP_BUF_LEN(m_tmp);
5946 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5947 /* m_tmp should now point to last one */
5952 /* Now we must build a cookie */
5953 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
5954 if (m_cookie == NULL) {
5955 /* memory problem */
5959 /* Now append the cookie to the end and update the space/size */
5960 SCTP_BUF_NEXT(m_tmp) = m_cookie;
5962 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5963 p_len += SCTP_BUF_LEN(m_tmp);
5964 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5965 /* m_tmp should now point to last one */
5971 * Place in the size, but we don't include the last pad (if any) in the INIT-ACK.
5974 initack->ch.chunk_length = htons(p_len);
5977 * Time to sign the cookie; we don't sign over the cookie signature
5978 * itself, thus we set trailer.
5980 (void)sctp_hmac_m(SCTP_HMAC,
5981 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
5982 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
5983 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
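	/*
	 * Illustrative sketch (not part of the original source): the HMAC above
	 * covers the cookie body only, leaving the trailing signature bytes out
	 * of the digest input and then writing the result into that trailer.
	 * The flat-buffer analogue below shows the same shape; hmac_fn and
	 * sign_cookie() are hypothetical placeholders, not kernel primitives.
	 */
#if 0
	typedef void (*hmac_fn)(const uint8_t *key, size_t keylen,
	    const uint8_t *data, size_t datalen, uint8_t *digest);

	static void
	sign_cookie(hmac_fn hmac, const uint8_t *key, size_t keylen,
	    uint8_t *cookie, size_t cookie_len, size_t sig_len)
	{
		/* digest everything except the signature trailer ... */
		hmac(key, keylen, cookie, cookie_len - sig_len,
		    /* ... and drop the MAC into that trailer */
		    cookie + (cookie_len - sig_len));
	}
#endif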
5985 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
5986 * here since the timer will drive a retransmission.
5989 if ((padval) && (mp_last)) {
5990 /* see my previous comments on mp_last */
5991 if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
5992 /* Houston we have a problem, no space */
5997 if (stc.loopback_scope) {
5998 over_addr = (union sctp_sockstore *)dst;
6003 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6005 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6007 use_mflowid, mflowid,
6008 SCTP_SO_NOT_LOCKED);
6009 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6014 sctp_prune_prsctp(struct sctp_tcb *stcb,
6015 struct sctp_association *asoc,
6016 struct sctp_sndrcvinfo *srcv,
6020 struct sctp_tmit_chunk *chk, *nchk;
6022 SCTP_TCB_LOCK_ASSERT(stcb);
6023 if ((asoc->peer_supports_prsctp) &&
6024 (asoc->sent_queue_cnt_removeable > 0)) {
6025 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6027 * Look for chunks marked with the PR_SCTP flag AND
6028 * the buffer space flag. If the one being sent is
6029 * of equal or greater priority, then purge the old one
6030 * and free some space.
6032 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6034 * This one is PR-SCTP AND buffer space
6037 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6039 * Lower numbers equate to higher
6040 * priority so if the one we are
6041 * looking at has a larger or equal
6042 * priority we want to drop the data
6043 * and NOT retransmit it.
6047 * We release the book_size
6048 * if the mbuf is here
6053 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6057 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6060 freed_spc += ret_spc;
6061 if (freed_spc >= dataout) {
6064 } /* if chunk was present */
6065 } /* if of sufficient priority */
6066 } /* if chunk has enabled */
6067 } /* tailqforeach */
6069 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6070 /* Here we must move to the sent queue and mark */
6071 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6072 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6075 * We release the book_size
6076 * if the mbuf is here
6080 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6083 freed_spc += ret_spc;
6084 if (freed_spc >= dataout) {
6087 } /* end if chk->data */
6088 } /* end if right class */
6089 } /* end if chk pr-sctp */
6090 } /* tailqforeachsafe (chk) */
6091 } /* if enabled in asoc */
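/*
 * Illustrative sketch (not part of the original source): the pruning walk
 * above frees already-queued PR-SCTP "buffer drop" chunks whose priority
 * value (stored in timetodrop.tv_sec, lower value == higher priority) is no
 * better than the new message's sinfo_timetolive, and stops once enough
 * bytes are freed.  The standalone version below uses an array and made-up
 * struct/field names instead of the kernel TAILQs.
 */
#if 0
struct queued_msg {
	int	buf_drop_enabled;	/* PR_SCTP_BUF_ENABLED() analogue */
	long	priority;		/* timetodrop.tv_sec analogue     */
	uint32_t size;			/* bytes this item accounts for   */
	int	freed;
};

static uint32_t
prune_for_space(struct queued_msg *q, int n, long new_prio, uint32_t needed)
{
	uint32_t freed = 0;
	int i;

	for (i = 0; i < n && freed < needed; i++) {
		/* drop only buffer-drop items of equal or worse priority */
		if (q[i].buf_drop_enabled && !q[i].freed &&
		    q[i].priority >= new_prio) {
			q[i].freed = 1;
			freed += q[i].size;
		}
	}
	return (freed);
}
#endif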
6095 sctp_get_frag_point(struct sctp_tcb *stcb,
6096 struct sctp_association *asoc)
6101 * For endpoints that have both v6 and v4 addresses we must reserve
6102 * room for the ipv6 header, for those that are only dealing with V4
6103 * we use a larger frag point.
6105 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6106 ovh = SCTP_MED_OVERHEAD;
6108 ovh = SCTP_MED_V4_OVERHEAD;
6111 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6112 siz = asoc->smallest_mtu - ovh;
6114 siz = (stcb->asoc.sctp_frag_point - ovh);
6116 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6118 /* A data chunk MUST fit in a cluster */
6119 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6122 /* adjust for an AUTH chunk if DATA requires auth */
6123 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6124 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6127 /* make it an even word boundary please */
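/*
 * Illustrative sketch (not part of the original source): the fragmentation
 * point computed above is the smaller of the configured frag point and the
 * smallest path MTU, minus header overhead (and the AUTH chunk cost when
 * DATA must be authenticated), rounded down to a 32-bit boundary.  The
 * helper below restates that with the overheads passed in as plain
 * parameters; example_frag_point() is a hypothetical name.
 */
#if 0
static uint32_t
example_frag_point(uint32_t configured_frag_point, uint32_t smallest_mtu,
    uint32_t overhead, uint32_t auth_len)
{
	uint32_t siz;

	siz = (configured_frag_point > smallest_mtu) ?
	    smallest_mtu : configured_frag_point;
	siz -= overhead;	/* IP/SCTP header overhead for this endpoint */
	siz -= auth_len;	/* 0 if DATA does not require AUTH */
	siz &= 0xfffffffc;	/* make it an even word boundary */
	return (siz);
}
#endif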
6134 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6138 * We assume that the user wants PR_SCTP_TTL if the user provides a
6139 * positive lifetime but does not specify any PR_SCTP policy. This
6140 * is a BAD assumption and causes problems at least with the
6141 * U-Vancouver MPI folks. I will change this to be no policy means NO PR-SCTP is used.
6144 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6145 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6150 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6151 case CHUNK_FLAGS_PR_SCTP_BUF:
6153 * Time to live is a priority stored in tv_sec when doing
6154 * the buffer drop thing.
6156 sp->ts.tv_sec = sp->timetolive;
6159 case CHUNK_FLAGS_PR_SCTP_TTL:
6163 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6164 tv.tv_sec = sp->timetolive / 1000;
6165 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6167 * TODO sctp_constants.h needs alternative time
6168 * macros when _KERNEL is undefined.
6170 timevaladd(&sp->ts, &tv);
6173 case CHUNK_FLAGS_PR_SCTP_RTX:
6175 * Time to live is the number of retransmissions stored in tv_sec.
6178 sp->ts.tv_sec = sp->timetolive;
6182 SCTPDBG(SCTP_DEBUG_USRREQ1,
6183 "Unknown PR_SCTP policy %u.\n",
6184 PR_SCTP_POLICY(sp->sinfo_flags));
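	/*
	 * Illustrative sketch (not part of the original source): for the TTL
	 * policy handled above, a per-message lifetime given in milliseconds is
	 * turned into an absolute expiry timestamp (now + lifetime).  The kernel
	 * code uses SCTP_GETTIME_TIMEVAL() and timevaladd(); ms_to_expiry() below
	 * is a hypothetical stand-in showing the same arithmetic.
	 */
#if 0
	static struct timeval
	ms_to_expiry(struct timeval now, uint32_t timetolive_ms)
	{
		struct timeval tv;

		tv.tv_sec = now.tv_sec + timetolive_ms / 1000;
		tv.tv_usec = now.tv_usec + (timetolive_ms % 1000) * 1000;
		if (tv.tv_usec >= 1000000) {	/* carry microseconds into seconds */
			tv.tv_sec++;
			tv.tv_usec -= 1000000;
		}
		return (tv);
	}
#endif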
6190 sctp_msg_append(struct sctp_tcb *stcb,
6191 struct sctp_nets *net,
6193 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6197 struct sctp_stream_queue_pending *sp = NULL;
6198 struct sctp_stream_out *strm;
6201 * Given an mbuf chain, put it into the association send queue and
6202 * place it on the wheel
6204 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6205 /* Invalid stream number */
6206 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6210 if ((stcb->asoc.stream_locked) &&
6211 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6212 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6216 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6217 /* Now can we send this? */
6218 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6219 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6220 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6221 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6222 /* got data while shutting down */
6223 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6227 sctp_alloc_a_strmoq(stcb, sp);
6229 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6233 sp->sinfo_flags = srcv->sinfo_flags;
6234 sp->timetolive = srcv->sinfo_timetolive;
6235 sp->ppid = srcv->sinfo_ppid;
6236 sp->context = srcv->sinfo_context;
6237 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6239 atomic_add_int(&sp->net->ref_count, 1);
6243 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6244 sp->stream = srcv->sinfo_stream;
6245 sp->msg_is_complete = 1;
6246 sp->sender_all_done = 1;
6249 sp->tail_mbuf = NULL;
6250 sctp_set_prsctp_policy(sp);
6252 * We could in theory (for sendall) stuff the length in, but we would
6253 * still have to hunt through the chain since we need to set up the tail_mbuf.
6257 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6258 if (SCTP_BUF_NEXT(at) == NULL)
6260 sp->length += SCTP_BUF_LEN(at);
6262 if (srcv->sinfo_keynumber_valid) {
6263 sp->auth_keyid = srcv->sinfo_keynumber;
6265 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6267 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6268 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6269 sp->holds_key_ref = 1;
6271 if (hold_stcb_lock == 0) {
6272 SCTP_TCB_SEND_LOCK(stcb);
6274 sctp_snd_sb_alloc(stcb, sp->length);
6275 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6276 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6277 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6279 if (hold_stcb_lock == 0) {
6280 SCTP_TCB_SEND_UNLOCK(stcb);
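/*
 * Illustrative sketch (not part of the original source): sctp_msg_append()
 * above walks the mbuf chain once, both summing the per-buffer lengths and
 * remembering the last buffer so the tail does not have to be hunted for
 * again.  struct buf_node and chain_len_and_tail() are hypothetical
 * stand-ins for an mbuf and the kernel loop.
 */
#if 0
struct buf_node {
	struct buf_node	*next;
	uint32_t	 len;
};

static uint32_t
chain_len_and_tail(struct buf_node *head, struct buf_node **tail)
{
	uint32_t total = 0;
	struct buf_node *at;

	for (at = head; at != NULL; at = at->next) {
		total += at->len;
		if (at->next == NULL)
			*tail = at;	/* remember the last buffer */
	}
	/* *tail is only set when the chain is non-empty */
	return (total);
}
#endif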
6290 static struct mbuf *
6291 sctp_copy_mbufchain(struct mbuf *clonechain,
6292 struct mbuf *outchain,
6293 struct mbuf **endofchain,
6296 uint8_t copy_by_ref)
6299 struct mbuf *appendchain;
6303 if (endofchain == NULL) {
6307 sctp_m_freem(outchain);
6310 if (can_take_mbuf) {
6311 appendchain = clonechain;
6314 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6316 /* It's not in a cluster */
6317 if (*endofchain == NULL) {
6318 /* lets get a mbuf cluster */
6319 if (outchain == NULL) {
6320 /* This is the general case */
6322 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6323 if (outchain == NULL) {
6326 SCTP_BUF_LEN(outchain) = 0;
6327 *endofchain = outchain;
6328 /* get the prepend space */
6329 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6332 * We really should not get a NULL in endofchain
6338 if (SCTP_BUF_NEXT(m) == NULL) {
6342 m = SCTP_BUF_NEXT(m);
6345 if (*endofchain == NULL) {
6347 * huh, TSNH XXX maybe we
6350 sctp_m_freem(outchain);
6354 /* get the new end of length */
6355 len = M_TRAILINGSPACE(*endofchain);
6357 /* how much is left at the end? */
6358 len = M_TRAILINGSPACE(*endofchain);
6360 /* Find the end of the data, for appending */
6361 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6363 /* Now lets copy it out */
6364 if (len >= sizeofcpy) {
6365 /* It all fits, copy it in */
6366 m_copydata(clonechain, 0, sizeofcpy, cp);
6367 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6369 /* fill up the end of the chain */
6371 m_copydata(clonechain, 0, len, cp);
6372 SCTP_BUF_LEN((*endofchain)) += len;
6373 /* now we need another one */
6376 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6381 SCTP_BUF_NEXT((*endofchain)) = m;
6383 cp = mtod((*endofchain), caddr_t);
6384 m_copydata(clonechain, len, sizeofcpy, cp);
6385 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6389 /* copy the old fashion way */
6390 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
6391 #ifdef SCTP_MBUF_LOGGING
6392 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6395 for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
6396 if (SCTP_BUF_IS_EXTENDED(mat)) {
6397 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6404 if (appendchain == NULL) {
6407 sctp_m_freem(outchain);
6411 /* tack on to the end */
6412 if (*endofchain != NULL) {
6413 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6417 if (SCTP_BUF_NEXT(m) == NULL) {
6418 SCTP_BUF_NEXT(m) = appendchain;
6421 m = SCTP_BUF_NEXT(m);
6425 * save off the end and update the end-chain position
6429 if (SCTP_BUF_NEXT(m) == NULL) {
6433 m = SCTP_BUF_NEXT(m);
6437 /* save off the end and update the end-chain position */
6440 if (SCTP_BUF_NEXT(m) == NULL) {
6444 m = SCTP_BUF_NEXT(m);
6446 return (appendchain);
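/*
 * Illustrative sketch (not part of the original source): on its small-copy
 * path, sctp_copy_mbufchain() above fills whatever trailing space is left in
 * the current output buffer and spills the remainder into a fresh buffer.
 * Fixed-size char buffers stand in for mbuf clusters below; the real code
 * uses M_TRAILINGSPACE()/m_copydata() and may instead take the chain by
 * reference when that is cheaper.  EX_BUFSZ and append_small_copy() are
 * hypothetical names.
 */
#if 0
#define EX_BUFSZ 2048

struct flat_buf {
	char	 data[EX_BUFSZ];
	uint32_t len;
};

static int
append_small_copy(struct flat_buf *cur, struct flat_buf *spare,
    const char *src, uint32_t srclen)
{
	uint32_t room = EX_BUFSZ - cur->len;

	if (srclen <= room) {			/* it all fits, copy it in */
		memcpy(cur->data + cur->len, src, srclen);
		cur->len += srclen;
		return (0);
	}
	memcpy(cur->data + cur->len, src, room);	/* fill up the end */
	cur->len = EX_BUFSZ;
	/* remainder goes to the spare buffer (assumed large enough here) */
	memcpy(spare->data, src + room, srclen - room);
	spare->len = srclen - room;
	return (1);				/* caller links spare after cur */
}
#endif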
6451 sctp_med_chunk_output(struct sctp_inpcb *inp,
6452 struct sctp_tcb *stcb,
6453 struct sctp_association *asoc,
6456 int control_only, int from_where,
6457 struct timeval *now, int *now_filled, int frag_point, int so_locked
6458 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6464 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6465 uint32_t val SCTP_UNUSED)
6467 struct sctp_copy_all *ca;
6470 int added_control = 0;
6471 int un_sent, do_chunk_output = 1;
6472 struct sctp_association *asoc;
6473 struct sctp_nets *net;
6475 ca = (struct sctp_copy_all *)ptr;
6476 if (ca->m == NULL) {
6479 if (ca->inp != inp) {
6483 if ((ca->m) && ca->sndlen) {
6484 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
6486 /* can't copy so we are done */
6490 #ifdef SCTP_MBUF_LOGGING
6491 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6494 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6495 if (SCTP_BUF_IS_EXTENDED(mat)) {
6496 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6504 SCTP_TCB_LOCK_ASSERT(stcb);
6505 if (stcb->asoc.alternate) {
6506 net = stcb->asoc.alternate;
6508 net = stcb->asoc.primary_destination;
6510 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6511 /* Abort this assoc with m as the user defined reason */
6513 struct sctp_paramhdr *ph;
6515 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
6517 ph = mtod(m, struct sctp_paramhdr *);
6518 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6519 ph->param_length = htons(ca->sndlen);
6522 * We add one here to keep the assoc from
6523 * disappearing on us.
6525 atomic_add_int(&stcb->asoc.refcnt, 1);
6526 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6528 * sctp_abort_an_association calls sctp_free_asoc()
6529 * free association will NOT free it since we
6530 * incremented the refcnt .. we do this to prevent
6531 * it being freed and things getting tricky since we
6532 * could end up (from free_asoc) calling inpcb_free
6533 * which would get a recursive lock call to the
6534 * iterator lock.. But as a consequence of that the
6535 * stcb will return to us un-locked.. since
6536 * free_asoc returns with either no TCB or the TCB
6537 * unlocked, we must relock.. to unlock in the
6538 * iterator timer :-0
6540 SCTP_TCB_LOCK(stcb);
6541 atomic_add_int(&stcb->asoc.refcnt, -1);
6542 goto no_chunk_output;
6546 ret = sctp_msg_append(stcb, net, m,
6550 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6551 /* shutdown this assoc */
6554 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
6556 if (TAILQ_EMPTY(&asoc->send_queue) &&
6557 TAILQ_EMPTY(&asoc->sent_queue) &&
6559 if (asoc->locked_on_sending) {
6563 * there is nothing queued to send, so I'm
6566 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6567 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6568 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6570 * only send SHUTDOWN the first time
6573 sctp_send_shutdown(stcb, net);
6574 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6575 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6577 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6578 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6579 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6581 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6582 asoc->primary_destination);
6584 do_chunk_output = 0;
6588 * we still got (or just got) data to send,
6589 * so set SHUTDOWN_PENDING
6592 * XXX sockets draft says that SCTP_EOF
6593 * should be sent with no data. currently,
6594 * we will allow user data to be sent first
6595 * and move to SHUTDOWN-PENDING
6597 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6598 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6599 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6600 if (asoc->locked_on_sending) {
6602 * Locked to send out the
6605 struct sctp_stream_queue_pending *sp;
6607 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6609 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6610 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6613 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6614 if (TAILQ_EMPTY(&asoc->send_queue) &&
6615 TAILQ_EMPTY(&asoc->sent_queue) &&
6616 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6618 atomic_add_int(&stcb->asoc.refcnt, 1);
6619 sctp_abort_an_association(stcb->sctp_ep, stcb,
6620 NULL, SCTP_SO_NOT_LOCKED);
6621 atomic_add_int(&stcb->asoc.refcnt, -1);
6622 goto no_chunk_output;
6624 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6625 asoc->primary_destination);
6631 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6632 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6634 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6635 (stcb->asoc.total_flight > 0) &&
6636 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
6638 do_chunk_output = 0;
6640 if (do_chunk_output)
6641 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6642 else if (added_control) {
6643 int num_out = 0, reason = 0, now_filled = 0;
6647 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6648 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6649 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6660 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6662 struct sctp_copy_all *ca;
6664 ca = (struct sctp_copy_all *)ptr;
6666 * Do a notify here? Kacheong suggests that the notify be done at
6667 * the send time.. so you would push up a notification if any send
6668 * failed. Don't know if this is feasible since the only failures we
6669 * have are "memory" related and if you cannot get an mbuf to send
6670 * the data you surely can't get an mbuf to send up to notify the
6671 * user you can't send the data :->
6674 /* now free everything */
6675 sctp_m_freem(ca->m);
6676 SCTP_FREE(ca, SCTP_M_COPYAL);
6680 #define MC_ALIGN(m, len) do { \
6681 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
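/*
 * Illustrative sketch (not part of the original source): MC_ALIGN() reserves
 * enough leading space that `len` bytes of data finish at the end of an
 * MCLBYTES cluster while keeping the start aligned to sizeof(long).  The
 * helper below shows just that arithmetic; align_to_cluster_end() is a
 * hypothetical name.
 */
#if 0
static uint32_t
align_to_cluster_end(uint32_t clsize, uint32_t len)
{
	/* leading bytes to reserve so the data ends at the cluster end */
	return ((clsize - len) & ~((uint32_t)sizeof(long) - 1));
}
#endif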
6686 static struct mbuf *
6687 sctp_copy_out_all(struct uio *uio, int len)
6689 struct mbuf *ret, *at;
6690 int left, willcpy, cancpy, error;
6692 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
6698 SCTP_BUF_LEN(ret) = 0;
6699 /* save space for the data chunk header */
6700 cancpy = M_TRAILINGSPACE(ret);
6701 willcpy = min(cancpy, left);
6704 /* Align data to the end */
6705 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6711 SCTP_BUF_LEN(at) = willcpy;
6712 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6715 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
6716 if (SCTP_BUF_NEXT(at) == NULL) {
6719 at = SCTP_BUF_NEXT(at);
6720 SCTP_BUF_LEN(at) = 0;
6721 cancpy = M_TRAILINGSPACE(at);
6722 willcpy = min(cancpy, left);
6729 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6730 struct sctp_sndrcvinfo *srcv)
6733 struct sctp_copy_all *ca;
6735 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6739 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6742 memset(ca, 0, sizeof(struct sctp_copy_all));
6746 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6749 * take off the sendall flag, it would be bad if we failed to do
6752 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6753 /* get length and mbuf chain */
6755 ca->sndlen = uio->uio_resid;
6756 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6757 if (ca->m == NULL) {
6758 SCTP_FREE(ca, SCTP_M_COPYAL);
6759 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6763 /* Gather the length of the send */
6769 ca->sndlen += SCTP_BUF_LEN(m);
6770 m = SCTP_BUF_NEXT(m);
6774 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6775 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6776 SCTP_ASOC_ANY_STATE,
6778 sctp_sendall_completes, inp, 1);
6780 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6781 SCTP_FREE(ca, SCTP_M_COPYAL);
6782 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6790 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6792 struct sctp_tmit_chunk *chk, *nchk;
6794 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6795 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6796 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6798 sctp_m_freem(chk->data);
6801 asoc->ctrl_queue_cnt--;
6802 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6808 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6810 struct sctp_association *asoc;
6811 struct sctp_tmit_chunk *chk, *nchk;
6812 struct sctp_asconf_chunk *acp;
6815 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6816 /* find SCTP_ASCONF chunk in queue */
6817 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6819 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6820 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6825 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6827 sctp_m_freem(chk->data);
6830 asoc->ctrl_queue_cnt--;
6831 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
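/*
 * Illustrative sketch (not part of the original source): the SCTP_TSN_GT()
 * test above compares 32-bit serial numbers that are allowed to wrap, so an
 * ASCONF is kept only while its serial is "greater than" the last one acked,
 * modulo 2^32.  One common way to implement such a circular comparison is
 * shown below; serial_gt() is a hypothetical helper, not the kernel macro.
 */
#if 0
static int
serial_gt(uint32_t a, uint32_t b)
{
	/* a is "after" b when the unsigned difference, read as signed, is positive */
	return ((int32_t)(a - b) > 0);
}
#endif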
6838 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6839 struct sctp_association *asoc,
6840 struct sctp_tmit_chunk **data_list,
6842 struct sctp_nets *net)
6845 struct sctp_tmit_chunk *tp1;
6847 for (i = 0; i < bundle_at; i++) {
6848 /* off of the send queue */
6849 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6850 asoc->send_queue_cnt--;
6853 * Any chunk NOT 0 you zap the time chunk 0 gets
6854 * zapped or set based on if a RTO measurement is needed.
6857 data_list[i]->do_rtt = 0;
6860 data_list[i]->sent_rcv_time = net->last_sent_time;
6861 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6862 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6863 if (data_list[i]->whoTo == NULL) {
6864 data_list[i]->whoTo = net;
6865 atomic_add_int(&net->ref_count, 1);
6867 /* on to the sent queue */
6868 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6869 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6870 struct sctp_tmit_chunk *tpp;
6872 /* need to move back */
6874 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6876 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6880 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6883 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6885 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6890 /* This does not lower until the cum-ack passes it */
6891 asoc->sent_queue_cnt++;
6892 if ((asoc->peers_rwnd <= 0) &&
6893 (asoc->total_flight == 0) &&
6895 /* Mark the chunk as being a window probe */
6896 SCTP_STAT_INCR(sctps_windowprobed);
6898 #ifdef SCTP_AUDITING_ENABLED
6899 sctp_audit_log(0xC2, 3);
6901 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6902 data_list[i]->snd_count = 1;
6903 data_list[i]->rec.data.chunk_was_revoked = 0;
6904 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6905 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6906 data_list[i]->whoTo->flight_size,
6907 data_list[i]->book_size,
6908 (uintptr_t) data_list[i]->whoTo,
6909 data_list[i]->rec.data.TSN_seq);
6911 sctp_flight_size_increase(data_list[i]);
6912 sctp_total_flight_increase(stcb, data_list[i]);
6913 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6914 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6915 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6917 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6918 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6919 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6920 /* SWS sender side engages */
6921 asoc->peers_rwnd = 0;
6924 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
6925 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
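/*
 * Illustrative sketch (not part of the original source): in the loop above
 * each chunk sent is charged against our view of the peer's receive window
 * (plus the configurable per-chunk overhead), and once the remaining window
 * falls below the sender-side SWS threshold it is clamped to zero so we stop
 * dribbling out tiny packets.  charge_peer_rwnd() below is a hypothetical
 * restatement of that bookkeeping.
 */
#if 0
static uint32_t
charge_peer_rwnd(uint32_t peers_rwnd, uint32_t chunk_size,
    uint32_t per_chunk_overhead, uint32_t sws_threshold)
{
	uint32_t charge = chunk_size + per_chunk_overhead;

	peers_rwnd = (peers_rwnd > charge) ? peers_rwnd - charge : 0;
	if (peers_rwnd < sws_threshold)
		peers_rwnd = 0;		/* SWS sender side engages */
	return (peers_rwnd);
}
#endif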
6930 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
6931 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6936 struct sctp_tmit_chunk *chk, *nchk;
6938 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6939 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6940 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
6941 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6942 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6943 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
6944 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6945 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6946 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6947 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6948 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
6949 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
6950 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
6951 /* Stray chunks must be cleaned up */
6953 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6955 sctp_m_freem(chk->data);
6958 asoc->ctrl_queue_cnt--;
6959 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
6960 asoc->fwd_tsn_cnt--;
6961 sctp_free_a_chunk(stcb, chk, so_locked);
6962 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
6963 /* special handling, we must look into the param */
6964 if (chk != asoc->str_reset) {
6965 goto clean_up_anyway;
6973 sctp_can_we_split_this(struct sctp_tcb *stcb,
6975 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
6978 * Make a decision on if I should split a msg into multiple parts.
6979 * This is only asked of incomplete messages.
6983 * If we are doing EEOR we need to always send it if it's the
6984 * entire thing, since it might be all the guy is putting in the hopper.
6987 if (goal_mtu >= length) {
6989 * If we have data outstanding,
6990 * we get another chance when the sack
6991 * arrives to transmit - wait for more data
6993 if (stcb->asoc.total_flight == 0) {
6995 * If nothing is in flight, we zero the packet counter.
7003 /* You can fill the rest */
7008 * For those strange folk that make the send buffer
7009 * smaller than our fragmentation point, we can't
7010 * get a full msg in so we have to allow splitting.
7012 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7015 if ((length <= goal_mtu) ||
7016 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7017 /* Sub-optimal residual, don't split in non-eeor mode. */
7021 * If we reach here length is larger than the goal_mtu. Do we wish
7022 * to split it for the sake of packet putting together?
7024 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7025 /* It's ok to split it */
7026 return (min(goal_mtu, frag_point));
7028 /* Nope, can't split */
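/*
 * Illustrative sketch (not part of the original source): a compact
 * restatement of the non-EEOR splitting rules implemented above, with the
 * sysctl values passed in as plain parameters.  A return of 0 means "do not
 * split now"; anything else is how many bytes to peel off.  split_decision()
 * is a hypothetical name.
 */
#if 0
static uint32_t
split_decision(uint32_t length, uint32_t goal_mtu, uint32_t frag_point,
    uint32_t min_residual, uint32_t min_split_point, uint32_t sndbuf_limit)
{
	if (sndbuf_limit < frag_point)
		return (length);	/* send buffer smaller than frag point: take all that is queued */
	if (length <= goal_mtu || (length - goal_mtu) < min_residual)
		return (0);		/* it fits, or the residual is not worth splitting for */
	if (goal_mtu >= ((min_split_point < frag_point) ?
	    min_split_point : frag_point))
		return ((goal_mtu < frag_point) ? goal_mtu : frag_point);
	return (0);			/* nope, can't split */
}
#endif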
7034 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7035 struct sctp_stream_out *strq,
7037 uint32_t frag_point,
7043 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7048 /* Move from the stream to the send_queue keeping track of the total */
7049 struct sctp_association *asoc;
7050 struct sctp_stream_queue_pending *sp;
7051 struct sctp_tmit_chunk *chk;
7052 struct sctp_data_chunk *dchkh;
7053 uint32_t to_move, length;
7054 uint8_t rcv_flags = 0;
7056 uint8_t send_lock_up = 0;
7058 SCTP_TCB_LOCK_ASSERT(stcb);
7061 /* sa_ignore FREED_MEMORY */
7062 sp = TAILQ_FIRST(&strq->outqueue);
7065 if (send_lock_up == 0) {
7066 SCTP_TCB_SEND_LOCK(stcb);
7069 sp = TAILQ_FIRST(&strq->outqueue);
7073 if (strq->last_msg_incomplete) {
7074 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7076 strq->last_msg_incomplete);
7077 strq->last_msg_incomplete = 0;
7081 SCTP_TCB_SEND_UNLOCK(stcb);
7086 if ((sp->msg_is_complete) && (sp->length == 0)) {
7087 if (sp->sender_all_done) {
7089 * We are doing deferred cleanup. Last time through
7090 * when we took all the data the sender_all_done was
7093 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7094 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7095 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7096 sp->sender_all_done,
7098 sp->msg_is_complete,
7102 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7103 SCTP_TCB_SEND_LOCK(stcb);
7106 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7107 TAILQ_REMOVE(&strq->outqueue, sp, next);
7108 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7110 sctp_free_remote_addr(sp->net);
7114 sctp_m_freem(sp->data);
7117 sctp_free_a_strmoq(stcb, sp, so_locked);
7118 /* we can't be locked to it */
7120 stcb->asoc.locked_on_sending = NULL;
7122 SCTP_TCB_SEND_UNLOCK(stcb);
7125 /* back to get the next msg */
7129 * sender just finished this but still holds a reference
7138 /* is there some to get */
7139 if (sp->length == 0) {
7145 } else if (sp->discard_rest) {
7146 if (send_lock_up == 0) {
7147 SCTP_TCB_SEND_LOCK(stcb);
7150 /* Whack down the size */
7151 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7152 if ((stcb->sctp_socket != NULL) && \
7153 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7154 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7155 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7158 sctp_m_freem(sp->data);
7160 sp->tail_mbuf = NULL;
7170 some_taken = sp->some_taken;
7171 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7172 sp->msg_is_complete = 1;
7175 length = sp->length;
7176 if (sp->msg_is_complete) {
7177 /* The message is complete */
7178 to_move = min(length, frag_point);
7179 if (to_move == length) {
7180 /* All of it fits in the MTU */
7181 if (sp->some_taken) {
7182 rcv_flags |= SCTP_DATA_LAST_FRAG;
7183 sp->put_last_out = 1;
7185 rcv_flags |= SCTP_DATA_NOT_FRAG;
7186 sp->put_last_out = 1;
7189 /* Not all of it fits, we fragment */
7190 if (sp->some_taken == 0) {
7191 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7196 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7199 * We use a snapshot of length in case it
7200 * is expanding during the compare.
7205 if (to_move >= llen) {
7207 if (send_lock_up == 0) {
7209 * We are taking all of an incomplete msg
7210 * thus we need a send lock.
7212 SCTP_TCB_SEND_LOCK(stcb);
7214 if (sp->msg_is_complete) {
7216 * the sender finished the
7223 if (sp->some_taken == 0) {
7224 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7228 /* Nothing to take. */
7229 if (sp->some_taken) {
7238 /* If we reach here, we can copy out a chunk */
7239 sctp_alloc_a_chunk(stcb, chk);
7241 /* No chunk memory */
7247 * Setup for unordered if needed by looking at the user sent info
7250 if (sp->sinfo_flags & SCTP_UNORDERED) {
7251 rcv_flags |= SCTP_DATA_UNORDERED;
7253 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7254 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7255 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7257 /* clear out the chunk before setting up */
7258 memset(chk, 0, sizeof(*chk));
7259 chk->rec.data.rcv_flags = rcv_flags;
7261 if (to_move >= length) {
7262 /* we think we can steal the whole thing */
7263 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7264 SCTP_TCB_SEND_LOCK(stcb);
7267 if (to_move < sp->length) {
7268 /* bail, it changed */
7271 chk->data = sp->data;
7272 chk->last_mbuf = sp->tail_mbuf;
7273 /* register the stealing */
7274 sp->data = sp->tail_mbuf = NULL;
7279 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
7280 chk->last_mbuf = NULL;
7281 if (chk->data == NULL) {
7282 sp->some_taken = some_taken;
7283 sctp_free_a_chunk(stcb, chk, so_locked);
7288 #ifdef SCTP_MBUF_LOGGING
7289 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7292 for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
7293 if (SCTP_BUF_IS_EXTENDED(mat)) {
7294 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7299 /* Pull off the data */
7300 m_adj(sp->data, to_move);
7301 /* Now lets work our way down and compact it */
7303 while (m && (SCTP_BUF_LEN(m) == 0)) {
7304 sp->data = SCTP_BUF_NEXT(m);
7305 SCTP_BUF_NEXT(m) = NULL;
7306 if (sp->tail_mbuf == m) {
7308 * Freeing tail? TSNH since
7309 * we supposedly were taking less
7310 * than the sp->length.
7313 panic("Huh, freing tail? - TSNH");
7315 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7316 sp->tail_mbuf = sp->data = NULL;
7325 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7326 chk->copy_by_ref = 1;
7328 chk->copy_by_ref = 0;
7331 * get last_mbuf and counts of mb usage. This is ugly but hopefully
7332 * it's only one mbuf.
7334 if (chk->last_mbuf == NULL) {
7335 chk->last_mbuf = chk->data;
7336 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7337 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7340 if (to_move > length) {
7341 /*- This should not happen either
7342 * since we always lower to_move to the size
7343 * of sp->length if it's larger.
7346 panic("Huh, how can to_move be larger?");
7348 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7352 atomic_subtract_int(&sp->length, to_move);
7354 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7355 /* Not enough room for a chunk header, get some */
7358 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
7361 * we're in trouble here. _PREPEND below will free
7362 * all the data if there is no leading space, so we
7363 * must put the data back and restore.
7365 if (send_lock_up == 0) {
7366 SCTP_TCB_SEND_LOCK(stcb);
7369 if (chk->data == NULL) {
7370 /* unsteal the data */
7371 sp->data = chk->data;
7372 sp->tail_mbuf = chk->last_mbuf;
7376 /* reassemble the data */
7378 sp->data = chk->data;
7379 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7381 sp->some_taken = some_taken;
7382 atomic_add_int(&sp->length, to_move);
7385 sctp_free_a_chunk(stcb, chk, so_locked);
7389 SCTP_BUF_LEN(m) = 0;
7390 SCTP_BUF_NEXT(m) = chk->data;
7392 M_ALIGN(chk->data, 4);
7395 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
7396 if (chk->data == NULL) {
7397 /* HELP, TSNH since we assured it would not above? */
7399 panic("prepend failes HELP?");
7401 SCTP_PRINTF("prepend fails HELP?\n");
7402 sctp_free_a_chunk(stcb, chk, so_locked);
7408 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7409 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7410 chk->book_size_scale = 0;
7411 chk->sent = SCTP_DATAGRAM_UNSENT;
7414 chk->asoc = &stcb->asoc;
7415 chk->pad_inplace = 0;
7416 chk->no_fr_allowed = 0;
7417 chk->rec.data.stream_seq = strq->next_sequence_send;
7418 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7419 strq->next_sequence_send++;
7421 chk->rec.data.stream_number = sp->stream;
7422 chk->rec.data.payloadtype = sp->ppid;
7423 chk->rec.data.context = sp->context;
7424 chk->rec.data.doing_fast_retransmit = 0;
7426 chk->rec.data.timetodrop = sp->ts;
7427 chk->flags = sp->act_flags;
7430 chk->whoTo = sp->net;
7431 atomic_add_int(&chk->whoTo->ref_count, 1);
7435 if (sp->holds_key_ref) {
7436 chk->auth_keyid = sp->auth_keyid;
7437 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7438 chk->holds_key_ref = 1;
7440 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7441 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7442 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7443 (uintptr_t) stcb, sp->length,
7444 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7445 chk->rec.data.TSN_seq);
7447 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7449 * Put the rest of the things in place now. Size was done earlier in
7450 * previous loop prior to padding.
7453 #ifdef SCTP_ASOCLOG_OF_TSNS
7454 SCTP_TCB_LOCK_ASSERT(stcb);
7455 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7456 asoc->tsn_out_at = 0;
7457 asoc->tsn_out_wrapped = 1;
7459 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7460 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7461 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7462 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7463 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7464 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7465 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7466 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7470 dchkh->ch.chunk_type = SCTP_DATA;
7471 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7472 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7473 dchkh->dp.stream_id = htons(strq->stream_no);
7474 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7475 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7476 dchkh->ch.chunk_length = htons(chk->send_size);
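	/*
	 * Illustrative sketch (not part of the original source): the fields
	 * being filled in just above make up the DATA chunk header as defined
	 * by RFC 4960 (16 bytes ahead of the user data); the kernel's own
	 * struct sctp_data_chunk carries the same information.  Multi-byte
	 * fields are big-endian on the wire, hence the htons()/htonl() calls.
	 * The struct name below is hypothetical.
	 */
#if 0
	struct example_data_chunk_hdr {
		uint8_t		chunk_type;	/* SCTP_DATA (0) */
		uint8_t		chunk_flags;	/* U/B/E/I bits */
		uint16_t	chunk_length;	/* header + user data, excluding pad */
		uint32_t	tsn;		/* transmission sequence number */
		uint16_t	stream_id;
		uint16_t	stream_seq;
		uint32_t	ppid;		/* payload protocol identifier */
	};
#endif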
7477 /* Now advance the chk->send_size by the actual pad needed. */
7478 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7483 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7484 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7485 chk->pad_inplace = 1;
7487 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7488 /* pad added an mbuf */
7489 chk->last_mbuf = lm;
7491 chk->send_size += pads;
7493 /* We only re-set the policy if it is on */
7494 if (sp->pr_sctp_on) {
7495 sctp_set_prsctp_policy(sp);
7496 asoc->pr_sctp_cnt++;
7497 chk->pr_sctp_on = 1;
7499 chk->pr_sctp_on = 0;
7501 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7502 /* All done pull and kill the message */
7503 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7504 if (sp->put_last_out == 0) {
7505 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7506 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7507 sp->sender_all_done,
7509 sp->msg_is_complete,
7513 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7514 SCTP_TCB_SEND_LOCK(stcb);
7517 TAILQ_REMOVE(&strq->outqueue, sp, next);
7518 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7520 sctp_free_remote_addr(sp->net);
7524 sctp_m_freem(sp->data);
7527 sctp_free_a_strmoq(stcb, sp, so_locked);
7529 /* we can't be locked to it */
7531 stcb->asoc.locked_on_sending = NULL;
7533 /* more to go, we are locked */
7536 asoc->chunks_on_out_queue++;
7537 strq->chunks_on_queues++;
7538 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7539 asoc->send_queue_cnt++;
7542 SCTP_TCB_SEND_UNLOCK(stcb);
7549 sctp_fill_outqueue(struct sctp_tcb *stcb,
7550 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7551 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7556 struct sctp_association *asoc;
7557 struct sctp_stream_out *strq;
7558 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7561 SCTP_TCB_LOCK_ASSERT(stcb);
7563 switch (net->ro._l_addr.sa.sa_family) {
7566 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7571 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7576 goal_mtu = net->mtu;
7579 /* Need an allowance for the data chunk header too */
7580 goal_mtu -= sizeof(struct sctp_data_chunk);
7582 /* must make even word boundary */
7583 goal_mtu &= 0xfffffffc;
7584 if (asoc->locked_on_sending) {
7585 /* We are stuck on one stream until the message completes. */
7586 strq = asoc->locked_on_sending;
7589 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7592 while ((goal_mtu > 0) && strq) {
7595 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7596 &giveup, eeor_mode, &bail, so_locked);
7598 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7601 asoc->locked_on_sending = strq;
7602 if ((moved_how_much == 0) || (giveup) || bail)
7603 /* no more to move for now */
7606 asoc->locked_on_sending = NULL;
7607 if ((giveup) || bail) {
7610 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7615 total_moved += moved_how_much;
7616 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7617 goal_mtu &= 0xfffffffc;
7622 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7624 if (total_moved == 0) {
7625 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7626 (net == stcb->asoc.primary_destination)) {
7627 /* ran dry for primary network net */
7628 SCTP_STAT_INCR(sctps_primary_randry);
7629 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7630 /* ran dry with CMT on */
7631 SCTP_STAT_INCR(sctps_cmt_randry);
7637 sctp_fix_ecn_echo(struct sctp_association *asoc)
7639 struct sctp_tmit_chunk *chk;
7641 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7642 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7643 chk->sent = SCTP_DATAGRAM_UNSENT;
7649 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7651 struct sctp_association *asoc;
7652 struct sctp_tmit_chunk *chk;
7653 struct sctp_stream_queue_pending *sp;
7660 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7661 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7662 if (sp->net == net) {
7663 sctp_free_remote_addr(sp->net);
7668 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7669 if (chk->whoTo == net) {
7670 sctp_free_remote_addr(chk->whoTo);
7677 sctp_med_chunk_output(struct sctp_inpcb *inp,
7678 struct sctp_tcb *stcb,
7679 struct sctp_association *asoc,
7682 int control_only, int from_where,
7683 struct timeval *now, int *now_filled, int frag_point, int so_locked
7684 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7690 * Ok this is the generic chunk service queue. We must do the
7691 * following: - Service the stream queue that is next, moving any
7692 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7693 * LAST to the out queue in one pass) and assigning TSN's - Check to
7694 * see if the cwnd/rwnd allows any output, if so we go ahead and
7695 * formulate and send the low level chunks. Making sure to combine
7696 * any control in the control chunk queue also.
7698 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7699 struct mbuf *outchain, *endoutchain;
7700 struct sctp_tmit_chunk *chk, *nchk;
7702 /* temp arrays for unlinking */
7703 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7704 int no_fragmentflg, error;
7705 unsigned int max_rwnd_per_dest, max_send_per_dest;
7706 int one_chunk, hbflag, skip_data_for_this_net;
7707 int asconf, cookie, no_out_cnt;
7708 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7709 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7711 uint32_t auth_offset = 0;
7712 struct sctp_auth_chunk *auth = NULL;
7713 uint16_t auth_keyid;
7714 int override_ok = 1;
7715 int skip_fill_up = 0;
7716 int data_auth_reqd = 0;
7719 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7725 auth_keyid = stcb->asoc.authinfo.active_keyid;
7727 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7728 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7729 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7734 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7736 * First lets prime the pump. For each destination, if there is room
7737 * in the flight size, attempt to pull an MTU's worth out of the
7738 * stream queues into the general send_queue
7740 #ifdef SCTP_AUDITING_ENABLED
7741 sctp_audit_log(0xC2, 2);
7743 SCTP_TCB_LOCK_ASSERT(stcb);
7745 if ((control_only) || (asoc->stream_reset_outstanding))
7750 /* Nothing possible to send? */
7751 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7752 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7753 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7754 TAILQ_EMPTY(&asoc->send_queue) &&
7755 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
7760 if (asoc->peers_rwnd == 0) {
7761 /* No room in peers rwnd */
7763 if (asoc->total_flight > 0) {
7764 /* we are allowed one chunk in flight */
7768 if (stcb->asoc.ecn_echo_cnt_onq) {
7769 /* Record where a sack goes, if any */
7770 if (no_data_chunks &&
7771 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7772 /* Nothing but ECNe to send - we don't do that */
7773 goto nothing_to_send;
7775 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7776 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7777 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7778 sack_goes_to = chk->whoTo;
7783 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7784 if (stcb->sctp_socket)
7785 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7787 max_send_per_dest = 0;
7788 if (no_data_chunks == 0) {
7789 /* How many non-directed chunks are there? */
7790 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7791 if (chk->whoTo == NULL) {
7793 * We already have non-directed chunks on
7794 * the queue, no need to do a fill-up.
7802 if ((no_data_chunks == 0) &&
7803 (skip_fill_up == 0) &&
7804 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7805 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7807 * This for loop we are in takes in each net, if
7808 * it's got space in cwnd and has data sent to it
7809 * (when CMT is off) then it calls
7810 * sctp_fill_outqueue for the net. This gets data on
7811 * the send queue for that network.
7813 * In sctp_fill_outqueue TSN's are assigned and data is
7814 * copied out of the stream buffers. Note mostly
7815 * copy by reference (we hope).
7817 net->window_probe = 0;
7818 if ((net != stcb->asoc.alternate) &&
7819 ((net->dest_state & SCTP_ADDR_PF) ||
7820 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7821 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7822 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7823 sctp_log_cwnd(stcb, net, 1,
7824 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7828 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7829 (net->flight_size == 0)) {
7830 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7832 if (net->flight_size >= net->cwnd) {
7833 /* skip this network, no room - can't fill */
7834 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7835 sctp_log_cwnd(stcb, net, 3,
7836 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7840 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7841 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7843 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7845 /* memory alloc failure */
7851 /* now service each destination and send out what we can for it */
7852 /* Nothing to send? */
7853 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7854 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7855 TAILQ_EMPTY(&asoc->send_queue)) {
7859 if (asoc->sctp_cmt_on_off > 0) {
7860 /* get the last start point */
7861 start_at = asoc->last_net_cmt_send_started;
7862 if (start_at == NULL) {
7863 /* null so to beginning */
7864 start_at = TAILQ_FIRST(&asoc->nets);
7866 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7867 if (start_at == NULL) {
7868 start_at = TAILQ_FIRST(&asoc->nets);
7871 asoc->last_net_cmt_send_started = start_at;
7873 start_at = TAILQ_FIRST(&asoc->nets);
7875 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7876 if (chk->whoTo == NULL) {
7877 if (asoc->alternate) {
7878 chk->whoTo = asoc->alternate;
7880 chk->whoTo = asoc->primary_destination;
7882 atomic_add_int(&chk->whoTo->ref_count, 1);
7885 old_start_at = NULL;
7886 again_one_more_time:
7887 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7888 /* how much can we send? */
7889 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7890 if (old_start_at && (old_start_at == net)) {
7891 /* through the list completely. */
7895 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7896 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7897 (net->flight_size >= net->cwnd)) {
7899 * Nothing on control or asconf and flight is full,
7900 * we can skip even in the CMT case.
7905 endoutchain = outchain = NULL;
7908 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7909 skip_data_for_this_net = 1;
7911 skip_data_for_this_net = 0;
7913 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7915 * if we have a route and an ifp check to see if we
7916 * have room to send to this guy
7920 ifp = net->ro.ro_rt->rt_ifp;
7921 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7922 SCTP_STAT_INCR(sctps_ifnomemqueued);
7923 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7924 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7929 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7932 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7937 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7947 if (mtu > asoc->peers_rwnd) {
7948 if (asoc->total_flight > 0) {
7949 /* We have a packet in flight somewhere */
7950 r_mtu = asoc->peers_rwnd;
7952 /* We are always allowed to send one MTU out */
7959 /************************/
7960 /* ASCONF transmission */
7961 /************************/
7962 /* Now first lets go through the asconf queue */
7963 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7964 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
7967 if (chk->whoTo == NULL) {
7968 if (asoc->alternate == NULL) {
7969 if (asoc->primary_destination != net) {
7973 if (asoc->alternate != net) {
7978 if (chk->whoTo != net) {
7982 if (chk->data == NULL) {
7985 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
7986 chk->sent != SCTP_DATAGRAM_RESEND) {
7990 * if no AUTH is yet included and this chunk
7991 * requires it, make sure to account for it. We
7992 * don't apply the size until the AUTH chunk is
7993 * actually added below in case there is no room for
7994 * this chunk. NOTE: we overload the use of "omtu"
7997 if ((auth == NULL) &&
7998 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7999 stcb->asoc.peer_auth_chunks)) {
8000 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8003 /* Here we do NOT factor the r_mtu */
8004 if ((chk->send_size < (int)(mtu - omtu)) ||
8005 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8007 * We probably should glom the mbuf chain
8008 * from the chk->data for control but the
8009 * problem is it becomes yet one more level
8010 * of tracking to do if for some reason
8011 * output fails. Then I have got to
8012 * reconstruct the merged control chain.. el
8013 * yucko.. for now we take the easy way and
8017 * Add an AUTH chunk, if chunk requires it
8018 * save the offset into the chain for AUTH
8020 if ((auth == NULL) &&
8021 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8022 stcb->asoc.peer_auth_chunks))) {
8023 outchain = sctp_add_auth_chunk(outchain,
8028 chk->rec.chunk_id.id);
8029 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8031 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8032 (int)chk->rec.chunk_id.can_take_data,
8033 chk->send_size, chk->copy_by_ref);
8034 if (outchain == NULL) {
8036 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8039 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8040 /* update our MTU size */
8041 if (mtu > (chk->send_size + omtu))
8042 mtu -= (chk->send_size + omtu);
8045 to_out += (chk->send_size + omtu);
8046 /* Do clear IP_DF ? */
8047 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8050 if (chk->rec.chunk_id.can_take_data)
8053 * set hb flag since we can use these for
8059 * should sysctl this: don't bundle data
8060 * with ASCONF since it requires AUTH
8063 chk->sent = SCTP_DATAGRAM_SENT;
8064 if (chk->whoTo == NULL) {
8066 atomic_add_int(&net->ref_count, 1);
8071 * Ok we are out of room but we can
8072 * output without affecting the
8073 * flight size since this little guy
8074 * is a control only packet.
8076 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8078 * do NOT clear the asconf flag as
8079 * it is used to do appropriate
8080 * source address selection.
8082 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8083 (struct sockaddr *)&net->ro._l_addr,
8084 outchain, auth_offset, auth,
8085 stcb->asoc.authinfo.active_keyid,
8086 no_fragmentflg, 0, asconf,
8087 inp->sctp_lport, stcb->rport,
8088 htonl(stcb->asoc.peer_vtag),
8092 if (error == ENOBUFS) {
8093 asoc->ifp_had_enobuf = 1;
8094 SCTP_STAT_INCR(sctps_lowlevelerr);
8096 if (from_where == 0) {
8097 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8099 if (*now_filled == 0) {
8100 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8102 *now = net->last_sent_time;
8104 net->last_sent_time = *now;
8107 /* error, could not output */
8108 if (error == EHOSTUNREACH) {
8114 sctp_move_chunks_from_net(stcb, net);
8119 asoc->ifp_had_enobuf = 0;
8120 if (*now_filled == 0) {
8121 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8123 *now = net->last_sent_time;
8125 net->last_sent_time = *now;
8129 * increase the number we sent, if a
8130 * cookie is sent we don't tell them
8133 outchain = endoutchain = NULL;
8137 *num_out += ctl_cnt;
8138 /* recalc a clean slate and setup */
8139 switch (net->ro._l_addr.sa.sa_family) {
8142 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8147 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8160 /************************/
8161 /* Control transmission */
8162 /************************/
8163 /* Now first lets go through the control queue */
8164 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8165 if ((sack_goes_to) &&
8166 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8167 (chk->whoTo != sack_goes_to)) {
8169 * if we have a sack in queue, and we are
8170 * looking at an ecn echo that is NOT queued
8171 * to where the sack is going..
8173 if (chk->whoTo == net) {
8175 * Don't transmit it to where its
8176 * going (current net)
8179 } else if (sack_goes_to == net) {
8181 * But do transmit it to this
8184 goto skip_net_check;
8187 if (chk->whoTo == NULL) {
8188 if (asoc->alternate == NULL) {
8189 if (asoc->primary_destination != net) {
8193 if (asoc->alternate != net) {
8198 if (chk->whoTo != net) {
8203 if (chk->data == NULL) {
8206 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8208 * It must be unsent. Cookies and ASCONF's
8209 * hang around but their timers will force
8210 * when marked for resend.
8215 * if no AUTH is yet included and this chunk
8216 * requires it, make sure to account for it. We
8217 * don't apply the size until the AUTH chunk is
8218 * actually added below in case there is no room for
8219 * this chunk. NOTE: we overload the use of "omtu"
8222 if ((auth == NULL) &&
8223 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8224 stcb->asoc.peer_auth_chunks)) {
8225 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8228 /* Here we do NOT factor the r_mtu */
8229 if ((chk->send_size <= (int)(mtu - omtu)) ||
8230 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8232 * We probably should glom the mbuf chain
8233 * from the chk->data for control but the
8234 * problem is it becomes yet one more level
8235 * of tracking to do if for some reason
8236 * output fails. Then I have got to
8237 * reconstruct the merged control chain.. el
8238 * yucko.. for now we take the easy way and
8242 * Add an AUTH chunk, if chunk requires it
8243 * save the offset into the chain for AUTH
8245 if ((auth == NULL) &&
8246 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8247 stcb->asoc.peer_auth_chunks))) {
8248 outchain = sctp_add_auth_chunk(outchain,
8253 chk->rec.chunk_id.id);
8254 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8256 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8257 (int)chk->rec.chunk_id.can_take_data,
8258 chk->send_size, chk->copy_by_ref);
8259 if (outchain == NULL) {
8261 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8264 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8265 /* update our MTU size */
8266 if (mtu > (chk->send_size + omtu))
8267 mtu -= (chk->send_size + omtu);
8270 to_out += (chk->send_size + omtu);
8271 /* Do clear IP_DF ? */
8272 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8275 if (chk->rec.chunk_id.can_take_data)
8277 /* Mark things to be removed, if needed */
8278 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8279 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8280 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8281 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8282 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8283 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8284 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8285 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8286 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8287 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8288 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8289 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8292 /* remove these chunks at the end */
8293 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8294 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8295 /* turn off the timer */
8296 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8297 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8298 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8304 * Other chunks, since they have
8305 * timers running (i.e. COOKIE) we
8306 * just "trust" that it gets sent or
8310 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8313 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8315 * Increment ecne send count
8316 * here this means we may be
8317 * over-zealous in our
8318 * counting if the send
8319 * fails, but it's the best
8320 * place to do it (we used
8321 * to do it in the queue of
8322 * the chunk, but that did
8323 * not tell how many times
8326 SCTP_STAT_INCR(sctps_sendecne);
8328 chk->sent = SCTP_DATAGRAM_SENT;
8329 if (chk->whoTo == NULL) {
8331 atomic_add_int(&net->ref_count, 1);
8337 * Ok we are out of room but we can
8338 * output without affecting the
8339 * flight size since this little guy
8340 * is a control only packet.
8343 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8345 * do NOT clear the asconf
8346 * flag as it is used to do
8347 * appropriate source
8348 * address selection.
8352 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8355 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8356 (struct sockaddr *)&net->ro._l_addr,
8359 stcb->asoc.authinfo.active_keyid,
8360 no_fragmentflg, 0, asconf,
8361 inp->sctp_lport, stcb->rport,
8362 htonl(stcb->asoc.peer_vtag),
8366 if (error == ENOBUFS) {
8367 asoc->ifp_had_enobuf = 1;
8368 SCTP_STAT_INCR(sctps_lowlevelerr);
8370 if (from_where == 0) {
8371 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8373 /* error, could not output */
8375 if (*now_filled == 0) {
8376 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8378 *now = net->last_sent_time;
8380 net->last_sent_time = *now;
8384 if (error == EHOSTUNREACH) {
8390 sctp_move_chunks_from_net(stcb, net);
8395 asoc->ifp_had_enobuf = 0;
8396 /* Only HB or ASCONF advances time */
8398 if (*now_filled == 0) {
8399 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8401 *now = net->last_sent_time;
8403 net->last_sent_time = *now;
8408 * increase the number we sent; if a
8409 * cookie is sent we don't tell them
8412 outchain = endoutchain = NULL;
8416 *num_out += ctl_cnt;
8417 /* recalc a clean slate and setup */
8418 switch (net->ro._l_addr.sa.sa_family) {
8421 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8426 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8439 /* JRI: if dest is in PF state, do not send data to it */
8440 if ((asoc->sctp_cmt_on_off > 0) &&
8441 (net != stcb->asoc.alternate) &&
8442 (net->dest_state & SCTP_ADDR_PF)) {
8445 if (net->flight_size >= net->cwnd) {
8448 if ((asoc->sctp_cmt_on_off > 0) &&
8449 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8450 (net->flight_size > max_rwnd_per_dest)) {
8454 * We need a specific accounting for the usage of the send
8455 * buffer. We also need to check the number of messages per
8456 * net. For now, this is better than nothing and it is disabled
8459 if ((asoc->sctp_cmt_on_off > 0) &&
8460 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8461 (max_send_per_dest > 0) &&
8462 (net->flight_size > max_send_per_dest)) {
8465 /*********************/
8466 /* Data transmission */
8467 /*********************/
8469 * if AUTH for DATA is required and no AUTH has been added
8470 * yet, account for this in the mtu now... if no data can be
8471 * bundled, this adjustment won't matter anyway since the
8472 * packet will be going out...
8474 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8475 stcb->asoc.peer_auth_chunks);
8476 if (data_auth_reqd && (auth == NULL)) {
8477 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8479 /* now lets add any data within the MTU constraints */
8480 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8483 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8484 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8491 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8492 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8502 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8503 (skip_data_for_this_net == 0)) ||
8505 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8506 if (no_data_chunks) {
8507 /* let only control go out */
8511 if (net->flight_size >= net->cwnd) {
8512 /* skip this net, no room for data */
8516 if ((chk->whoTo != NULL) &&
8517 (chk->whoTo != net)) {
8518 /* Don't send the chunk on this net */
8521 if (asoc->sctp_cmt_on_off == 0) {
8522 if ((asoc->alternate) &&
8523 (asoc->alternate != net) &&
8524 (chk->whoTo == NULL)) {
8526 } else if ((net != asoc->primary_destination) &&
8527 (asoc->alternate == NULL) &&
8528 (chk->whoTo == NULL)) {
8532 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8534 * strange, we have a chunk that is
8535 * too big for its destination and
8536 * yet no fragment ok flag.
8537 * Something went wrong when the
8538 * PMTU changed...we did not mark
8539 * this chunk for some reason?? I
8540 * will fix it here by letting IP
8541 * fragment it for now and printing
8542 * a warning. This really should not
8545 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8546 chk->send_size, mtu);
8547 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
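/* if a shutdown is pending (and the sysctl allows it), the SACK-IMMEDIATELY bit is set on outgoing DATA below, asking the peer to SACK without delay, presumably so the shutdown can complete sooner */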
8549 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8550 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8551 struct sctp_data_chunk *dchkh;
8553 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8554 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8556 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8557 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8558 /* ok we will add this one */
8561 * Add an AUTH chunk, if chunk
8562 * requires it, save the offset into
8563 * the chain for AUTH
8565 if (data_auth_reqd) {
8567 outchain = sctp_add_auth_chunk(outchain,
8573 auth_keyid = chk->auth_keyid;
8575 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8576 } else if (override_ok) {
8581 auth_keyid = chk->auth_keyid;
8583 } else if (auth_keyid != chk->auth_keyid) {
8591 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8592 chk->send_size, chk->copy_by_ref);
8593 if (outchain == NULL) {
8594 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8595 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8596 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8599 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8602 /* update our MTU size */
8603 /* Do clear IP_DF ? */
8604 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8607 /* unsigned subtraction of mtu */
8608 if (mtu > chk->send_size)
8609 mtu -= chk->send_size;
8612 /* unsigned subtraction of r_mtu */
8613 if (r_mtu > chk->send_size)
8614 r_mtu -= chk->send_size;
8618 to_out += chk->send_size;
8619 if ((to_out > mx_mtu) && no_fragmentflg) {
8621 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8623 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8627 chk->window_probe = 0;
8628 data_list[bundle_at++] = chk;
8629 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8632 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8633 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8634 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8636 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8638 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8639 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8649 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8651 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8652 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8653 data_list[0]->window_probe = 1;
8654 net->window_probe = 1;
8660 * Must be sent in order of the
8661 * TSN's (on a network)
8665 } /* for (chunk gather loop for this net) */
8666 } /* if asoc.state OPEN */
8668 /* Is there something to send for this destination? */
8670 /* We may need to start a control timer or two */
8672 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8675 * do NOT clear the asconf flag as it is
8676 * used to do appropriate source address
8681 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8684 /* must start a send timer if data is being sent */
8685 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8687 * no timer running on this destination
8690 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8692 /* Now send it, if there is anything to send :> */
8693 if ((error = sctp_lowlevel_chunk_output(inp,
8696 (struct sockaddr *)&net->ro._l_addr,
8704 inp->sctp_lport, stcb->rport,
8705 htonl(stcb->asoc.peer_vtag),
8709 /* error, we could not output */
8710 if (error == ENOBUFS) {
8711 SCTP_STAT_INCR(sctps_lowlevelerr);
8712 asoc->ifp_had_enobuf = 1;
8714 if (from_where == 0) {
8715 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8717 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8719 if (*now_filled == 0) {
8720 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8722 *now = net->last_sent_time;
8724 net->last_sent_time = *now;
8728 if (error == EHOSTUNREACH) {
8730 * Destination went unreachable
8733 sctp_move_chunks_from_net(stcb, net);
8737 * I add this line to be paranoid. As far as
8738 * I can tell the continue takes us back to
8739 * the top of the for, but just to make sure
8740 * I will reset these again here.
8742 ctl_cnt = bundle_at = 0;
8743 continue; /* This takes us back to the
8744 * for() for the nets. */
8746 asoc->ifp_had_enobuf = 0;
8751 if (bundle_at || hbflag) {
8752 /* For data/asconf and hb set time */
8753 if (*now_filled == 0) {
8754 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8756 *now = net->last_sent_time;
8758 net->last_sent_time = *now;
8762 *num_out += (ctl_cnt + bundle_at);
8765 /* setup for a RTO measurement */
8766 tsns_sent = data_list[0]->rec.data.TSN_seq;
8767 /* fill time if not already filled */
8768 if (*now_filled == 0) {
8769 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8771 *now = asoc->time_last_sent;
8773 asoc->time_last_sent = *now;
8775 if (net->rto_needed) {
8776 data_list[0]->do_rtt = 1;
8777 net->rto_needed = 0;
8779 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8780 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8786 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8787 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8790 if (old_start_at == NULL) {
8791 old_start_at = start_at;
8792 start_at = TAILQ_FIRST(&asoc->nets);
8794 goto again_one_more_time;
8797 * At the end there should be no NON timed chunks hanging on this
8800 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8801 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8803 if ((*num_out == 0) && (*reason_code == 0)) {
8808 sctp_clean_up_ctl(stcb, asoc, so_locked);
8813 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8816 * Prepend an OPERATION-ERROR chunk header and put it on the end of
8817 * the control chunk queue.
8819 struct sctp_chunkhdr *hdr;
8820 struct sctp_tmit_chunk *chk;
8823 SCTP_TCB_LOCK_ASSERT(stcb);
8824 sctp_alloc_a_chunk(stcb, chk);
8827 sctp_m_freem(op_err);
8830 chk->copy_by_ref = 0;
8831 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
8832 if (op_err == NULL) {
8833 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
8838 while (mat != NULL) {
8839 chk->send_size += SCTP_BUF_LEN(mat);
8840 mat = SCTP_BUF_NEXT(mat);
8842 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8843 chk->rec.chunk_id.can_take_data = 1;
8844 chk->sent = SCTP_DATAGRAM_UNSENT;
8847 chk->asoc = &stcb->asoc;
8850 hdr = mtod(op_err, struct sctp_chunkhdr *);
8851 hdr->chunk_type = SCTP_OPERATION_ERROR;
8852 hdr->chunk_flags = 0;
8853 hdr->chunk_length = htons(chk->send_size);
8854 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8857 chk->asoc->ctrl_queue_cnt++;
8861 sctp_send_cookie_echo(struct mbuf *m,
8863 struct sctp_tcb *stcb,
8864 struct sctp_nets *net)
8867 * pull out the cookie and put it at the front of the control chunk
8871 struct mbuf *cookie;
8872 struct sctp_paramhdr parm, *phdr;
8873 struct sctp_chunkhdr *hdr;
8874 struct sctp_tmit_chunk *chk;
8875 uint16_t ptype, plen;
8877 /* First find the cookie in the param area */
8879 at = offset + sizeof(struct sctp_init_chunk);
8881 SCTP_TCB_LOCK_ASSERT(stcb);
8883 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8887 ptype = ntohs(phdr->param_type);
8888 plen = ntohs(phdr->param_length);
8889 if (ptype == SCTP_STATE_COOKIE) {
8892 /* found the cookie */
8893 if ((pad = (plen % 4))) {
8896 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
8897 if (cookie == NULL) {
8901 #ifdef SCTP_MBUF_LOGGING
8902 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8905 for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
8906 if (SCTP_BUF_IS_EXTENDED(mat)) {
8907 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8914 at += SCTP_SIZE32(plen);
8916 if (cookie == NULL) {
8917 /* Did not find the cookie */
8920 /* ok, we got the cookie lets change it into a cookie echo chunk */
8922 /* first the change from param to cookie */
8923 hdr = mtod(cookie, struct sctp_chunkhdr *);
8924 hdr->chunk_type = SCTP_COOKIE_ECHO;
8925 hdr->chunk_flags = 0;
8926 /* get the chunk stuff now and place it in the FRONT of the queue */
8927 sctp_alloc_a_chunk(stcb, chk);
8930 sctp_m_freem(cookie);
8933 chk->copy_by_ref = 0;
8934 chk->send_size = plen;
8935 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8936 chk->rec.chunk_id.can_take_data = 0;
8937 chk->sent = SCTP_DATAGRAM_UNSENT;
8939 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8940 chk->asoc = &stcb->asoc;
8943 atomic_add_int(&chk->whoTo->ref_count, 1);
8944 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
8945 chk->asoc->ctrl_queue_cnt++;
8950 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
8954 struct sctp_nets *net)
8957 * take a HB request and make it into a HB ack and send it.
8959 struct mbuf *outchain;
8960 struct sctp_chunkhdr *chdr;
8961 struct sctp_tmit_chunk *chk;
8965 /* must have a net pointer */
8968 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
8969 if (outchain == NULL) {
8970 /* gak out of memory */
8973 #ifdef SCTP_MBUF_LOGGING
8974 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8977 for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
8978 if (SCTP_BUF_IS_EXTENDED(mat)) {
8979 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8984 chdr = mtod(outchain, struct sctp_chunkhdr *);
8985 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
8986 chdr->chunk_flags = 0;
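/* like all chunks, the HEARTBEAT-ACK must be padded to a 4-byte boundary; zero bytes are appended below if the copied request length is not a multiple of 4 */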
8987 if (chk_length % 4) {
8989 uint32_t cpthis = 0;
8992 padlen = 4 - (chk_length % 4);
8993 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
8995 sctp_alloc_a_chunk(stcb, chk);
8998 sctp_m_freem(outchain);
9001 chk->copy_by_ref = 0;
9002 chk->send_size = chk_length;
9003 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9004 chk->rec.chunk_id.can_take_data = 1;
9005 chk->sent = SCTP_DATAGRAM_UNSENT;
9008 chk->asoc = &stcb->asoc;
9009 chk->data = outchain;
9011 atomic_add_int(&chk->whoTo->ref_count, 1);
9012 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9013 chk->asoc->ctrl_queue_cnt++;
9017 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9019 /* formulate and queue a cookie-ack back to sender */
9020 struct mbuf *cookie_ack;
9021 struct sctp_chunkhdr *hdr;
9022 struct sctp_tmit_chunk *chk;
9025 SCTP_TCB_LOCK_ASSERT(stcb);
9027 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
9028 if (cookie_ack == NULL) {
9032 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9033 sctp_alloc_a_chunk(stcb, chk);
9036 sctp_m_freem(cookie_ack);
9039 chk->copy_by_ref = 0;
9040 chk->send_size = sizeof(struct sctp_chunkhdr);
9041 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9042 chk->rec.chunk_id.can_take_data = 1;
9043 chk->sent = SCTP_DATAGRAM_UNSENT;
9046 chk->asoc = &stcb->asoc;
9047 chk->data = cookie_ack;
9048 if (chk->asoc->last_control_chunk_from != NULL) {
9049 chk->whoTo = chk->asoc->last_control_chunk_from;
9050 atomic_add_int(&chk->whoTo->ref_count, 1);
9054 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9055 hdr->chunk_type = SCTP_COOKIE_ACK;
9056 hdr->chunk_flags = 0;
9057 hdr->chunk_length = htons(chk->send_size);
9058 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9059 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9060 chk->asoc->ctrl_queue_cnt++;
9066 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9068 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9069 struct mbuf *m_shutdown_ack;
9070 struct sctp_shutdown_ack_chunk *ack_cp;
9071 struct sctp_tmit_chunk *chk;
9073 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9074 if (m_shutdown_ack == NULL) {
9078 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9079 sctp_alloc_a_chunk(stcb, chk);
9082 sctp_m_freem(m_shutdown_ack);
9085 chk->copy_by_ref = 0;
9086 chk->send_size = sizeof(struct sctp_chunkhdr);
9087 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9088 chk->rec.chunk_id.can_take_data = 1;
9089 chk->sent = SCTP_DATAGRAM_UNSENT;
9092 chk->asoc = &stcb->asoc;
9093 chk->data = m_shutdown_ack;
9096 atomic_add_int(&chk->whoTo->ref_count, 1);
9098 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9099 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9100 ack_cp->ch.chunk_flags = 0;
9101 ack_cp->ch.chunk_length = htons(chk->send_size);
9102 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9103 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9104 chk->asoc->ctrl_queue_cnt++;
9109 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9111 /* formulate and queue a SHUTDOWN to the sender */
9112 struct mbuf *m_shutdown;
9113 struct sctp_shutdown_chunk *shutdown_cp;
9114 struct sctp_tmit_chunk *chk;
9116 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9117 if (m_shutdown == NULL) {
9121 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9122 sctp_alloc_a_chunk(stcb, chk);
9125 sctp_m_freem(m_shutdown);
9128 chk->copy_by_ref = 0;
9129 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9130 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9131 chk->rec.chunk_id.can_take_data = 1;
9132 chk->sent = SCTP_DATAGRAM_UNSENT;
9135 chk->asoc = &stcb->asoc;
9136 chk->data = m_shutdown;
9139 atomic_add_int(&chk->whoTo->ref_count, 1);
9141 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9142 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9143 shutdown_cp->ch.chunk_flags = 0;
9144 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9145 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9146 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9147 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9148 chk->asoc->ctrl_queue_cnt++;
9153 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9156 * formulate and queue an ASCONF to the peer. ASCONF parameters
9157 * should be queued on the assoc queue.
9159 struct sctp_tmit_chunk *chk;
9160 struct mbuf *m_asconf;
9163 SCTP_TCB_LOCK_ASSERT(stcb);
9165 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9166 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9167 /* can't send a new one if there is one in flight already */
9170 /* compose an ASCONF chunk, maximum length is PMTU */
9171 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9172 if (m_asconf == NULL) {
9175 sctp_alloc_a_chunk(stcb, chk);
9178 sctp_m_freem(m_asconf);
9181 chk->copy_by_ref = 0;
9182 chk->data = m_asconf;
9183 chk->send_size = len;
9184 chk->rec.chunk_id.id = SCTP_ASCONF;
9185 chk->rec.chunk_id.can_take_data = 0;
9186 chk->sent = SCTP_DATAGRAM_UNSENT;
9188 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9189 chk->asoc = &stcb->asoc;
9192 atomic_add_int(&chk->whoTo->ref_count, 1);
9194 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9195 chk->asoc->ctrl_queue_cnt++;
9200 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9203 * formulate and queue an ASCONF-ACK back to the sender. The ASCONF-ACK
9204 * must be stored in the tcb.
9206 struct sctp_tmit_chunk *chk;
9207 struct sctp_asconf_ack *ack, *latest_ack;
9209 struct sctp_nets *net = NULL;
9211 SCTP_TCB_LOCK_ASSERT(stcb);
9212 /* Get the latest ASCONF-ACK */
9213 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9214 if (latest_ack == NULL) {
9217 if (latest_ack->last_sent_to != NULL &&
9218 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9219 /* we're doing a retransmission */
9220 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9223 if (stcb->asoc.last_control_chunk_from == NULL) {
9224 if (stcb->asoc.alternate) {
9225 net = stcb->asoc.alternate;
9227 net = stcb->asoc.primary_destination;
9230 net = stcb->asoc.last_control_chunk_from;
9235 if (stcb->asoc.last_control_chunk_from == NULL) {
9236 if (stcb->asoc.alternate) {
9237 net = stcb->asoc.alternate;
9239 net = stcb->asoc.primary_destination;
9242 net = stcb->asoc.last_control_chunk_from;
9245 latest_ack->last_sent_to = net;
9247 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9248 if (ack->data == NULL) {
9251 /* copy the asconf_ack */
9252 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
9253 if (m_ack == NULL) {
9254 /* couldn't copy it */
9257 #ifdef SCTP_MBUF_LOGGING
9258 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9261 for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) {
9262 if (SCTP_BUF_IS_EXTENDED(mat)) {
9263 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9269 sctp_alloc_a_chunk(stcb, chk);
9273 sctp_m_freem(m_ack);
9276 chk->copy_by_ref = 0;
9280 atomic_add_int(&chk->whoTo->ref_count, 1);
9285 chk->send_size = ack->len;
9286 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9287 chk->rec.chunk_id.can_take_data = 1;
9288 chk->sent = SCTP_DATAGRAM_UNSENT;
9290 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
9291 chk->asoc = &stcb->asoc;
9293 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9294 chk->asoc->ctrl_queue_cnt++;
9301 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9302 struct sctp_tcb *stcb,
9303 struct sctp_association *asoc,
9304 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9305 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9311 * send out one MTU of retransmission. If fast_retransmit is
9312 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9313 * rwnd. For a Cookie or Asconf in the control chunk queue we
9314 * retransmit them by themselves.
9316 * For data chunks we will pick out the lowest TSN's in the sent_queue
9317 * marked for resend and bundle them all together (up to an MTU of the
9318 * destination). The address to send to should have been
9319 * selected/changed where the retransmission was marked (i.e. in FR
9320 * or t3-timeout routines).
9322 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9323 struct sctp_tmit_chunk *chk, *fwd;
9324 struct mbuf *m, *endofchain;
9325 struct sctp_nets *net = NULL;
9326 uint32_t tsns_sent = 0;
9327 int no_fragmentflg, bundle_at, cnt_thru;
9329 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9330 struct sctp_auth_chunk *auth = NULL;
9331 uint32_t auth_offset = 0;
9332 uint16_t auth_keyid;
9333 int override_ok = 1;
9334 int data_auth_reqd = 0;
9337 SCTP_TCB_LOCK_ASSERT(stcb);
9338 tmr_started = ctl_cnt = bundle_at = error = 0;
9343 endofchain = m = NULL;
9344 auth_keyid = stcb->asoc.authinfo.active_keyid;
9345 #ifdef SCTP_AUDITING_ENABLED
9346 sctp_audit_log(0xC3, 1);
9348 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9349 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9350 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9351 asoc->sent_queue_retran_cnt);
9352 asoc->sent_queue_cnt = 0;
9353 asoc->sent_queue_cnt_removeable = 0;
9354 /* send back 0/0 so we enter normal transmission */
9358 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9359 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9360 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9361 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9362 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9365 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9366 if (chk != asoc->str_reset) {
9368 * not eligible for retran if it's
9375 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9379 * Add an AUTH chunk, if chunk requires it save the
9380 * offset into the chain for AUTH
9382 if ((auth == NULL) &&
9383 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9384 stcb->asoc.peer_auth_chunks))) {
9385 m = sctp_add_auth_chunk(m, &endofchain,
9386 &auth, &auth_offset,
9388 chk->rec.chunk_id.id);
9389 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9391 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9397 /* do we have control chunks to retransmit? */
9399 /* Start a timer no matter if we succeed or fail */
9400 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9401 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9402 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9403 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9404 chk->snd_count++; /* update our count */
9405 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9406 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9407 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9408 no_fragmentflg, 0, 0,
9409 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9410 chk->whoTo->port, NULL,
9413 SCTP_STAT_INCR(sctps_lowlevelerr);
9420 * We don't want to mark the net->sent time here, since we
9421 * use it for HB and retransmissions cannot measure RTT
9423 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9425 chk->sent = SCTP_DATAGRAM_SENT;
9426 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9430 /* Clean up the fwd-tsn list */
9431 sctp_clean_up_ctl(stcb, asoc, so_locked);
9436 * Ok, it is just data retransmission we need to do, or that and a
9437 * fwd-tsn along with it.
9439 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9440 return (SCTP_RETRAN_DONE);
9442 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9443 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9444 /* not yet open, resend the cookie and that is it */
9447 #ifdef SCTP_AUDITING_ENABLED
9448 sctp_auditing(20, inp, stcb, NULL);
9450 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9451 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9452 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9453 /* No, not sent to this net or not ready for rtx */
9456 if (chk->data == NULL) {
9457 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9458 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9461 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9462 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9463 /* Gak, we have exceeded max unlucky retran, abort! */
9464 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9466 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9467 atomic_add_int(&stcb->asoc.refcnt, 1);
9468 sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked);
9469 SCTP_TCB_LOCK(stcb);
9470 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9471 return (SCTP_RETRAN_EXIT);
9473 /* pick up the net */
9475 switch (net->ro._l_addr.sa.sa_family) {
9478 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9483 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9492 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9493 /* No room in peers rwnd */
9496 tsn = asoc->last_acked_seq + 1;
9497 if (tsn == chk->rec.data.TSN_seq) {
9499 * we make a special exception for this
9500 * case. The peer has no rwnd but is missing
9501 * the lowest chunk.. which is probably what
9502 * is holding up the rwnd.
9504 goto one_chunk_around;
9509 if (asoc->peers_rwnd < mtu) {
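/* less than a full MTU of peer window remains; if the window is fully closed and nothing is in flight, this lone chunk goes out as a window probe */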
9511 if ((asoc->peers_rwnd == 0) &&
9512 (asoc->total_flight == 0)) {
9513 chk->window_probe = 1;
9514 chk->whoTo->window_probe = 1;
9517 #ifdef SCTP_AUDITING_ENABLED
9518 sctp_audit_log(0xC3, 2);
9522 net->fast_retran_ip = 0;
9523 if (chk->rec.data.doing_fast_retransmit == 0) {
9525 * if no FR is in progress, skip destinations that have
9526 * flight_size > cwnd.
9528 if (net->flight_size >= net->cwnd) {
9533 * Mark the destination net to have FR recovery
9537 net->fast_retran_ip = 1;
9541 * if no AUTH is yet included and this chunk requires it,
9542 * make sure to account for it. We don't apply the size
9543 * until the AUTH chunk is actually added below in case
9544 * there is no room for this chunk.
9546 if (data_auth_reqd && (auth == NULL)) {
9547 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9551 if ((chk->send_size <= (mtu - dmtu)) ||
9552 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9553 /* ok we will add this one */
9554 if (data_auth_reqd) {
9556 m = sctp_add_auth_chunk(m,
9562 auth_keyid = chk->auth_keyid;
9564 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9565 } else if (override_ok) {
9566 auth_keyid = chk->auth_keyid;
9568 } else if (chk->auth_keyid != auth_keyid) {
9569 /* different keyid, so done bundling */
9573 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9575 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9578 /* Do clear IP_DF ? */
9579 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9582 /* update our MTU size */
9583 if (mtu > (chk->send_size + dmtu))
9584 mtu -= (chk->send_size + dmtu);
9587 data_list[bundle_at++] = chk;
9588 if (one_chunk && (asoc->total_flight <= 0)) {
9589 SCTP_STAT_INCR(sctps_windowprobed);
9592 if (one_chunk == 0) {
9594 * now, are there any more forward from chk to pick
9597 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9598 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9599 /* Nope, not for retran */
9602 if (fwd->whoTo != net) {
9603 /* Nope, not the net in question */
9606 if (data_auth_reqd && (auth == NULL)) {
9607 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9610 if (fwd->send_size <= (mtu - dmtu)) {
9611 if (data_auth_reqd) {
9613 m = sctp_add_auth_chunk(m,
9619 auth_keyid = fwd->auth_keyid;
9621 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9622 } else if (override_ok) {
9623 auth_keyid = fwd->auth_keyid;
9625 } else if (fwd->auth_keyid != auth_keyid) {
9633 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9635 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9638 /* Do clear IP_DF ? */
9639 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9642 /* update our MTU size */
9643 if (mtu > (fwd->send_size + dmtu))
9644 mtu -= (fwd->send_size + dmtu);
9647 data_list[bundle_at++] = fwd;
9648 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9652 /* can't fit so we are done */
9657 /* Is there something to send for this destination? */
9660 * No matter if we fail or succeed we should start a
9661 * timer. A failure is like a lost IP packet :-)
9663 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9665 * no timer running on this destination
9668 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9671 /* Now lets send it, if there is anything to send :> */
9672 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9673 (struct sockaddr *)&net->ro._l_addr, m,
9674 auth_offset, auth, auth_keyid,
9675 no_fragmentflg, 0, 0,
9676 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9680 /* error, we could not output */
9681 SCTP_STAT_INCR(sctps_lowlevelerr);
9689 * We don't want to mark the net->sent time here
9690 * since we use this for HB and retrans cannot
9693 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9695 /* For auto-close */
9697 if (*now_filled == 0) {
9698 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9699 *now = asoc->time_last_sent;
9702 asoc->time_last_sent = *now;
9704 *cnt_out += bundle_at;
9705 #ifdef SCTP_AUDITING_ENABLED
9706 sctp_audit_log(0xC4, bundle_at);
9709 tsns_sent = data_list[0]->rec.data.TSN_seq;
9711 for (i = 0; i < bundle_at; i++) {
9712 SCTP_STAT_INCR(sctps_sendretransdata);
9713 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9715 * When we have revoked data and we
9716 * retransmit it, then we clear the revoked
9717 * flag since this flag dictates if we
9718 * subtracted from the fs
9720 if (data_list[i]->rec.data.chunk_was_revoked) {
9721 /* Deflate the cwnd */
9722 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9723 data_list[i]->rec.data.chunk_was_revoked = 0;
9725 data_list[i]->snd_count++;
9726 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9727 /* record the time */
9728 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9729 if (data_list[i]->book_size_scale) {
9731 * need to double the book size on
9734 data_list[i]->book_size_scale = 0;
9736 * Since we double the booksize, we
9737 * must also double the output queue
9738 * size, since this gets shrunk when
9739 * we free by this amount.
9741 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9742 data_list[i]->book_size *= 2;
9746 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9747 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9748 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9750 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9751 (uint32_t) (data_list[i]->send_size +
9752 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9754 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9755 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9756 data_list[i]->whoTo->flight_size,
9757 data_list[i]->book_size,
9758 (uintptr_t) data_list[i]->whoTo,
9759 data_list[i]->rec.data.TSN_seq);
9761 sctp_flight_size_increase(data_list[i]);
9762 sctp_total_flight_increase(stcb, data_list[i]);
9763 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9764 /* SWS sender side engages */
9765 asoc->peers_rwnd = 0;
9768 (data_list[i]->rec.data.doing_fast_retransmit)) {
9769 SCTP_STAT_INCR(sctps_sendfastretrans);
9770 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9771 (tmr_started == 0)) {
9773 * ok we just fast-retrans'd
9774 * the lowest TSN, i.e. the
9775 * first on the list. In
9776 * this case we want to give
9777 * some more time to get a
9778 * SACK back without a
9781 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9782 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9783 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9787 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9788 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9790 #ifdef SCTP_AUDITING_ENABLED
9791 sctp_auditing(21, inp, stcb, NULL);
9797 if (asoc->sent_queue_retran_cnt <= 0) {
9798 /* all done we have no more to retran */
9799 asoc->sent_queue_retran_cnt = 0;
9803 /* No more room in rwnd */
9806 /* stop the for loop here. we sent out a packet */
9813 sctp_timer_validation(struct sctp_inpcb *inp,
9814 struct sctp_tcb *stcb,
9815 struct sctp_association *asoc)
9817 struct sctp_nets *net;
9819 /* Validate that a timer is running somewhere */
9820 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9821 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9822 /* Here is a timer */
9826 SCTP_TCB_LOCK_ASSERT(stcb);
9827 /* Gak, we did not have a timer anywhere */
9828 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9829 if (asoc->alternate) {
9830 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9832 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9838 sctp_chunk_output(struct sctp_inpcb *inp,
9839 struct sctp_tcb *stcb,
9842 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9848 * Ok this is the generic chunk service queue. We must do the
9850 * - See if there are retransmits pending, if so we must
9852 * - Service the stream queue that is next, moving any
9853 * message (note I must get a complete message i.e.
9854 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9856 * - Check to see if the cwnd/rwnd allows any output, if so we
9857 * go ahead and formulate and send the low level chunks, making sure
9858 * to combine any control in the control chunk queue also.
9860 struct sctp_association *asoc;
9861 struct sctp_nets *net;
9862 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
9863 unsigned int burst_cnt = 0;
9867 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9870 unsigned int tot_frs = 0;
9873 /* The Nagle algorithm is only applied when handling a send call. */
9874 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9875 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9883 SCTP_TCB_LOCK_ASSERT(stcb);
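/* un_sent: bytes queued by the application that are not yet in flight */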
9885 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9887 if ((un_sent <= 0) &&
9888 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9889 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9890 (asoc->sent_queue_retran_cnt == 0)) {
9891 /* Nothing to do unless there is something left to be sent */
9895 * Do we have something to send, data or control AND a sack timer
9896 * running, if so piggy-back the sack.
9898 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9899 sctp_send_sack(stcb, so_locked);
9900 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
9902 while (asoc->sent_queue_retran_cnt) {
9904 * Ok, it is retransmission time only, we send out only ONE
9905 * packet with a single call off to the retran code.
9907 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9909 * Special hook for handling cookies discarded
9910 * by the peer that carried data. Send the cookie-ack only,
9911 * and then the next call will get the retransmissions.
9913 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9915 &now, &now_filled, frag_point, so_locked);
9917 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9918 /* if it's not from an HB then do it */
9920 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9926 * it's from any other place, we don't allow retran
9927 * output (only control)
9932 /* Can't send anymore */
9934 * now let's push out control by calling med-level
9935 * output once. This ensures that we WILL send HBs
9938 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9940 &now, &now_filled, frag_point, so_locked);
9941 #ifdef SCTP_AUDITING_ENABLED
9942 sctp_auditing(8, inp, stcb, NULL);
9944 sctp_timer_validation(inp, stcb, asoc);
9949 * The count was off.. retran is not happening so do
9950 * the normal retransmission.
9952 #ifdef SCTP_AUDITING_ENABLED
9953 sctp_auditing(9, inp, stcb, NULL);
9955 if (ret == SCTP_RETRAN_EXIT) {
9960 if (from_where == SCTP_OUTPUT_FROM_T3) {
9961 /* Only one transmission allowed out of a timeout */
9962 #ifdef SCTP_AUDITING_ENABLED
9963 sctp_auditing(10, inp, stcb, NULL);
9965 /* Push out any control */
9966 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
9967 &now, &now_filled, frag_point, so_locked);
9970 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
9971 /* Hit FR burst limit */
9974 if ((num_out == 0) && (ret == 0)) {
9975 /* No more retrans to send */
9979 #ifdef SCTP_AUDITING_ENABLED
9980 sctp_auditing(12, inp, stcb, NULL);
9982 /* Check for bad destinations, if they exist move chunks around. */
9983 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9984 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
9986 * if possible move things off of this address. We
9987 * still may send below due to the dormant state, but
9988 * we try to find an alternate address to send to,
9989 * and if we have one we move all queued data on the
9990 * out wheel to this alternate address.
9992 if (net->ref_count > 1)
9993 sctp_move_chunks_from_net(stcb, net);
9996 * if ((asoc->sat_network) || (net->addr_is_local))
9997 * { burst_limit = asoc->max_burst *
9998 * SCTP_SAT_NETWORK_BURST_INCR; }
10000 if (asoc->max_burst > 0) {
10001 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10002 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10004 * JRS - Use the congestion
10005 * control given in the
10006 * congestion control module
10008 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10009 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10010 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10012 SCTP_STAT_INCR(sctps_maxburstqueued);
10014 net->fast_retran_ip = 0;
10016 if (net->flight_size == 0) {
10018 * Should be decaying the
10030 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10031 &reason_code, 0, from_where,
10032 &now, &now_filled, frag_point, so_locked);
10034 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10035 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10036 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10038 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10039 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10040 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10044 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10046 tot_out += num_out;
10048 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10049 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10050 if (num_out == 0) {
10051 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10056 * When the Nagle algorithm is used, look at how
10057 * much is unsent, then if it's smaller than an MTU
10058 * and we have data in flight we stop, except if we
10059 * are handling a fragmented user message.
10061 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10062 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
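/* the stream_queue_cnt term presumably approximates the DATA chunk headers still to be added for queued messages */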
10063 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10064 (stcb->asoc.total_flight > 0) &&
10065 ((stcb->asoc.locked_on_sending == NULL) ||
10066 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
10070 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10071 TAILQ_EMPTY(&asoc->send_queue) &&
10072 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
10073 /* Nothing left to send */
10076 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10077 /* Nothing left to send */
10080 } while (num_out &&
10081 ((asoc->max_burst == 0) ||
10082 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10083 (burst_cnt < asoc->max_burst)));
10085 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10086 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10087 SCTP_STAT_INCR(sctps_maxburstqueued);
10088 asoc->burst_limit_applied = 1;
10089 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10090 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10093 asoc->burst_limit_applied = 0;
10096 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10097 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10099 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10103 * Now we need to clean up the control chunk chain if an ECNE is on
10104 * it. It must be marked as UNSENT again so the next call will continue
10105 * to send it until such time that we get a CWR, to remove it.
10107 if (stcb->asoc.ecn_echo_cnt_onq)
10108 sctp_fix_ecn_echo(asoc);
10115 struct sctp_inpcb *inp,
10117 struct sockaddr *addr,
10118 struct mbuf *control,
10123 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10126 if (inp->sctp_socket == NULL) {
10127 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10130 return (sctp_sosend(inp->sctp_socket,
10132 (struct uio *)NULL,
10140 send_forward_tsn(struct sctp_tcb *stcb,
10141 struct sctp_association *asoc)
10143 struct sctp_tmit_chunk *chk;
10144 struct sctp_forward_tsn_chunk *fwdtsn;
10145 uint32_t advance_peer_ack_point;
10147 SCTP_TCB_LOCK_ASSERT(stcb);
10148 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10149 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10150 /* mark it as unsent */
10151 chk->sent = SCTP_DATAGRAM_UNSENT;
10152 chk->snd_count = 0;
10153 /* Do we correct its output location? */
10155 sctp_free_remote_addr(chk->whoTo);
10158 goto sctp_fill_in_rest;
10161 /* Ok if we reach here we must build one */
10162 sctp_alloc_a_chunk(stcb, chk);
10166 asoc->fwd_tsn_cnt++;
10167 chk->copy_by_ref = 0;
10168 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10169 chk->rec.chunk_id.can_take_data = 0;
10172 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
10173 if (chk->data == NULL) {
10174 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10177 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10178 chk->sent = SCTP_DATAGRAM_UNSENT;
10179 chk->snd_count = 0;
10180 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10181 asoc->ctrl_queue_cnt++;
10184 * Here we go through and fill out the part that deals with
10185 * stream/seq of the ones we skip.
10187 SCTP_BUF_LEN(chk->data) = 0;
10189 struct sctp_tmit_chunk *at, *tp1, *last;
10190 struct sctp_strseq *strseq;
10191 unsigned int cnt_of_space, i, ovh;
10192 unsigned int space_needed;
10193 unsigned int cnt_of_skipped = 0;
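/* count how many chunks at the head of the sent queue can be skipped; unordered chunks are not reported in the stream/seq list */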
10195 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10196 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10197 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10198 /* no more to look at */
10201 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10202 /* We don't report these */
10207 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10208 (cnt_of_skipped * sizeof(struct sctp_strseq)));
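/* one struct sctp_strseq entry is needed per skipped ordered chunk, on top of the fixed FORWARD-TSN header */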
10210 cnt_of_space = M_TRAILINGSPACE(chk->data);
10212 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10213 ovh = SCTP_MIN_OVERHEAD;
10215 ovh = SCTP_MIN_V4_OVERHEAD;
10217 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10218 /* trim to a mtu size */
10219 cnt_of_space = asoc->smallest_mtu - ovh;
10221 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10222 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10223 0xff, 0, cnt_of_skipped,
10224 asoc->advanced_peer_ack_point);
10227 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10228 if (cnt_of_space < space_needed) {
10230 * ok we must trim down the chunk by lowering the
10231 * advance peer ack point.
10233 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10234 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10235 0xff, 0xff, cnt_of_space,
10238 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10239 cnt_of_skipped /= sizeof(struct sctp_strseq);
10241 * Go through and find the TSN that will be the one
10244 at = TAILQ_FIRST(&asoc->sent_queue);
10246 for (i = 0; i < cnt_of_skipped; i++) {
10247 tp1 = TAILQ_NEXT(at, sctp_next);
10254 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10255 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10256 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10257 asoc->advanced_peer_ack_point);
10261 * last now points to last one I can report, update
10265 advance_peer_ack_point = last->rec.data.TSN_seq;
10266 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10267 cnt_of_skipped * sizeof(struct sctp_strseq);
10269 chk->send_size = space_needed;
10270 /* Setup the chunk */
10271 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10272 fwdtsn->ch.chunk_length = htons(chk->send_size);
10273 fwdtsn->ch.chunk_flags = 0;
10274 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10275 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10276 SCTP_BUF_LEN(chk->data) = chk->send_size;
10279 * Move pointer to after the fwdtsn and transfer to the
10282 strseq = (struct sctp_strseq *)fwdtsn;
10284 * Now populate the strseq list. This is done blindly
10285 * without pulling out duplicate stream info. This is
10286 * inefficient but won't harm the process since the peer will
10287 * look at these in sequence and will thus release anything.
10288 * It could mean we exceed the PMTU and chop off some that
10289 * we could have included.. but this is unlikely (aka 1432/4
10290 * would mean 300+ stream seq's would have to be reported in
10291 * one FWD-TSN). With a bit of work we can later FIX this to
10292 * optimize and pull out duplicates.. but it does add more
10293 * overhead. So for now... not!
10295 at = TAILQ_FIRST(&asoc->sent_queue);
10296 for (i = 0; i < cnt_of_skipped; i++) {
10297 tp1 = TAILQ_NEXT(at, sctp_next);
10300 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10301 /* We don't report these */
10306 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10307 at->rec.data.fwd_tsn_cnt = 0;
10309 strseq->stream = ntohs(at->rec.data.stream_number);
10310 strseq->sequence = ntohs(at->rec.data.stream_seq);
10319 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10320 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10326 * Queue up a SACK or NR-SACK in the control queue.
10327 * We must first check to see if a SACK or NR-SACK is
10328 * somehow on the control queue.
10329 * If so, we will take and remove the old one.
10331 struct sctp_association *asoc;
10332 struct sctp_tmit_chunk *chk, *a_chk;
10333 struct sctp_sack_chunk *sack;
10334 struct sctp_nr_sack_chunk *nr_sack;
10335 struct sctp_gap_ack_block *gap_descriptor;
10336 struct sack_track *selector;
10341 int limit_reached = 0;
10342 unsigned int i, siz, j;
10343 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10346 uint32_t highest_tsn;
10351 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
10352 (stcb->asoc.peer_supports_nr_sack == 1)) {
10353 type = SCTP_NR_SELECTIVE_ACK;
10355 type = SCTP_SELECTIVE_ACK;
10358 asoc = &stcb->asoc;
10359 SCTP_TCB_LOCK_ASSERT(stcb);
10360 if (asoc->last_data_chunk_from == NULL) {
10361 /* Hmm we never received anything */
10364 sctp_slide_mapping_arrays(stcb);
10365 sctp_set_rwnd(stcb, asoc);
10366 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10367 if (chk->rec.chunk_id.id == type) {
10368 /* Hmm, found a sack already on queue, remove it */
10369 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10370 asoc->ctrl_queue_cnt--;
10373 sctp_m_freem(a_chk->data);
10374 a_chk->data = NULL;
10376 if (a_chk->whoTo) {
10377 sctp_free_remote_addr(a_chk->whoTo);
10378 a_chk->whoTo = NULL;
10383 if (a_chk == NULL) {
10384 sctp_alloc_a_chunk(stcb, a_chk);
10385 if (a_chk == NULL) {
10386 /* No memory so we drop the idea, and set a timer */
10387 if (stcb->asoc.delayed_ack) {
10388 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10389 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10390 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10391 stcb->sctp_ep, stcb, NULL);
10393 stcb->asoc.send_sack = 1;
10397 a_chk->copy_by_ref = 0;
10398 a_chk->rec.chunk_id.id = type;
10399 a_chk->rec.chunk_id.can_take_data = 1;
10401 /* Clear our pkt counts */
10402 asoc->data_pkts_seen = 0;
10404 a_chk->asoc = asoc;
10405 a_chk->snd_count = 0;
10406 a_chk->send_size = 0; /* fill in later */
10407 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10408 a_chk->whoTo = NULL;
10410 if ((asoc->numduptsns) ||
10411 (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
10413 * Ok, we have some duplicates or the destination for the
10414 * sack is unreachable, let's see if we can select an
10415 * alternate to asoc->last_data_chunk_from
10417 if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
10418 (asoc->used_alt_onsack > asoc->numnets)) {
10419 /* We used an alt last time, don't this time */
10420 a_chk->whoTo = NULL;
10422 asoc->used_alt_onsack++;
10423 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10425 if (a_chk->whoTo == NULL) {
10426 /* Nope, no alternate */
10427 a_chk->whoTo = asoc->last_data_chunk_from;
10428 asoc->used_alt_onsack = 0;
10432 * No duplicates so we use the last place we received data
10435 asoc->used_alt_onsack = 0;
10436 a_chk->whoTo = asoc->last_data_chunk_from;
10438 if (a_chk->whoTo) {
10439 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10441 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10442 highest_tsn = asoc->highest_tsn_inside_map;
10444 highest_tsn = asoc->highest_tsn_inside_nr_map;
10446 if (highest_tsn == asoc->cumulative_tsn) {
10448 if (type == SCTP_SELECTIVE_ACK) {
10449 space_req = sizeof(struct sctp_sack_chunk);
10451 space_req = sizeof(struct sctp_nr_sack_chunk);
10454 /* gaps get a cluster */
10455 space_req = MCLBYTES;
10457 /* Ok now let's formulate an mbuf with our sack */
10458 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
10459 if ((a_chk->data == NULL) ||
10460 (a_chk->whoTo == NULL)) {
10461 /* rats, no mbuf memory */
10463 /* was a problem with the destination */
10464 sctp_m_freem(a_chk->data);
10465 a_chk->data = NULL;
10467 sctp_free_a_chunk(stcb, a_chk, so_locked);
10468 /* sa_ignore NO_NULL_CHK */
10469 if (stcb->asoc.delayed_ack) {
10470 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10471 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10472 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10473 stcb->sctp_ep, stcb, NULL);
10475 stcb->asoc.send_sack = 1;
10479 /* ok, let's go through and fill it in */
10480 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10481 space = M_TRAILINGSPACE(a_chk->data);
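/* clamp the usable space so the SACK never grows beyond one MTU toward this destination */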
10482 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10483 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10485 limit = mtod(a_chk->data, caddr_t);
10490 if ((asoc->sctp_cmt_on_off > 0) &&
10491 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10493 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
10494 * received, then set high bit to 1, else 0. Reset
10497 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10498 asoc->cmt_dac_pkts_rcvd = 0;
10500 #ifdef SCTP_ASOCLOG_OF_TSNS
10501 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10502 stcb->asoc.cumack_log_atsnt++;
10503 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10504 stcb->asoc.cumack_log_atsnt = 0;
10507 /* reset the reader's interpretation */
10508 stcb->freed_by_sorcv_sincelast = 0;
10510 if (type == SCTP_SELECTIVE_ACK) {
10511 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10513 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
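/* siz: bytes of the mapping array (rounded up) needed to cover the reported TSN range; the second computation covers the case where the TSN space has wrapped */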
10514 if (highest_tsn > asoc->mapping_array_base_tsn) {
10515 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10517 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10521 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10522 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10523 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10524 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10526 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10530 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10533 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10535 if (((type == SCTP_SELECTIVE_ACK) &&
10536 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10537 ((type == SCTP_NR_SELECTIVE_ACK) &&
10538 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10539 /* we have a gap .. maybe */
10540 for (i = 0; i < siz; i++) {
10541 tsn_map = asoc->mapping_array[i];
10542 if (type == SCTP_SELECTIVE_ACK) {
10543 tsn_map |= asoc->nr_mapping_array[i];
10547 * Clear all bits corresponding to TSNs
10548 * smaller or equal to the cumulative TSN.
10550 tsn_map &= (~0 << (1 - offset));
10552 selector = &sack_array[tsn_map];
10553 if (mergeable && selector->right_edge) {
10555 * Backup, left and right edges were ok to
10561 if (selector->num_entries == 0)
10564 for (j = 0; j < selector->num_entries; j++) {
10565 if (mergeable && selector->right_edge) {
10567 * do a merge by NOT setting
10573 * no merge, set the left
10577 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10579 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10582 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10588 if (selector->left_edge) {
10592 if (limit_reached) {
10593 /* Reached the limit, stop */
10599 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10600 (limit_reached == 0)) {
10604 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10605 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10607 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10610 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10613 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10615 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10616 /* we have a gap .. maybe */
10617 for (i = 0; i < siz; i++) {
10618 tsn_map = asoc->nr_mapping_array[i];
10621 * Clear all bits corresponding to
10622 * TSNs smaller or equal to the
10625 tsn_map &= (~0 << (1 - offset));
10627 selector = &sack_array[tsn_map];
10628 if (mergeable && selector->right_edge) {
10630 * Backup, left and right edges were
10633 num_nr_gap_blocks--;
10636 if (selector->num_entries == 0)
10639 for (j = 0; j < selector->num_entries; j++) {
10640 if (mergeable && selector->right_edge) {
10642 * do a merge by NOT
10649 * no merge, set the left side
10653 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10655 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10656 num_nr_gap_blocks++;
10658 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10664 if (selector->left_edge) {
10668 if (limit_reached) {
10669 /* Reached the limit, stop */
10676 /* now we must add any dups we are going to report. */
10677 if ((limit_reached == 0) && (asoc->numduptsns)) {
10678 dup = (uint32_t *) gap_descriptor;
10679 for (i = 0; i < asoc->numduptsns; i++) {
10680 *dup = htonl(asoc->dup_tsns[i]);
10683 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10688 asoc->numduptsns = 0;
10691 * now that the chunk is prepared, queue it to the control chunk queue
10694 if (type == SCTP_SELECTIVE_ACK) {
10695 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
10696 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10697 num_dups * sizeof(int32_t);
10698 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10699 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10700 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10701 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10702 sack->sack.num_dup_tsns = htons(num_dups);
10703 sack->ch.chunk_type = type;
10704 sack->ch.chunk_flags = flags;
10705 sack->ch.chunk_length = htons(a_chk->send_size);
10707 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
10708 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10709 num_dups * sizeof(int32_t);
10710 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10711 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10712 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10713 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10714 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10715 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10716 nr_sack->nr_sack.reserved = 0;
10717 nr_sack->ch.chunk_type = type;
10718 nr_sack->ch.chunk_flags = flags;
10719 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10721 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10722 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10723 asoc->ctrl_queue_cnt++;
10724 asoc->send_sack = 0;
10725 SCTP_STAT_INCR(sctps_sendsacks);
10730 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10731 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10736 struct mbuf *m_abort, *m, *m_last;
10737 struct mbuf *m_out, *m_end = NULL;
10738 struct sctp_abort_chunk *abort;
10739 struct sctp_auth_chunk *auth = NULL;
10740 struct sctp_nets *net;
10741 uint32_t auth_offset = 0;
10742 uint16_t cause_len, chunk_len, padding_len;
10744 SCTP_TCB_LOCK_ASSERT(stcb);
10746 * Add an AUTH chunk, if this chunk requires it, and save the offset into
10747 * the chain for AUTH
10749 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10750 stcb->asoc.peer_auth_chunks)) {
10751 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10752 stcb, SCTP_ABORT_ASSOCIATION);
10753 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10757 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
10758 if (m_abort == NULL) {
10760 sctp_m_freem(m_out);
10763 sctp_m_freem(operr);
10767 /* link in any error */
10768 SCTP_BUF_NEXT(m_abort) = operr;
10771 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10772 cause_len += (uint16_t) SCTP_BUF_LEN(m);
10773 if (SCTP_BUF_NEXT(m) == NULL) {
10777 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10778 chunk_len = (uint16_t) sizeof(struct sctp_abort_chunk) + cause_len;
10779 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
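/*
 * SCTP chunks are padded to a 4-byte boundary, so padding_len is the
 * number of pad bytes needed to round the ABORT chunk (header plus
 * error causes) up to a multiple of four.
 */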
10780 if (m_out == NULL) {
10781 /* NO Auth chunk prepended, so reserve space in front */
10782 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10785 /* Put AUTH chunk at the front of the chain */
10786 SCTP_BUF_NEXT(m_end) = m_abort;
10788 if (stcb->asoc.alternate) {
10789 net = stcb->asoc.alternate;
10791 net = stcb->asoc.primary_destination;
10793 /* Fill in the ABORT chunk header. */
10794 abort = mtod(m_abort, struct sctp_abort_chunk *);
10795 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10796 abort->ch.chunk_flags = 0;
10797 abort->ch.chunk_length = htons(chunk_len);
10798 /* Add padding, if necessary. */
10799 if (padding_len > 0) {
10800 if ((m_last == NULL) || sctp_add_pad_tombuf(m_last, padding_len)) {
10801 sctp_m_freem(m_out);
10805 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10806 (struct sockaddr *)&net->ro._l_addr,
10807 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10808 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10809 stcb->asoc.primary_destination->port, NULL,
10812 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10816 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10817 struct sctp_nets *net,
10820 /* formulate and SEND a SHUTDOWN-COMPLETE */
10821 struct mbuf *m_shutdown_comp;
10822 struct sctp_shutdown_complete_chunk *shutdown_complete;
10826 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
10827 if (m_shutdown_comp == NULL) {
10831 if (reflect_vtag) {
10832 flags = SCTP_HAD_NO_TCB;
10833 vtag = stcb->asoc.my_vtag;
10836 vtag = stcb->asoc.peer_vtag;
10838 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10839 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10840 shutdown_complete->ch.chunk_flags = flags;
10841 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10842 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10843 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10844 (struct sockaddr *)&net->ro._l_addr,
10845 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
10846 stcb->sctp_ep->sctp_lport, stcb->rport,
10850 SCTP_SO_NOT_LOCKED);
10851 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10856 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
10857 struct sctphdr *sh, uint32_t vtag,
10858 uint8_t type, struct mbuf *cause,
10859 uint8_t use_mflowid, uint32_t mflowid,
10860 uint32_t vrf_id, uint16_t port)
10862 struct mbuf *o_pak;
10864 struct sctphdr *shout;
10865 struct sctp_chunkhdr *ch;
10866 struct udphdr *udp;
10867 int len, cause_len, padding_len, ret;
10870 struct sockaddr_in *src_sin, *dst_sin;
10875 struct sockaddr_in6 *src_sin6, *dst_sin6;
10876 struct ip6_hdr *ip6;
10880 /* Compute the length of the cause and add final padding. */
10882 if (cause != NULL) {
10883 struct mbuf *m_at, *m_last = NULL;
10885 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
10886 if (SCTP_BUF_NEXT(m_at) == NULL)
10888 cause_len += SCTP_BUF_LEN(m_at);
10890 padding_len = cause_len % 4;
10891 if (padding_len != 0) {
10892 padding_len = 4 - padding_len;
10894 if (padding_len != 0) {
10895 if (sctp_add_pad_tombuf(m_last, padding_len)) {
10896 sctp_m_freem(cause);
10903 /* Get an mbuf for the header. */
10904 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
10905 switch (dst->sa_family) {
10908 len += sizeof(struct ip);
10913 len += sizeof(struct ip6_hdr);
10920 len += sizeof(struct udphdr);
10922 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
10923 if (mout == NULL) {
10925 sctp_m_freem(cause);
10929 SCTP_BUF_RESV_UF(mout, max_linkhdr);
10930 SCTP_BUF_LEN(mout) = len;
10931 SCTP_BUF_NEXT(mout) = cause;
10932 if (use_mflowid != 0) {
10933 mout->m_pkthdr.flowid = mflowid;
10934 mout->m_flags |= M_FLOWID;
10942 switch (dst->sa_family) {
10945 src_sin = (struct sockaddr_in *)src;
10946 dst_sin = (struct sockaddr_in *)dst;
10947 ip = mtod(mout, struct ip *);
10948 ip->ip_v = IPVERSION;
10949 ip->ip_hl = (sizeof(struct ip) >> 2);
10951 ip->ip_id = ip_newid();
10953 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
10955 ip->ip_p = IPPROTO_UDP;
10957 ip->ip_p = IPPROTO_SCTP;
10959 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
10960 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
10962 len = sizeof(struct ip);
10963 shout = (struct sctphdr *)((caddr_t)ip + len);
10968 src_sin6 = (struct sockaddr_in6 *)src;
10969 dst_sin6 = (struct sockaddr_in6 *)dst;
10970 ip6 = mtod(mout, struct ip6_hdr *);
10971 ip6->ip6_flow = htonl(0x60000000);
10972 if (V_ip6_auto_flowlabel) {
10973 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
10975 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
10977 ip6->ip6_nxt = IPPROTO_UDP;
10979 ip6->ip6_nxt = IPPROTO_SCTP;
10981 ip6->ip6_src = dst_sin6->sin6_addr;
10982 ip6->ip6_dst = src_sin6->sin6_addr;
10983 len = sizeof(struct ip6_hdr);
10984 shout = (struct sctphdr *)((caddr_t)ip6 + len);
10989 shout = mtod(mout, struct sctphdr *);
10993 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
10994 sctp_m_freem(mout);
10997 udp = (struct udphdr *)shout;
10998 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
10999 udp->uh_dport = port;
11001 udp->uh_ulen = htons(sizeof(struct udphdr) +
11002 sizeof(struct sctphdr) +
11003 sizeof(struct sctp_chunkhdr) +
11004 cause_len + padding_len);
11005 len += sizeof(struct udphdr);
11006 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
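/*
 * Build the outgoing common header for the response: the source and
 * destination ports of the received packet are swapped, and the
 * caller-supplied vtag is used when non-zero; otherwise the received
 * vtag is reflected and the chunk is flagged with SCTP_HAD_NO_TCB
 * (the T bit).
 */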
11010 shout->src_port = sh->dest_port;
11011 shout->dest_port = sh->src_port;
11012 shout->checksum = 0;
11014 shout->v_tag = htonl(vtag);
11016 shout->v_tag = sh->v_tag;
11018 len += sizeof(struct sctphdr);
11019 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11020 ch->chunk_type = type;
11022 ch->chunk_flags = 0;
11024 ch->chunk_flags = SCTP_HAD_NO_TCB;
11026 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
11027 len += sizeof(struct sctp_chunkhdr);
11028 len += cause_len + padding_len;
11030 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11031 sctp_m_freem(mout);
11034 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11035 switch (dst->sa_family) {
11040 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11047 #if defined(SCTP_WITH_NO_CSUM)
11048 SCTP_STAT_INCR(sctps_sendnocrc);
11050 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11051 SCTP_STAT_INCR(sctps_sendswcrc);
11054 SCTP_ENABLE_UDP_CSUM(o_pak);
11057 #if defined(SCTP_WITH_NO_CSUM)
11058 SCTP_STAT_INCR(sctps_sendnocrc);
11060 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11061 mout->m_pkthdr.csum_data = 0;
11062 SCTP_STAT_INCR(sctps_sendhwcrc);
11065 #ifdef SCTP_PACKET_LOGGING
11066 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11067 sctp_packet_log(o_pak);
11070 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11075 ip6->ip6_plen = len - sizeof(struct ip6_hdr);
11077 #if defined(SCTP_WITH_NO_CSUM)
11078 SCTP_STAT_INCR(sctps_sendnocrc);
11080 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11081 SCTP_STAT_INCR(sctps_sendswcrc);
11083 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11084 udp->uh_sum = 0xffff;
11087 #if defined(SCTP_WITH_NO_CSUM)
11088 SCTP_STAT_INCR(sctps_sendnocrc);
11090 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11091 mout->m_pkthdr.csum_data = 0;
11092 SCTP_STAT_INCR(sctps_sendhwcrc);
11095 #ifdef SCTP_PACKET_LOGGING
11096 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11097 sctp_packet_log(o_pak);
11100 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11104 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11106 sctp_m_freem(mout);
11107 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11110 SCTP_STAT_INCR(sctps_sendpackets);
11111 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11112 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11117 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11118 struct sctphdr *sh,
11119 uint8_t use_mflowid, uint32_t mflowid,
11120 uint32_t vrf_id, uint16_t port)
11122 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11123 use_mflowid, mflowid,
11128 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11129 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11134 struct sctp_tmit_chunk *chk;
11135 struct sctp_heartbeat_chunk *hb;
11136 struct timeval now;
11138 SCTP_TCB_LOCK_ASSERT(stcb);
11142 (void)SCTP_GETTIME_TIMEVAL(&now);
11143 switch (net->ro._l_addr.sa.sa_family) {
11155 sctp_alloc_a_chunk(stcb, chk);
11157 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11160 chk->copy_by_ref = 0;
11161 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11162 chk->rec.chunk_id.can_take_data = 1;
11163 chk->asoc = &stcb->asoc;
11164 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11166 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11167 if (chk->data == NULL) {
11168 sctp_free_a_chunk(stcb, chk, so_locked);
11171 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11172 SCTP_BUF_LEN(chk->data) = chk->send_size;
11173 chk->sent = SCTP_DATAGRAM_UNSENT;
11174 chk->snd_count = 0;
11176 atomic_add_int(&chk->whoTo->ref_count, 1);
11177 /* Now we have a mbuf that we can fill in with the details */
11178 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11179 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11180 /* fill out chunk header */
11181 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11182 hb->ch.chunk_flags = 0;
11183 hb->ch.chunk_length = htons(chk->send_size);
11184 /* Fill out hb parameter */
11185 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11186 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11187 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11188 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
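/*
 * The current time is carried in the heartbeat info; when the peer
 * echoes it back in a HEARTBEAT-ACK it can be used to measure the RTT
 * to this destination.
 */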
11189 /* Fill in the address family and length of this destination */
11190 hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
11191 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11192 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11194 * we only take from the entropy pool if the address is not confirmed
11197 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11198 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11200 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11201 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11203 switch (net->ro._l_addr.sa.sa_family) {
11206 memcpy(hb->heartbeat.hb_info.address,
11207 &net->ro._l_addr.sin.sin_addr,
11208 sizeof(net->ro._l_addr.sin.sin_addr));
11213 memcpy(hb->heartbeat.hb_info.address,
11214 &net->ro._l_addr.sin6.sin6_addr,
11215 sizeof(net->ro._l_addr.sin6.sin6_addr));
11222 net->hb_responded = 0;
11223 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11224 stcb->asoc.ctrl_queue_cnt++;
11225 SCTP_STAT_INCR(sctps_sendheartbeat);
11230 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11233 struct sctp_association *asoc;
11234 struct sctp_ecne_chunk *ecne;
11235 struct sctp_tmit_chunk *chk;
11240 asoc = &stcb->asoc;
11241 SCTP_TCB_LOCK_ASSERT(stcb);
11242 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11243 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11244 /* found a previous ECN_ECHO; update it if needed */
11245 uint32_t cnt, ctsn;
11247 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11248 ctsn = ntohl(ecne->tsn);
11249 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11250 ecne->tsn = htonl(high_tsn);
11251 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11253 cnt = ntohl(ecne->num_pkts_since_cwr);
11255 ecne->num_pkts_since_cwr = htonl(cnt);
11259 /* nope, could not find one to update, so we must build one */
11260 sctp_alloc_a_chunk(stcb, chk);
11264 chk->copy_by_ref = 0;
11265 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11266 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11267 chk->rec.chunk_id.can_take_data = 0;
11268 chk->asoc = &stcb->asoc;
11269 chk->send_size = sizeof(struct sctp_ecne_chunk);
11270 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11271 if (chk->data == NULL) {
11272 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11275 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11276 SCTP_BUF_LEN(chk->data) = chk->send_size;
11277 chk->sent = SCTP_DATAGRAM_UNSENT;
11278 chk->snd_count = 0;
11280 atomic_add_int(&chk->whoTo->ref_count, 1);
11282 stcb->asoc.ecn_echo_cnt_onq++;
11283 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11284 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11285 ecne->ch.chunk_flags = 0;
11286 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11287 ecne->tsn = htonl(high_tsn);
11288 ecne->num_pkts_since_cwr = htonl(1);
11289 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11290 asoc->ctrl_queue_cnt++;
11294 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11295 struct mbuf *m, int len, int iphlen, int bad_crc)
11297 struct sctp_association *asoc;
11298 struct sctp_pktdrop_chunk *drp;
11299 struct sctp_tmit_chunk *chk;
11305 struct sctp_chunkhdr *ch, chunk_buf;
11306 unsigned int chk_length;
11311 asoc = &stcb->asoc;
11312 SCTP_TCB_LOCK_ASSERT(stcb);
11313 if (asoc->peer_supports_pktdrop == 0) {
11315 * peer must declare support before I send one.
11319 if (stcb->sctp_socket == NULL) {
11322 sctp_alloc_a_chunk(stcb, chk);
11326 chk->copy_by_ref = 0;
11328 chk->send_size = len;
11329 /* Validate that we do not have an ABORT in here. */
11330 offset = iphlen + sizeof(struct sctphdr);
11331 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11332 sizeof(*ch), (uint8_t *) & chunk_buf);
11333 while (ch != NULL) {
11334 chk_length = ntohs(ch->chunk_length);
11335 if (chk_length < sizeof(*ch)) {
11336 /* break to abort land */
11339 switch (ch->chunk_type) {
11340 case SCTP_PACKET_DROPPED:
11341 case SCTP_ABORT_ASSOCIATION:
11342 case SCTP_INITIATION_ACK:
11344 * We don't respond with a PKT-DROP to an ABORT
11345 * or PKT-DROP. We also do not respond to an
11346 * INIT-ACK, because we can't know if the initiation
11347 * tag is correct or not.
11349 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11354 offset += SCTP_SIZE32(chk_length);
11355 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11356 sizeof(*ch), (uint8_t *) & chunk_buf);
11359 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11360 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11362 * only send 1 MTU's worth; trim off the excess at the end.
11365 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11368 chk->asoc = &stcb->asoc;
11369 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11370 if (chk->data == NULL) {
11372 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11375 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11376 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11378 sctp_m_freem(chk->data);
11382 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11383 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
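/*
 * book_size accounts for the dropped data plus the PKT-DROP chunk
 * header, the SCTP common header and SCTP_MED_OVERHEAD, rounded up to
 * a 32-bit boundary by SCTP_SIZE32.
 */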
11384 chk->book_size_scale = 0;
11386 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11387 drp->trunc_len = htons(fullsz);
11389 * Len is already adjusted to size minus overhead above; take
11390 * out the pkt_drop chunk itself from it.
11392 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
11393 len = chk->send_size;
11395 /* no truncation needed */
11396 drp->ch.chunk_flags = 0;
11397 drp->trunc_len = htons(0);
11400 drp->ch.chunk_flags |= SCTP_BADCRC;
11402 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11403 SCTP_BUF_LEN(chk->data) = chk->send_size;
11404 chk->sent = SCTP_DATAGRAM_UNSENT;
11405 chk->snd_count = 0;
11407 /* we should hit here */
11409 atomic_add_int(&chk->whoTo->ref_count, 1);
11413 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11414 chk->rec.chunk_id.can_take_data = 1;
11415 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11416 drp->ch.chunk_length = htons(chk->send_size);
11417 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11421 drp->bottle_bw = htonl(spc);
11422 if (asoc->my_rwnd) {
11423 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11424 asoc->size_on_all_streams +
11425 asoc->my_rwnd_control_len +
11426 stcb->sctp_socket->so_rcv.sb_cc);
11429 * If my rwnd is 0, possibly from mbuf depletion as well as
11430 * space used, tell the peer there is NO space aka onq == bw
11432 drp->current_onq = htonl(spc);
11436 m_copydata(m, iphlen, len, (caddr_t)datap);
11437 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11438 asoc->ctrl_queue_cnt++;
11442 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11444 struct sctp_association *asoc;
11445 struct sctp_cwr_chunk *cwr;
11446 struct sctp_tmit_chunk *chk;
11448 SCTP_TCB_LOCK_ASSERT(stcb);
11452 asoc = &stcb->asoc;
11453 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11454 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11456 * found a previous CWR queued to the same destination;
11457 * update it if needed
11461 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11462 ctsn = ntohl(cwr->tsn);
11463 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11464 cwr->tsn = htonl(high_tsn);
11466 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11467 /* Make sure override is carried */
11468 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11473 sctp_alloc_a_chunk(stcb, chk);
11477 chk->copy_by_ref = 0;
11478 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11479 chk->rec.chunk_id.can_take_data = 1;
11480 chk->asoc = &stcb->asoc;
11481 chk->send_size = sizeof(struct sctp_cwr_chunk);
11482 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11483 if (chk->data == NULL) {
11484 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11487 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11488 SCTP_BUF_LEN(chk->data) = chk->send_size;
11489 chk->sent = SCTP_DATAGRAM_UNSENT;
11490 chk->snd_count = 0;
11492 atomic_add_int(&chk->whoTo->ref_count, 1);
11493 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11494 cwr->ch.chunk_type = SCTP_ECN_CWR;
11495 cwr->ch.chunk_flags = override;
11496 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11497 cwr->tsn = htonl(high_tsn);
11498 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11499 asoc->ctrl_queue_cnt++;
11503 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11504 int number_entries, uint16_t * list,
11505 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11507 uint16_t len, old_len, i;
11508 struct sctp_stream_reset_out_request *req_out;
11509 struct sctp_chunkhdr *ch;
11511 ch = mtod(chk->data, struct sctp_chunkhdr *);
11512 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11514 /* get to new offset for the param. */
11515 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11516 /* now how long will this param be? */
11517 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
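/*
 * The parameter is the fixed out-request header followed by one 16-bit
 * stream number per entry in 'list'.
 */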
11518 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11519 req_out->ph.param_length = htons(len);
11520 req_out->request_seq = htonl(seq);
11521 req_out->response_seq = htonl(resp_seq);
11522 req_out->send_reset_at_tsn = htonl(last_sent);
11523 if (number_entries) {
11524 for (i = 0; i < number_entries; i++) {
11525 req_out->list_of_streams[i] = htons(list[i]);
11528 if (SCTP_SIZE32(len) > len) {
11530 * Need to worry about the pad we may end up adding to the
11531 * end. This is easy since the struct is either aligned to 4
11532 * bytes or 2 bytes off.
11534 req_out->list_of_streams[number_entries] = 0;
11536 /* now fix the chunk length */
11537 ch->chunk_length = htons(len + old_len);
11538 chk->book_size = len + old_len;
11539 chk->book_size_scale = 0;
11540 chk->send_size = SCTP_SIZE32(chk->book_size);
11541 SCTP_BUF_LEN(chk->data) = chk->send_size;
11546 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11547 int number_entries, uint16_t * list,
11550 uint16_t len, old_len, i;
11551 struct sctp_stream_reset_in_request *req_in;
11552 struct sctp_chunkhdr *ch;
11554 ch = mtod(chk->data, struct sctp_chunkhdr *);
11555 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11557 /* get to new offset for the param. */
11558 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11559 /* now how long will this param be? */
11560 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11561 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11562 req_in->ph.param_length = htons(len);
11563 req_in->request_seq = htonl(seq);
11564 if (number_entries) {
11565 for (i = 0; i < number_entries; i++) {
11566 req_in->list_of_streams[i] = htons(list[i]);
11569 if (SCTP_SIZE32(len) > len) {
11571 * Need to worry about the pad we may end up adding to the
11572 * end. This is easy since the struct is either aligned to 4
11573 * bytes or 2 bytes off.
11575 req_in->list_of_streams[number_entries] = 0;
11577 /* now fix the chunk length */
11578 ch->chunk_length = htons(len + old_len);
11579 chk->book_size = len + old_len;
11580 chk->book_size_scale = 0;
11581 chk->send_size = SCTP_SIZE32(chk->book_size);
11582 SCTP_BUF_LEN(chk->data) = chk->send_size;
11587 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11590 uint16_t len, old_len;
11591 struct sctp_stream_reset_tsn_request *req_tsn;
11592 struct sctp_chunkhdr *ch;
11594 ch = mtod(chk->data, struct sctp_chunkhdr *);
11595 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11597 /* get to new offset for the param. */
11598 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11599 /* now how long will this param be? */
11600 len = sizeof(struct sctp_stream_reset_tsn_request);
11601 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11602 req_tsn->ph.param_length = htons(len);
11603 req_tsn->request_seq = htonl(seq);
11605 /* now fix the chunk length */
11606 ch->chunk_length = htons(len + old_len);
11607 chk->send_size = len + old_len;
11608 chk->book_size = SCTP_SIZE32(chk->send_size);
11609 chk->book_size_scale = 0;
11610 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11615 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11616 uint32_t resp_seq, uint32_t result)
11618 uint16_t len, old_len;
11619 struct sctp_stream_reset_response *resp;
11620 struct sctp_chunkhdr *ch;
11622 ch = mtod(chk->data, struct sctp_chunkhdr *);
11623 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11625 /* get to new offset for the param. */
11626 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11627 /* now how long will this param be? */
11628 len = sizeof(struct sctp_stream_reset_response);
11629 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11630 resp->ph.param_length = htons(len);
11631 resp->response_seq = htonl(resp_seq);
11632 resp->result = ntohl(result);
11634 /* now fix the chunk length */
11635 ch->chunk_length = htons(len + old_len);
11636 chk->book_size = len + old_len;
11637 chk->book_size_scale = 0;
11638 chk->send_size = SCTP_SIZE32(chk->book_size);
11639 SCTP_BUF_LEN(chk->data) = chk->send_size;
11644 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11645 uint32_t resp_seq, uint32_t result,
11646 uint32_t send_una, uint32_t recv_next)
11648 uint16_t len, old_len;
11649 struct sctp_stream_reset_response_tsn *resp;
11650 struct sctp_chunkhdr *ch;
11652 ch = mtod(chk->data, struct sctp_chunkhdr *);
11653 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11655 /* get to new offset for the param. */
11656 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11657 /* now how long will this param be? */
11658 len = sizeof(struct sctp_stream_reset_response_tsn);
11659 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11660 resp->ph.param_length = htons(len);
11661 resp->response_seq = htonl(resp_seq);
11662 resp->result = htonl(result);
11663 resp->senders_next_tsn = htonl(send_una);
11664 resp->receivers_next_tsn = htonl(recv_next);
11666 /* now fix the chunk length */
11667 ch->chunk_length = htons(len + old_len);
11668 chk->book_size = len + old_len;
11669 chk->send_size = SCTP_SIZE32(chk->book_size);
11670 chk->book_size_scale = 0;
11671 SCTP_BUF_LEN(chk->data) = chk->send_size;
11676 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11680 uint16_t len, old_len;
11681 struct sctp_chunkhdr *ch;
11682 struct sctp_stream_reset_add_strm *addstr;
11684 ch = mtod(chk->data, struct sctp_chunkhdr *);
11685 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11687 /* get to new offset for the param. */
11688 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11689 /* now how long will this param be? */
11690 len = sizeof(struct sctp_stream_reset_add_strm);
11693 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11694 addstr->ph.param_length = htons(len);
11695 addstr->request_seq = htonl(seq);
11696 addstr->number_of_streams = htons(adding);
11697 addstr->reserved = 0;
11699 /* now fix the chunk length */
11700 ch->chunk_length = htons(len + old_len);
11701 chk->send_size = len + old_len;
11702 chk->book_size = SCTP_SIZE32(chk->send_size);
11703 chk->book_size_scale = 0;
11704 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11709 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
11713 uint16_t len, old_len;
11714 struct sctp_chunkhdr *ch;
11715 struct sctp_stream_reset_add_strm *addstr;
11717 ch = mtod(chk->data, struct sctp_chunkhdr *);
11718 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11720 /* get to new offset for the param. */
11721 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11722 /* now how long will this param be? */
11723 len = sizeof(struct sctp_stream_reset_add_strm);
11725 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
11726 addstr->ph.param_length = htons(len);
11727 addstr->request_seq = htonl(seq);
11728 addstr->number_of_streams = htons(adding);
11729 addstr->reserved = 0;
11731 /* now fix the chunk length */
11732 ch->chunk_length = htons(len + old_len);
11733 chk->send_size = len + old_len;
11734 chk->book_size = SCTP_SIZE32(chk->send_size);
11735 chk->book_size_scale = 0;
11736 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11741 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11742 int number_entries, uint16_t * list,
11743 uint8_t send_out_req,
11744 uint8_t send_in_req,
11745 uint8_t send_tsn_req,
11746 uint8_t add_stream,
11748 uint16_t adding_i, uint8_t peer_asked)
11751 struct sctp_association *asoc;
11752 struct sctp_tmit_chunk *chk;
11753 struct sctp_chunkhdr *ch;
11756 asoc = &stcb->asoc;
11757 if (asoc->stream_reset_outstanding) {
11759 * Already one pending, must get ACK back to clear the flag.
11761 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11764 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11765 (add_stream == 0)) {
11766 /* nothing to do */
11767 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11770 if (send_tsn_req && (send_out_req || send_in_req)) {
11771 /* error, can't do that */
11772 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11775 sctp_alloc_a_chunk(stcb, chk);
11777 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11780 chk->copy_by_ref = 0;
11781 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11782 chk->rec.chunk_id.can_take_data = 0;
11783 chk->asoc = &stcb->asoc;
11784 chk->book_size = sizeof(struct sctp_chunkhdr);
11785 chk->send_size = SCTP_SIZE32(chk->book_size);
11786 chk->book_size_scale = 0;
11788 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11789 if (chk->data == NULL) {
11790 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11791 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11794 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11796 /* setup chunk parameters */
11797 chk->sent = SCTP_DATAGRAM_UNSENT;
11798 chk->snd_count = 0;
11799 if (stcb->asoc.alternate) {
11800 chk->whoTo = stcb->asoc.alternate;
11802 chk->whoTo = stcb->asoc.primary_destination;
11804 atomic_add_int(&chk->whoTo->ref_count, 1);
11805 ch = mtod(chk->data, struct sctp_chunkhdr *);
11806 ch->chunk_type = SCTP_STREAM_RESET;
11807 ch->chunk_flags = 0;
11808 ch->chunk_length = htons(chk->book_size);
11809 SCTP_BUF_LEN(chk->data) = chk->send_size;
11811 seq = stcb->asoc.str_reset_seq_out;
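/*
 * Each requested operation below appends one parameter to this single
 * STREAM RESET chunk and bumps stream_reset_outstanding.
 */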
11812 if (send_out_req) {
11813 sctp_add_stream_reset_out(chk, number_entries, list,
11814 seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
11815 asoc->stream_reset_out_is_outstanding = 1;
11817 asoc->stream_reset_outstanding++;
11819 if ((add_stream & 1) &&
11820 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
11821 /* Need to allocate more */
11822 struct sctp_stream_out *oldstream;
11823 struct sctp_stream_queue_pending *sp, *nsp;
11826 oldstream = stcb->asoc.strmout;
11827 /* get some more */
11828 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
11829 ((stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out)),
11831 if (stcb->asoc.strmout == NULL) {
11834 stcb->asoc.strmout = oldstream;
11835 /* Turn off the bit */
11836 x = add_stream & 0xfe;
11841 * Ok now we proceed with copying the old out stuff and
11842 * initializing the new stuff.
11844 SCTP_TCB_SEND_LOCK(stcb);
11845 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
11846 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11847 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11848 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
11849 stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send;
11850 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
11851 stcb->asoc.strmout[i].stream_no = i;
11852 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]);
11853 /* now anything on those queues? */
11854 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
11855 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
11856 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
11858 /* Now move assoc pointers too */
11859 if (stcb->asoc.last_out_stream == &oldstream[i]) {
11860 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
11862 if (stcb->asoc.locked_on_sending == &oldstream[i]) {
11863 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
11866 /* now the new streams */
11867 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
11868 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
11869 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
11870 stcb->asoc.strmout[i].chunks_on_queues = 0;
11871 stcb->asoc.strmout[i].next_sequence_send = 0x0;
11872 stcb->asoc.strmout[i].stream_no = i;
11873 stcb->asoc.strmout[i].last_msg_incomplete = 0;
11874 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
11876 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
11877 SCTP_FREE(oldstream, SCTP_M_STRMO);
11878 SCTP_TCB_SEND_UNLOCK(stcb);
11881 if ((add_stream & 1) && (adding_o > 0)) {
11882 asoc->strm_pending_add_size = adding_o;
11883 asoc->peer_req_out = peer_asked;
11884 sctp_add_an_out_stream(chk, seq, adding_o);
11886 asoc->stream_reset_outstanding++;
11888 if ((add_stream & 2) && (adding_i > 0)) {
11889 sctp_add_an_in_stream(chk, seq, adding_i);
11891 asoc->stream_reset_outstanding++;
11894 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11896 asoc->stream_reset_outstanding++;
11898 if (send_tsn_req) {
11899 sctp_add_stream_reset_tsn(chk, seq);
11900 asoc->stream_reset_outstanding++;
11902 asoc->str_reset = chk;
11903 /* insert the chunk for sending */
11904 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11907 asoc->ctrl_queue_cnt++;
11908 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
11913 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
11914 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
11915 uint8_t use_mflowid, uint32_t mflowid,
11916 uint32_t vrf_id, uint16_t port)
11918 /* Don't respond to an ABORT with an ABORT. */
11919 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11921 sctp_m_freem(cause);
11924 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
11925 use_mflowid, mflowid,
11931 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
11932 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
11933 uint8_t use_mflowid, uint32_t mflowid,
11934 uint32_t vrf_id, uint16_t port)
11936 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
11937 use_mflowid, mflowid,
11942 static struct mbuf *
11943 sctp_copy_resume(struct uio *uio,
11945 int user_marks_eor,
11948 struct mbuf **new_tail)
11952 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
11953 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
11955 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11958 *sndout = m_length(m, NULL);
11959 *new_tail = m_last(m);
11965 sctp_copy_one(struct sctp_stream_queue_pending *sp,
11972 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
11974 if (sp->data == NULL) {
11975 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11978 sp->tail_mbuf = m_last(sp->data);
11984 static struct sctp_stream_queue_pending *
11985 sctp_copy_it_in(struct sctp_tcb *stcb,
11986 struct sctp_association *asoc,
11987 struct sctp_sndrcvinfo *srcv,
11989 struct sctp_nets *net,
11991 int user_marks_eor,
11995 * This routine must be very careful in its work. Protocol
11996 * processing is up and running so care must be taken to spl...()
11997 * when you need to do something that may affect the stcb/asoc. The
11998 * sb is locked however. When data is copied the protocol processing
11999 * should be enabled since this is a slower operation...
12001 struct sctp_stream_queue_pending *sp = NULL;
12005 /* Now can we send this? */
12006 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12007 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12008 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12009 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12010 /* got data while shutting down */
12011 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12012 *error = ECONNRESET;
12015 sctp_alloc_a_strmoq(stcb, sp);
12017 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12022 sp->sender_all_done = 0;
12023 sp->sinfo_flags = srcv->sinfo_flags;
12024 sp->timetolive = srcv->sinfo_timetolive;
12025 sp->ppid = srcv->sinfo_ppid;
12026 sp->context = srcv->sinfo_context;
12027 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12029 sp->stream = srcv->sinfo_stream;
12030 sp->length = min(uio->uio_resid, max_send_len);
12031 if ((sp->length == (uint32_t) uio->uio_resid) &&
12032 ((user_marks_eor == 0) ||
12033 (srcv->sinfo_flags & SCTP_EOF) ||
12034 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12035 sp->msg_is_complete = 1;
12037 sp->msg_is_complete = 0;
12039 sp->sender_all_done = 0;
12040 sp->some_taken = 0;
12041 sp->put_last_out = 0;
12042 resv_in_first = sizeof(struct sctp_data_chunk);
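/*
 * resv_in_first reserves leading space in the first data mbuf,
 * presumably so a DATA chunk header can later be prepended without
 * allocating another mbuf.
 */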
12043 sp->data = sp->tail_mbuf = NULL;
12044 if (sp->length == 0) {
12048 if (srcv->sinfo_keynumber_valid) {
12049 sp->auth_keyid = srcv->sinfo_keynumber;
12051 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12053 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12054 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12055 sp->holds_key_ref = 1;
12057 *error = sctp_copy_one(sp, uio, resv_in_first);
12060 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12063 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12065 atomic_add_int(&sp->net->ref_count, 1);
12069 sctp_set_prsctp_policy(sp);
12077 sctp_sosend(struct socket *so,
12078 struct sockaddr *addr,
12081 struct mbuf *control,
12086 int error, use_sndinfo = 0;
12087 struct sctp_sndrcvinfo sndrcvninfo;
12088 struct sockaddr *addr_to_use;
12090 #if defined(INET) && defined(INET6)
12091 struct sockaddr_in sin;
12096 /* process cmsg snd/rcv info (maybe an assoc-id) */
12097 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12098 sizeof(sndrcvninfo))) {
12103 addr_to_use = addr;
12104 #if defined(INET) && defined(INET6)
12105 if ((addr) && (addr->sa_family == AF_INET6)) {
12106 struct sockaddr_in6 *sin6;
12108 sin6 = (struct sockaddr_in6 *)addr;
12109 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12110 in6_sin6_2_sin(&sin, sin6);
12111 addr_to_use = (struct sockaddr *)&sin;
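/*
 * An IPv4-mapped IPv6 address is converted to a plain sockaddr_in
 * here, so the lower send path only has to deal with a native IPv4
 * address.
 */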
12115 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12118 use_sndinfo ? &sndrcvninfo : NULL
12126 sctp_lower_sosend(struct socket *so,
12127 struct sockaddr *addr,
12129 struct mbuf *i_pak,
12130 struct mbuf *control,
12132 struct sctp_sndrcvinfo *srcv
12137 unsigned int sndlen = 0, max_len;
12139 struct mbuf *top = NULL;
12140 int queue_only = 0, queue_only_for_init = 0;
12141 int free_cnt_applied = 0;
12143 int now_filled = 0;
12144 unsigned int inqueue_bytes = 0;
12145 struct sctp_block_entry be;
12146 struct sctp_inpcb *inp;
12147 struct sctp_tcb *stcb = NULL;
12148 struct timeval now;
12149 struct sctp_nets *net;
12150 struct sctp_association *asoc;
12151 struct sctp_inpcb *t_inp;
12152 int user_marks_eor;
12153 int create_lock_applied = 0;
12154 int nagle_applies = 0;
12155 int some_on_control = 0;
12156 int got_all_of_the_send = 0;
12157 int hold_tcblock = 0;
12158 int non_blocking = 0;
12159 uint32_t local_add_more, local_soresv = 0;
12161 uint16_t sinfo_flags;
12162 sctp_assoc_t sinfo_assoc_id;
12169 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12171 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12174 SCTP_RELEASE_PKT(i_pak);
12178 if ((uio == NULL) && (i_pak == NULL)) {
12179 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12182 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12183 atomic_add_int(&inp->total_sends, 1);
12185 if (uio->uio_resid < 0) {
12186 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12189 sndlen = uio->uio_resid;
12191 top = SCTP_HEADER_TO_CHAIN(i_pak);
12192 sndlen = SCTP_HEADER_LEN(i_pak);
12194 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12197 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12198 (inp->sctp_socket->so_qlimit)) {
12199 /* The listener can NOT send */
12200 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12205 * Pre-screen address; if one is given, the sin_len
12206 * must be set correctly!
12209 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12211 switch (raddr->sa.sa_family) {
12214 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12215 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12219 port = raddr->sin.sin_port;
12224 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12225 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12229 port = raddr->sin6.sin6_port;
12233 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12234 error = EAFNOSUPPORT;
12241 sinfo_flags = srcv->sinfo_flags;
12242 sinfo_assoc_id = srcv->sinfo_assoc_id;
12243 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12244 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12245 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12249 if (srcv->sinfo_flags)
12250 SCTP_STAT_INCR(sctps_sends_with_flags);
12252 sinfo_flags = inp->def_send.sinfo_flags;
12253 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12255 if (sinfo_flags & SCTP_SENDALL) {
12256 /* it's a sendall */
12257 error = sctp_sendall(inp, uio, top, srcv);
12261 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12262 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12266 /* now we must find the assoc */
12267 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12268 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12269 SCTP_INP_RLOCK(inp);
12270 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12272 SCTP_TCB_LOCK(stcb);
12275 SCTP_INP_RUNLOCK(inp);
12276 } else if (sinfo_assoc_id) {
12277 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
12280 * Since we did not use findep we must
12281 * increment it, and if we don't find a tcb, decrement it
12284 SCTP_INP_WLOCK(inp);
12285 SCTP_INP_INCR_REF(inp);
12286 SCTP_INP_WUNLOCK(inp);
12287 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12288 if (stcb == NULL) {
12289 SCTP_INP_WLOCK(inp);
12290 SCTP_INP_DECR_REF(inp);
12291 SCTP_INP_WUNLOCK(inp);
12296 if ((stcb == NULL) && (addr)) {
12297 /* Possible implicit send? */
12298 SCTP_ASOC_CREATE_LOCK(inp);
12299 create_lock_applied = 1;
12300 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12301 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12302 /* Should I really unlock ? */
12303 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12308 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12309 (addr->sa_family == AF_INET6)) {
12310 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12314 SCTP_INP_WLOCK(inp);
12315 SCTP_INP_INCR_REF(inp);
12316 SCTP_INP_WUNLOCK(inp);
12317 /* With the lock applied look again */
12318 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12319 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12320 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12322 if (stcb == NULL) {
12323 SCTP_INP_WLOCK(inp);
12324 SCTP_INP_DECR_REF(inp);
12325 SCTP_INP_WUNLOCK(inp);
12332 if (t_inp != inp) {
12333 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12338 if (stcb == NULL) {
12339 if (addr == NULL) {
12340 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12344 /* We must go ahead and start the INIT process */
12347 if ((sinfo_flags & SCTP_ABORT) ||
12348 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12350 * User asks to abort a non-existent assoc,
12351 * or EOF a non-existent assoc with no data
12353 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12357 /* get an asoc/stcb struct */
12358 vrf_id = inp->def_vrf_id;
12360 if (create_lock_applied == 0) {
12361 panic("Error, should hold create lock and I don't?");
12364 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12367 if (stcb == NULL) {
12368 /* Error is set up for us in the call */
12371 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12372 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12374 * Set the connected flag so we can queue data
12377 soisconnecting(so);
12380 if (create_lock_applied) {
12381 SCTP_ASOC_CREATE_UNLOCK(inp);
12382 create_lock_applied = 0;
12384 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12387 * Turn on queue only flag to prevent data from being sent
12391 asoc = &stcb->asoc;
12392 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12393 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12395 /* initialize authentication params for the assoc */
12396 sctp_initialize_auth_params(inp, stcb);
12399 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12400 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
12406 /* out with the INIT */
12407 queue_only_for_init = 1;
12409 * we may want to dig in after this call and adjust the MTU
12410 * value. It defaulted to 1500 (constant) but the ro
12411 * structure may now have an update and thus we may need to
12412 * change it BEFORE we append the message.
12416 asoc = &stcb->asoc;
12418 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12419 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12421 net = sctp_findnet(stcb, addr);
12424 if ((net == NULL) ||
12425 ((port != 0) && (port != stcb->rport))) {
12426 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12431 if (stcb->asoc.alternate) {
12432 net = stcb->asoc.alternate;
12434 net = stcb->asoc.primary_destination;
12437 atomic_add_int(&stcb->total_sends, 1);
12438 /* Keep the stcb from being freed under our feet */
12439 atomic_add_int(&asoc->refcnt, 1);
12440 free_cnt_applied = 1;
12442 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12443 if (sndlen > asoc->smallest_mtu) {
12444 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12449 if (SCTP_SO_IS_NBIO(so)
12450 || (flags & MSG_NBIO)
12454 /* would we block? */
12455 if (non_blocking) {
12456 if (hold_tcblock == 0) {
12457 SCTP_TCB_LOCK(stcb);
12460 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12461 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12462 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12463 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12464 if (sndlen > SCTP_SB_LIMIT_SND(so))
12467 error = EWOULDBLOCK;
12470 stcb->asoc.sb_send_resv += sndlen;
12471 SCTP_TCB_UNLOCK(stcb);
12474 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12476 local_soresv = sndlen;
12477 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12478 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12479 error = ECONNRESET;
12482 if (create_lock_applied) {
12483 SCTP_ASOC_CREATE_UNLOCK(inp);
12484 create_lock_applied = 0;
12486 if (asoc->stream_reset_outstanding) {
12488 * Can't queue any data while stream reset is underway.
12490 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
12494 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12495 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12498 /* we are now done with all control */
12500 sctp_m_freem(control);
12503 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12504 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12505 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12506 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12507 if (srcv->sinfo_flags & SCTP_ABORT) {
12510 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12511 error = ECONNRESET;
12515 /* Ok, we will attempt a msgsnd :> */
12517 p->td_ru.ru_msgsnd++;
12519 /* Are we aborting? */
12520 if (srcv->sinfo_flags & SCTP_ABORT) {
12522 int tot_demand, tot_out = 0, max_out;
12524 SCTP_STAT_INCR(sctps_sends_with_abort);
12525 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12526 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12527 /* It has to be up before we abort */
12528 /* how big is the user initiated abort? */
12529 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12533 if (hold_tcblock) {
12534 SCTP_TCB_UNLOCK(stcb);
12538 struct mbuf *cntm = NULL;
12540 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAIT, 1, MT_DATA);
12542 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12543 tot_out += SCTP_BUF_LEN(cntm);
12547 /* Must fit in an MTU */
12549 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12550 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12552 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12556 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
12559 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12563 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12564 max_out -= sizeof(struct sctp_abort_msg);
12565 if (tot_out > max_out) {
12569 struct sctp_paramhdr *ph;
12571 /* now move forward the data pointer */
12572 ph = mtod(mm, struct sctp_paramhdr *);
12573 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12574 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
12576 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12578 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12581 * Here, even if we can't get the user's data we
12582 * still abort; we just don't get to
12583 * send the user's note :-0
12590 SCTP_BUF_NEXT(mm) = top;
12594 if (hold_tcblock == 0) {
12595 SCTP_TCB_LOCK(stcb);
12597 atomic_add_int(&stcb->asoc.refcnt, -1);
12598 free_cnt_applied = 0;
12599 /* release this lock, otherwise we hang on ourselves */
12600 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
12601 /* now relock the stcb so everything is sane */
12605 * In this case top is already chained to mm; avoid a double
12606 * free, since we free it below if top != NULL and the driver
12607 * would free it after sending the packet out
12614 /* Calculate the maximum we can send */
12615 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12616 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12617 if (non_blocking) {
12618 /* we already checked for non-blocking above. */
12621 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12626 if (hold_tcblock) {
12627 SCTP_TCB_UNLOCK(stcb);
12630 /* Is the stream no. valid? */
12631 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12632 /* Invalid stream number */
12633 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12637 if (asoc->strmout == NULL) {
12638 /* huh? software error */
12639 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12643 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
12644 if ((user_marks_eor == 0) &&
12645 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
12646 /* It will NEVER fit */
12647 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12651 if ((uio == NULL) && user_marks_eor) {
12653 * We do not support eeor mode for
12654 * sending with mbuf chains (like sendfile).
12656 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12660 if (user_marks_eor) {
12661 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
12664 * For non-eeor the whole message must fit in
12665 * the socket send buffer.
12667 local_add_more = sndlen;
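/*
 * local_add_more is the amount of send-buffer space we insist on
 * before entering the pre-block wait below: the add-more threshold in
 * explicit-EOR mode, the whole message otherwise.
 */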
12670 if (non_blocking) {
12671 goto skip_preblock;
12673 if (((max_len <= local_add_more) &&
12674 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
12676 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12677 /* No room right now ! */
12678 SOCKBUF_LOCK(&so->so_snd);
12679 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12680 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
12681 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12682 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
12683 (unsigned int)SCTP_SB_LIMIT_SND(so),
12686 stcb->asoc.stream_queue_cnt,
12687 stcb->asoc.chunks_on_out_queue,
12688 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
12689 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12690 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
12693 stcb->block_entry = &be;
12694 error = sbwait(&so->so_snd);
12695 stcb->block_entry = NULL;
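/*
 * A blocking sender parks here on the socket send buffer;
 * stcb->block_entry lets other code wake it up with an error, which is
 * checked via be.error below.
 */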
12696 if (error || so->so_error || be.error) {
12699 error = so->so_error;
12704 SOCKBUF_UNLOCK(&so->so_snd);
12707 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12708 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
12709 asoc, stcb->asoc.total_output_queue_size);
12711 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12714 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12716 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12717 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12721 SOCKBUF_UNLOCK(&so->so_snd);
12724 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12728 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
12729 * case.  NOTE: uio will be NULL when top/mbuf is passed
12732 if (srcv->sinfo_flags & SCTP_EOF) {
12733 got_all_of_the_send = 1;
12736 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12742 struct sctp_stream_queue_pending *sp;
12743 struct sctp_stream_out *strm;
12746 SCTP_TCB_SEND_LOCK(stcb);
12747 if ((asoc->stream_locked) &&
12748 (asoc->stream_locked_on != srcv->sinfo_stream)) {
12749 SCTP_TCB_SEND_UNLOCK(stcb);
12750 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12754 SCTP_TCB_SEND_UNLOCK(stcb);
12756 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
12757 if (strm->last_msg_incomplete == 0) {
12759 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
12760 if ((sp == NULL) || (error)) {
12763 SCTP_TCB_SEND_LOCK(stcb);
12764 if (sp->msg_is_complete) {
12765 strm->last_msg_incomplete = 0;
12766 asoc->stream_locked = 0;
12769 * Just got locked to this guy in case of an interrupt
12772 strm->last_msg_incomplete = 1;
12773 asoc->stream_locked = 1;
12774 asoc->stream_locked_on = srcv->sinfo_stream;
12775 sp->sender_all_done = 0;
12777 sctp_snd_sb_alloc(stcb, sp->length);
12778 atomic_add_int(&asoc->stream_queue_cnt, 1);
12779 if (srcv->sinfo_flags & SCTP_UNORDERED) {
12780 SCTP_STAT_INCR(sctps_sends_with_unord);
12782 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
12783 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
12784 SCTP_TCB_SEND_UNLOCK(stcb);
12786 SCTP_TCB_SEND_LOCK(stcb);
12787 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
12788 SCTP_TCB_SEND_UNLOCK(stcb);
12790 /* ???? Huh ??? last msg is gone */
12792 panic("Warning: Last msg marked incomplete, yet nothing left?");
12794 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
12795 strm->last_msg_incomplete = 0;
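/*
 * Copy loop: move the user data from the uio into the stream
 * queue entry in pieces, each bounded by the free send-buffer
 * space (max_len), blocking for more space when needed.
 */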
12801 while (uio->uio_resid > 0) {
12802 /* How much room do we have? */
12803 struct mbuf *new_tail, *mm;
12805 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
12806 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
12810 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
12811 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
12812 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
12815 if (hold_tcblock) {
12816 SCTP_TCB_UNLOCK(stcb);
12819 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
12820 if ((mm == NULL) || error) {
12826 /* Update the mbuf and count */
12827 SCTP_TCB_SEND_LOCK(stcb);
12828 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12830 * we need to get out. Peer probably aborted.
12834 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
12835 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12836 error = ECONNRESET;
12838 SCTP_TCB_SEND_UNLOCK(stcb);
12841 if (sp->tail_mbuf) {
12842 /* tack it to the end */
12843 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
12844 sp->tail_mbuf = new_tail;
12846 /* A stolen mbuf */
12848 sp->tail_mbuf = new_tail;
12850 sctp_snd_sb_alloc(stcb, sndout);
12851 atomic_add_int(&sp->length, sndout);
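/*
 * A message is complete once the uio is drained and either
 * explicit EOR is off, or the user flagged SCTP_EOR/SCTP_EOF
 * on this send.
 */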
12854 /* Did we reach EOR? */
12855 if ((uio->uio_resid == 0) &&
12856 ((user_marks_eor == 0) ||
12857 (srcv->sinfo_flags & SCTP_EOF) ||
12858 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12859 sp->msg_is_complete = 1;
12861 sp->msg_is_complete = 0;
12863 SCTP_TCB_SEND_UNLOCK(stcb);
12865 if (uio->uio_resid == 0) {
12870 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
12872 * This is ugly but we must assure locking order.
12875 if (hold_tcblock == 0) {
12876 SCTP_TCB_LOCK(stcb);
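/*
 * With PR-SCTP, chunks on the sent queue whose policy allows it
 * can be pruned to free room for this message before deciding
 * whether to block.
 */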
12879 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
12880 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12881 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
12882 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12888 SCTP_TCB_UNLOCK(stcb);
12891 /* wait for space now */
12892 if (non_blocking) {
12893 /* Non-blocking I/O in place, so get out */
12896 /* What about the INIT, send it maybe */
12897 if (queue_only_for_init) {
12898 if (hold_tcblock == 0) {
12899 SCTP_TCB_LOCK(stcb);
12902 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
12903 /* a collision took us forward? */
12906 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
12907 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12911 if ((net->flight_size > net->cwnd) &&
12912 (asoc->sctp_cmt_on_off == 0)) {
12913 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
12915 } else if (asoc->ifp_had_enobuf) {
12916 SCTP_STAT_INCR(sctps_ifnomemqueued);
12917 if (net->flight_size > (2 * net->mtu)) {
12920 asoc->ifp_had_enobuf = 0;
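/*
 * un_sent estimates the bytes queued but not yet in flight, plus
 * one DATA chunk header per queued stream message; Nagle holds
 * the send back only while that stays under a full MTU and data
 * is already outstanding.
 */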
12922 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
12923 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
12924 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
12925 (stcb->asoc.total_flight > 0) &&
12926 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
12927 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
12930 * Ok, Nagle is set on and we have data outstanding.
12931 * Don't send anything and let SACKs drive out the
12932 * data unless we have a "full" segment to send.
12934 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
12935 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
12937 SCTP_STAT_INCR(sctps_naglequeued);
12940 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
12941 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
12942 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
12944 SCTP_STAT_INCR(sctps_naglesent);
12947 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12949 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
12950 nagle_applies, un_sent);
12951 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
12952 stcb->asoc.total_flight,
12953 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
12955 if (queue_only_for_init)
12956 queue_only_for_init = 0;
12957 if ((queue_only == 0) && (nagle_applies == 0)) {
12959 * need to start chunk output
12960 * before blocking.. note that if
12961 * a lock is already applied, then
12962 * the input via the net is happening
12963 * and I don't need to start output :-D
12965 if (hold_tcblock == 0) {
12966 if (SCTP_TCB_TRYLOCK(stcb)) {
12968 sctp_chunk_output(inp,
12970 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
12973 sctp_chunk_output(inp,
12975 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
12977 if (hold_tcblock == 1) {
12978 SCTP_TCB_UNLOCK(stcb);
12982 SOCKBUF_LOCK(&so->so_snd);
12984 * This is a bit strange, but I think it will
12985 * work. The total_output_queue_size is locked and
12986 * protected by the TCB_LOCK, which we just released.
12987 * There is a race that can occur between releasing it
12988 * above, and me getting the socket lock, where sacks
12989 * come in but we have not put the SB_WAIT on the
12990 * so_snd buffer to get the wakeup. After the LOCK
12991 * is applied the sack_processing will also need to
12992 * LOCK the so->so_snd to do the actual sowwakeup(). So
12993 * once we have the socket buffer lock if we recheck the
12994 * size we KNOW we will get to sleep safely with the
12995 * wakeup flag in place.
12997 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
12998 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
12999 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13000 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13001 asoc, uio->uio_resid);
13004 stcb->block_entry = &be;
13005 error = sbwait(&so->so_snd);
13006 stcb->block_entry = NULL;
13008 if (error || so->so_error || be.error) {
13011 error = so->so_error;
13016 SOCKBUF_UNLOCK(&so->so_snd);
13019 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13020 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13021 asoc, stcb->asoc.total_output_queue_size);
13024 SOCKBUF_UNLOCK(&so->so_snd);
13025 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13029 SCTP_TCB_SEND_LOCK(stcb);
13031 if (sp->msg_is_complete == 0) {
13032 strm->last_msg_incomplete = 1;
13033 asoc->stream_locked = 1;
13034 asoc->stream_locked_on = srcv->sinfo_stream;
13036 sp->sender_all_done = 1;
13037 strm->last_msg_incomplete = 0;
13038 asoc->stream_locked = 0;
13041 SCTP_PRINTF("Huh no sp TSNH?\n");
13042 strm->last_msg_incomplete = 0;
13043 asoc->stream_locked = 0;
13045 SCTP_TCB_SEND_UNLOCK(stcb);
13046 if (uio->uio_resid == 0) {
13047 got_all_of_the_send = 1;
13050 /* We send in a 0, since we do NOT have any locks */
13051 error = sctp_msg_append(stcb, net, top, srcv, 0);
13053 if (srcv->sinfo_flags & SCTP_EOF) {
13055 * This should only happen for Panda for the mbuf
13056 * send case, which does NOT yet support EEOR mode.
13057 * Thus, we can just set this flag to do the proper EOF handling.
13060 got_all_of_the_send = 1;
13068 if ((srcv->sinfo_flags & SCTP_EOF) &&
13069 (got_all_of_the_send == 1)) {
13072 SCTP_STAT_INCR(sctps_sends_with_eof);
13074 if (hold_tcblock == 0) {
13075 SCTP_TCB_LOCK(stcb);
13078 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
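/*
 * SCTP_EOF: if nothing remains on the send and sent queues the
 * SHUTDOWN can go out now; otherwise the association is marked
 * SHUTDOWN_PENDING and the shutdown proceeds once the queued
 * data has been delivered.
 */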
13079 if (TAILQ_EMPTY(&asoc->send_queue) &&
13080 TAILQ_EMPTY(&asoc->sent_queue) &&
13082 if (asoc->locked_on_sending) {
13085 /* there is nothing queued to send, so I'm done... */
13086 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13087 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13088 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13089 struct sctp_nets *netp;
13091 if (stcb->asoc.alternate) {
13092 netp = stcb->asoc.alternate;
13094 netp = stcb->asoc.primary_destination;
13096 /* only send SHUTDOWN the first time through */
13097 sctp_send_shutdown(stcb, netp);
13098 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
13099 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13101 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
13102 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
13103 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13105 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13106 asoc->primary_destination);
13110 * we still got (or just got) data to send, so set SHUTDOWN_PENDING.
13114 * XXX sockets draft says that SCTP_EOF should be
13115 * sent with no data. currently, we will allow user
13116 * data to be sent first and move to SHUTDOWN-PENDING.
13119 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13120 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13121 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13122 if (hold_tcblock == 0) {
13123 SCTP_TCB_LOCK(stcb);
13126 if (asoc->locked_on_sending) {
13127 /* Locked to send out the data */
13128 struct sctp_stream_queue_pending *sp;
13130 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
13132 if ((sp->length == 0) && (sp->msg_is_complete == 0))
13133 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
13136 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
13137 if (TAILQ_EMPTY(&asoc->send_queue) &&
13138 TAILQ_EMPTY(&asoc->sent_queue) &&
13139 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13141 if (free_cnt_applied) {
13142 atomic_add_int(&stcb->asoc.refcnt, -1);
13143 free_cnt_applied = 0;
13145 sctp_abort_an_association(stcb->sctp_ep, stcb,
13146 NULL, SCTP_SO_LOCKED);
13148 * now relock the stcb so everything is sane.
13155 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13156 asoc->primary_destination);
13157 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13162 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13163 some_on_control = 1;
13165 if (queue_only_for_init) {
13166 if (hold_tcblock == 0) {
13167 SCTP_TCB_LOCK(stcb);
13170 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13171 /* a collision took us forward? */
13174 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13175 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
13179 if ((net->flight_size > net->cwnd) &&
13180 (stcb->asoc.sctp_cmt_on_off == 0)) {
13181 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13183 } else if (asoc->ifp_had_enobuf) {
13184 SCTP_STAT_INCR(sctps_ifnomemqueued);
13185 if (net->flight_size > (2 * net->mtu)) {
13188 asoc->ifp_had_enobuf = 0;
13190 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13191 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
13192 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13193 (stcb->asoc.total_flight > 0) &&
13194 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13195 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13197 * Ok, Nagle is set on and we have data outstanding.
13198 * Don't send anything and let SACKs drive out the
13199 * data unless we have a "full" segment to send.
13201 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13202 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13204 SCTP_STAT_INCR(sctps_naglequeued);
13207 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13208 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13209 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13211 SCTP_STAT_INCR(sctps_naglesent);
13214 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13215 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13216 nagle_applies, un_sent);
13217 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13218 stcb->asoc.total_flight,
13219 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13221 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13222 /* we can attempt to send too. */
13223 if (hold_tcblock == 0) {
13225 * If there is activity recv'ing sacks no need to
13228 if (SCTP_TCB_TRYLOCK(stcb)) {
13229 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13233 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13235 } else if ((queue_only == 0) &&
13236 (stcb->asoc.peers_rwnd == 0) &&
13237 (stcb->asoc.total_flight == 0)) {
13238 /* We get to have a probe outstanding */
13239 if (hold_tcblock == 0) {
13241 SCTP_TCB_LOCK(stcb);
13243 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13244 } else if (some_on_control) {
13245 int num_out, reason, frag_point;
13247 /* Here we do control only */
13248 if (hold_tcblock == 0) {
13250 SCTP_TCB_LOCK(stcb);
13252 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13253 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13254 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13256 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13257 queue_only, stcb->asoc.peers_rwnd, un_sent,
13258 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13259 stcb->asoc.total_output_queue_size, error);
13264 if (local_soresv && stcb) {
13265 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13267 if (create_lock_applied) {
13268 SCTP_ASOC_CREATE_UNLOCK(inp);
13270 if ((stcb) && hold_tcblock) {
13271 SCTP_TCB_UNLOCK(stcb);
13273 if (stcb && free_cnt_applied) {
13274 atomic_add_int(&stcb->asoc.refcnt, -1);
13278 if (mtx_owned(&stcb->tcb_mtx)) {
13279 panic("Leaving with tcb mtx owned?");
13281 if (mtx_owned(&stcb->tcb_send_mtx)) {
13282 panic("Leaving with tcb send mtx owned?");
13288 sctp_validate_no_locks(inp);
13290 SCTP_PRINTF("Warning - inp is NULL so cant validate locks\n");
13297 sctp_m_freem(control);
13304 * generate an AUTHentication chunk, if required
13307 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13308 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
13309 struct sctp_tcb *stcb, uint8_t chunk)
13311 struct mbuf *m_auth;
13312 struct sctp_auth_chunk *auth;
13316 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13320 /* sysctl disabled auth? */
13321 if (SCTP_BASE_SYSCTL(sctp_auth_disable))
13324 /* peer doesn't do auth... */
13325 if (!stcb->asoc.peer_supports_auth) {
13328 /* does the requested chunk require auth? */
13329 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13332 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
13333 if (m_auth == NULL) {
13337 /* reserve some space if this will be the first mbuf */
13339 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13340 /* fill in the AUTH chunk details */
13341 auth = mtod(m_auth, struct sctp_auth_chunk *);
13342 bzero(auth, sizeof(*auth));
13343 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13344 auth->ch.chunk_flags = 0;
13345 chunk_len = sizeof(*auth) +
13346 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13347 auth->ch.chunk_length = htons(chunk_len);
13348 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13349 /* key id and hmac digest will be computed and filled in upon send */
13351 /* save the offset where the auth was inserted into the chain */
13353 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13354 *offset += SCTP_BUF_LEN(cn);
13357 /* update length and return pointer to the auth chunk */
13358 SCTP_BUF_LEN(m_auth) = chunk_len;
13359 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
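/*
 * The AUTH chunk is appended to the chain here; *offset is its
 * byte offset within the chain (the summed lengths of the
 * preceding mbufs), which is where the digest gets written at
 * send time.
 */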
13360 if (auth_ret != NULL)
13368 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
13370 struct nd_prefix *pfx = NULL;
13371 struct nd_pfxrouter *pfxrtr = NULL;
13372 struct sockaddr_in6 gw6;
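/*
 * Look up the ND prefix entry covering src6 and check whether
 * the route's gateway is one of the routers that advertised
 * that prefix; returns nonzero only on a match.
 */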
13374 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13377 /* get prefix entry of address */
13378 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13379 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13381 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13382 &src6->sin6_addr, &pfx->ndpr_mask))
13385 /* no prefix entry in the prefix list */
13387 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13388 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13391 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13392 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13394 /* search installed gateway from prefix entry */
13395 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13396 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13397 gw6.sin6_family = AF_INET6;
13398 gw6.sin6_len = sizeof(struct sockaddr_in6);
13399 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13400 sizeof(struct in6_addr));
13401 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13402 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13403 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13404 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13405 if (sctp_cmpaddr((struct sockaddr *)&gw6,
13406 ro->ro_rt->rt_gateway)) {
13407 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13411 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
13418 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
13421 struct sockaddr_in *sin, *mask;
13422 struct ifaddr *ifa;
13423 struct in_addr srcnetaddr, gwnetaddr;
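/*
 * Mask both the candidate source address and the route's gateway
 * with the interface netmask and compare the resulting network
 * addresses.  With made-up example values: src 192.0.2.10 and
 * gw 192.0.2.1 under a /24 mask both reduce to 192.0.2.0, so
 * they match.
 */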
13425 if (ro == NULL || ro->ro_rt == NULL ||
13426 sifa->address.sa.sa_family != AF_INET) {
13429 ifa = (struct ifaddr *)sifa->ifa;
13430 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13431 sin = (struct sockaddr_in *)&sifa->address.sin;
13432 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13433 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13434 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13435 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13437 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13438 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13439 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13440 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13441 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13442 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {