/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
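
/*
 * Lookup table for SACK generation: one entry per possible value of a byte
 * in the receive mapping array. Each entry records whether the byte's bit
 * pattern can be merged with the neighboring byte on either edge, and how
 * many gap-ack blocks the pattern contributes.
 */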
#define SCTP_MAX_GAPS_INARRAY 4
struct sack_track {
	uint8_t right_edge;	/* mergeable on the right edge */
	uint8_t left_edge;	/* mergeable on the left edge */
	uint8_t num_entries;
	uint8_t spare;
	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};
struct sack_track sack_array[256] = {
	{0, 0, 0, 0,		/* 0x00 */
	{1, 0, 1, 0,		/* 0x01 */
	{0, 0, 1, 0,		/* 0x02 */
	{1, 0, 1, 0,		/* 0x03 */
	{0, 0, 1, 0,		/* 0x04 */
	{1, 0, 2, 0,		/* 0x05 */
	{0, 0, 1, 0,		/* 0x06 */
	{1, 0, 1, 0,		/* 0x07 */
	{0, 0, 1, 0,		/* 0x08 */
	{1, 0, 2, 0,		/* 0x09 */
	{0, 0, 2, 0,		/* 0x0a */
	{1, 0, 2, 0,		/* 0x0b */
	{0, 0, 1, 0,		/* 0x0c */
	{1, 0, 2, 0,		/* 0x0d */
	{0, 0, 1, 0,		/* 0x0e */
	{1, 0, 1, 0,		/* 0x0f */
	{0, 0, 1, 0,		/* 0x10 */
	{1, 0, 2, 0,		/* 0x11 */
	{0, 0, 2, 0,		/* 0x12 */
	{1, 0, 2, 0,		/* 0x13 */
	{0, 0, 2, 0,		/* 0x14 */
	{1, 0, 3, 0,		/* 0x15 */
	{0, 0, 2, 0,		/* 0x16 */
	{1, 0, 2, 0,		/* 0x17 */
	{0, 0, 1, 0,		/* 0x18 */
	{1, 0, 2, 0,		/* 0x19 */
	{0, 0, 2, 0,		/* 0x1a */
	{1, 0, 2, 0,		/* 0x1b */
	{0, 0, 1, 0,		/* 0x1c */
	{1, 0, 2, 0,		/* 0x1d */
	{0, 0, 1, 0,		/* 0x1e */
	{1, 0, 1, 0,		/* 0x1f */
	{0, 0, 1, 0,		/* 0x20 */
	{1, 0, 2, 0,		/* 0x21 */
	{0, 0, 2, 0,		/* 0x22 */
	{1, 0, 2, 0,		/* 0x23 */
	{0, 0, 2, 0,		/* 0x24 */
	{1, 0, 3, 0,		/* 0x25 */
	{0, 0, 2, 0,		/* 0x26 */
	{1, 0, 2, 0,		/* 0x27 */
	{0, 0, 2, 0,		/* 0x28 */
	{1, 0, 3, 0,		/* 0x29 */
	{0, 0, 3, 0,		/* 0x2a */
	{1, 0, 3, 0,		/* 0x2b */
	{0, 0, 2, 0,		/* 0x2c */
	{1, 0, 3, 0,		/* 0x2d */
	{0, 0, 2, 0,		/* 0x2e */
	{1, 0, 2, 0,		/* 0x2f */
	{0, 0, 1, 0,		/* 0x30 */
	{1, 0, 2, 0,		/* 0x31 */
	{0, 0, 2, 0,		/* 0x32 */
	{1, 0, 2, 0,		/* 0x33 */
	{0, 0, 2, 0,		/* 0x34 */
	{1, 0, 3, 0,		/* 0x35 */
	{0, 0, 2, 0,		/* 0x36 */
	{1, 0, 2, 0,		/* 0x37 */
	{0, 0, 1, 0,		/* 0x38 */
	{1, 0, 2, 0,		/* 0x39 */
	{0, 0, 2, 0,		/* 0x3a */
	{1, 0, 2, 0,		/* 0x3b */
	{0, 0, 1, 0,		/* 0x3c */
	{1, 0, 2, 0,		/* 0x3d */
	{0, 0, 1, 0,		/* 0x3e */
	{1, 0, 1, 0,		/* 0x3f */
	{0, 0, 1, 0,		/* 0x40 */
	{1, 0, 2, 0,		/* 0x41 */
	{0, 0, 2, 0,		/* 0x42 */
	{1, 0, 2, 0,		/* 0x43 */
	{0, 0, 2, 0,		/* 0x44 */
	{1, 0, 3, 0,		/* 0x45 */
	{0, 0, 2, 0,		/* 0x46 */
	{1, 0, 2, 0,		/* 0x47 */
	{0, 0, 2, 0,		/* 0x48 */
	{1, 0, 3, 0,		/* 0x49 */
	{0, 0, 3, 0,		/* 0x4a */
	{1, 0, 3, 0,		/* 0x4b */
	{0, 0, 2, 0,		/* 0x4c */
	{1, 0, 3, 0,		/* 0x4d */
	{0, 0, 2, 0,		/* 0x4e */
	{1, 0, 2, 0,		/* 0x4f */
	{0, 0, 2, 0,		/* 0x50 */
	{1, 0, 3, 0,		/* 0x51 */
	{0, 0, 3, 0,		/* 0x52 */
	{1, 0, 3, 0,		/* 0x53 */
	{0, 0, 3, 0,		/* 0x54 */
	{1, 0, 4, 0,		/* 0x55 */
	{0, 0, 3, 0,		/* 0x56 */
	{1, 0, 3, 0,		/* 0x57 */
	{0, 0, 2, 0,		/* 0x58 */
	{1, 0, 3, 0,		/* 0x59 */
	{0, 0, 3, 0,		/* 0x5a */
	{1, 0, 3, 0,		/* 0x5b */
	{0, 0, 2, 0,		/* 0x5c */
	{1, 0, 3, 0,		/* 0x5d */
	{0, 0, 2, 0,		/* 0x5e */
	{1, 0, 2, 0,		/* 0x5f */
	{0, 0, 1, 0,		/* 0x60 */
	{1, 0, 2, 0,		/* 0x61 */
	{0, 0, 2, 0,		/* 0x62 */
	{1, 0, 2, 0,		/* 0x63 */
	{0, 0, 2, 0,		/* 0x64 */
	{1, 0, 3, 0,		/* 0x65 */
	{0, 0, 2, 0,		/* 0x66 */
	{1, 0, 2, 0,		/* 0x67 */
	{0, 0, 2, 0,		/* 0x68 */
	{1, 0, 3, 0,		/* 0x69 */
	{0, 0, 3, 0,		/* 0x6a */
	{1, 0, 3, 0,		/* 0x6b */
	{0, 0, 2, 0,		/* 0x6c */
	{1, 0, 3, 0,		/* 0x6d */
	{0, 0, 2, 0,		/* 0x6e */
	{1, 0, 2, 0,		/* 0x6f */
	{0, 0, 1, 0,		/* 0x70 */
	{1, 0, 2, 0,		/* 0x71 */
	{0, 0, 2, 0,		/* 0x72 */
	{1, 0, 2, 0,		/* 0x73 */
	{0, 0, 2, 0,		/* 0x74 */
	{1, 0, 3, 0,		/* 0x75 */
	{0, 0, 2, 0,		/* 0x76 */
	{1, 0, 2, 0,		/* 0x77 */
	{0, 0, 1, 0,		/* 0x78 */
	{1, 0, 2, 0,		/* 0x79 */
	{0, 0, 2, 0,		/* 0x7a */
	{1, 0, 2, 0,		/* 0x7b */
	{0, 0, 1, 0,		/* 0x7c */
	{1, 0, 2, 0,		/* 0x7d */
	{0, 0, 1, 0,		/* 0x7e */
	{1, 0, 1, 0,		/* 0x7f */
	{0, 1, 1, 0,		/* 0x80 */
	{1, 1, 2, 0,		/* 0x81 */
	{0, 1, 2, 0,		/* 0x82 */
	{1, 1, 2, 0,		/* 0x83 */
	{0, 1, 2, 0,		/* 0x84 */
	{1, 1, 3, 0,		/* 0x85 */
	{0, 1, 2, 0,		/* 0x86 */
	{1, 1, 2, 0,		/* 0x87 */
	{0, 1, 2, 0,		/* 0x88 */
	{1, 1, 3, 0,		/* 0x89 */
	{0, 1, 3, 0,		/* 0x8a */
	{1, 1, 3, 0,		/* 0x8b */
	{0, 1, 2, 0,		/* 0x8c */
	{1, 1, 3, 0,		/* 0x8d */
	{0, 1, 2, 0,		/* 0x8e */
	{1, 1, 2, 0,		/* 0x8f */
	{0, 1, 2, 0,		/* 0x90 */
	{1, 1, 3, 0,		/* 0x91 */
	{0, 1, 3, 0,		/* 0x92 */
	{1, 1, 3, 0,		/* 0x93 */
	{0, 1, 3, 0,		/* 0x94 */
	{1, 1, 4, 0,		/* 0x95 */
	{0, 1, 3, 0,		/* 0x96 */
	{1, 1, 3, 0,		/* 0x97 */
	{0, 1, 2, 0,		/* 0x98 */
	{1, 1, 3, 0,		/* 0x99 */
	{0, 1, 3, 0,		/* 0x9a */
	{1, 1, 3, 0,		/* 0x9b */
	{0, 1, 2, 0,		/* 0x9c */
	{1, 1, 3, 0,		/* 0x9d */
	{0, 1, 2, 0,		/* 0x9e */
	{1, 1, 2, 0,		/* 0x9f */
	{0, 1, 2, 0,		/* 0xa0 */
	{1, 1, 3, 0,		/* 0xa1 */
	{0, 1, 3, 0,		/* 0xa2 */
	{1, 1, 3, 0,		/* 0xa3 */
	{0, 1, 3, 0,		/* 0xa4 */
	{1, 1, 4, 0,		/* 0xa5 */
	{0, 1, 3, 0,		/* 0xa6 */
	{1, 1, 3, 0,		/* 0xa7 */
	{0, 1, 3, 0,		/* 0xa8 */
	{1, 1, 4, 0,		/* 0xa9 */
	{0, 1, 4, 0,		/* 0xaa */
	{1, 1, 4, 0,		/* 0xab */
	{0, 1, 3, 0,		/* 0xac */
	{1, 1, 4, 0,		/* 0xad */
	{0, 1, 3, 0,		/* 0xae */
	{1, 1, 3, 0,		/* 0xaf */
	{0, 1, 2, 0,		/* 0xb0 */
	{1, 1, 3, 0,		/* 0xb1 */
	{0, 1, 3, 0,		/* 0xb2 */
	{1, 1, 3, 0,		/* 0xb3 */
	{0, 1, 3, 0,		/* 0xb4 */
	{1, 1, 4, 0,		/* 0xb5 */
	{0, 1, 3, 0,		/* 0xb6 */
	{1, 1, 3, 0,		/* 0xb7 */
	{0, 1, 2, 0,		/* 0xb8 */
	{1, 1, 3, 0,		/* 0xb9 */
	{0, 1, 3, 0,		/* 0xba */
	{1, 1, 3, 0,		/* 0xbb */
	{0, 1, 2, 0,		/* 0xbc */
	{1, 1, 3, 0,		/* 0xbd */
	{0, 1, 2, 0,		/* 0xbe */
	{1, 1, 2, 0,		/* 0xbf */
	{0, 1, 1, 0,		/* 0xc0 */
	{1, 1, 2, 0,		/* 0xc1 */
	{0, 1, 2, 0,		/* 0xc2 */
	{1, 1, 2, 0,		/* 0xc3 */
	{0, 1, 2, 0,		/* 0xc4 */
	{1, 1, 3, 0,		/* 0xc5 */
	{0, 1, 2, 0,		/* 0xc6 */
	{1, 1, 2, 0,		/* 0xc7 */
	{0, 1, 2, 0,		/* 0xc8 */
	{1, 1, 3, 0,		/* 0xc9 */
	{0, 1, 3, 0,		/* 0xca */
	{1, 1, 3, 0,		/* 0xcb */
	{0, 1, 2, 0,		/* 0xcc */
	{1, 1, 3, 0,		/* 0xcd */
	{0, 1, 2, 0,		/* 0xce */
	{1, 1, 2, 0,		/* 0xcf */
	{0, 1, 2, 0,		/* 0xd0 */
	{1, 1, 3, 0,		/* 0xd1 */
	{0, 1, 3, 0,		/* 0xd2 */
	{1, 1, 3, 0,		/* 0xd3 */
	{0, 1, 3, 0,		/* 0xd4 */
	{1, 1, 4, 0,		/* 0xd5 */
	{0, 1, 3, 0,		/* 0xd6 */
	{1, 1, 3, 0,		/* 0xd7 */
	{0, 1, 2, 0,		/* 0xd8 */
	{1, 1, 3, 0,		/* 0xd9 */
	{0, 1, 3, 0,		/* 0xda */
	{1, 1, 3, 0,		/* 0xdb */
	{0, 1, 2, 0,		/* 0xdc */
	{1, 1, 3, 0,		/* 0xdd */
	{0, 1, 2, 0,		/* 0xde */
	{1, 1, 2, 0,		/* 0xdf */
	{0, 1, 1, 0,		/* 0xe0 */
	{1, 1, 2, 0,		/* 0xe1 */
	{0, 1, 2, 0,		/* 0xe2 */
	{1, 1, 2, 0,		/* 0xe3 */
	{0, 1, 2, 0,		/* 0xe4 */
	{1, 1, 3, 0,		/* 0xe5 */
	{0, 1, 2, 0,		/* 0xe6 */
	{1, 1, 2, 0,		/* 0xe7 */
	{0, 1, 2, 0,		/* 0xe8 */
	{1, 1, 3, 0,		/* 0xe9 */
	{0, 1, 3, 0,		/* 0xea */
	{1, 1, 3, 0,		/* 0xeb */
	{0, 1, 2, 0,		/* 0xec */
	{1, 1, 3, 0,		/* 0xed */
	{0, 1, 2, 0,		/* 0xee */
	{1, 1, 2, 0,		/* 0xef */
	{0, 1, 1, 0,		/* 0xf0 */
	{1, 1, 2, 0,		/* 0xf1 */
	{0, 1, 2, 0,		/* 0xf2 */
	{1, 1, 2, 0,		/* 0xf3 */
	{0, 1, 2, 0,		/* 0xf4 */
	{1, 1, 3, 0,		/* 0xf5 */
	{0, 1, 2, 0,		/* 0xf6 */
	{1, 1, 2, 0,		/* 0xf7 */
	{0, 1, 1, 0,		/* 0xf8 */
	{1, 1, 2, 0,		/* 0xf9 */
	{0, 1, 2, 0,		/* 0xfa */
	{1, 1, 2, 0,		/* 0xfb */
	{0, 1, 1, 0,		/* 0xfc */
	{1, 1, 2, 0,		/* 0xfd */
	{0, 1, 1, 0,		/* 0xfe */
	{1, 1, 1, 0,		/* 0xff */
};
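
/*
 * Illustrative sketch (not part of the original file): one way a SACK
 * builder could consume the table above. Each byte of the receive mapping
 * array indexes sack_array[]; the precomputed entry yields the gap-ack
 * blocks for that byte, which are then shifted by the byte's bit offset.
 * The function name and the offset convention are hypothetical; the real
 * SACK generation code applies its own base offset.
 */
#if 0
static int
example_gaps_from_byte(uint8_t map_byte, int byte_index,
    struct sctp_gap_ack_block *out, int max_out)
{
	const struct sack_track *entry = &sack_array[map_byte];
	int i, cnt = 0;

	for (i = 0; i < entry->num_entries && cnt < max_out; i++) {
		/* table offsets are relative to the start of the byte */
		out[cnt].start = htons(entry->gaps[i].start + (byte_index * 8));
		out[cnt].end = htons(entry->gaps[i].end + (byte_index * 8));
		cnt++;
	}
	return (cnt);
}
#endif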
static int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
    int ipv4_addr_legal,
    int ipv6_addr_legal,
    int loopback_scope,
    int ipv4_local_scope,
    int local_scope,
    int site_scope,
    int do_update)
{
	if ((loopback_scope == 0) &&
	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
		/* skip loopback if not in scope */
		return (0);
	}
	switch (ifa->address.sa.sa_family) {
	case AF_INET:
		if (ipv4_addr_legal) {
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)&ifa->address.sin;
			if (sin->sin_addr.s_addr == 0) {
				/* not in scope, unspecified */
				return (0);
			}
			if ((ipv4_local_scope == 0) &&
			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
				/* private address not in scope */
				return (0);
			}
		} else {
			return (0);
		}
		break;
	case AF_INET6:
		if (ipv6_addr_legal) {
			struct sockaddr_in6 *sin6;

			/*
			 * Must update the flags, bummer, which means any
			 * IFA locks must now be applied HERE <->
			 */
			if (do_update) {
				sctp_gather_internal_ifa_flags(ifa);
			}
			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
				return (0);
			}
			/* ok to use deprecated addresses? */
			sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				/* skip unspecified addresses */
				return (0);
			}
			if ( /* (local_scope == 0) && */
			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
				return (0);
			}
			if ((site_scope == 0) &&
			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
				return (0);
			}
		} else {
			return (0);
		}
		break;
	default:
		return (0);
	}
	return (1);
}
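
/*
 * Append an address parameter (IPv4 or IPv6 TLV) for the given ifa to the
 * INIT/INIT-ACK mbuf chain, growing the chain when the current mbuf lacks
 * trailing space. Returns the mbuf that now holds the parameter.
 */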
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
{
	struct sctp_paramhdr *parmh;
	struct mbuf *mret;
	int len;

	if (ifa->address.sa.sa_family == AF_INET) {
		len = sizeof(struct sctp_ipv4addr_param);
	} else if (ifa->address.sa.sa_family == AF_INET6) {
		len = sizeof(struct sctp_ipv6addr_param);
	} else {
		/* unknown type */
		return (m);
	}
	if (M_TRAILINGSPACE(m) >= len) {
		/* easy side, we just drop it on the end */
		parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
		mret = m;
	} else {
		/* Need more space */
		mret = m;
		while (SCTP_BUF_NEXT(mret) != NULL) {
			mret = SCTP_BUF_NEXT(mret);
		}
		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
		if (SCTP_BUF_NEXT(mret) == NULL) {
			/* We are hosed, can't add more addresses */
			return (m);
		}
		mret = SCTP_BUF_NEXT(mret);
		parmh = mtod(mret, struct sctp_paramhdr *);
	}
	/* now add the parameter */
	switch (ifa->address.sa.sa_family) {
	case AF_INET:
		{
			struct sctp_ipv4addr_param *ipv4p;
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)&ifa->address.sin;
			ipv4p = (struct sctp_ipv4addr_param *)parmh;
			parmh->param_type = htons(SCTP_IPV4_ADDRESS);
			parmh->param_length = htons(len);
			ipv4p->addr = sin->sin_addr.s_addr;
			SCTP_BUF_LEN(mret) += len;
			break;
		}
	case AF_INET6:
		{
			struct sctp_ipv6addr_param *ipv6p;
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
			ipv6p = (struct sctp_ipv6addr_param *)parmh;
			parmh->param_type = htons(SCTP_IPV6_ADDRESS);
			parmh->param_length = htons(len);
			memcpy(ipv6p->addr, &sin6->sin6_addr,
			    sizeof(ipv6p->addr));
			/* clear embedded scope in the address */
			in6_clearscope((struct in6_addr *)ipv6p->addr);
			SCTP_BUF_LEN(mret) += len;
			break;
		}
	default:
		return (m);
	}
	return (mret);
}
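
/*
 * Add the local addresses that pass the given scoping rules to an INIT or
 * INIT-ACK chain. For a bound-all endpoint every usable interface address
 * is considered (capped at SCTP_ADDRESS_LIMIT); for a bound-specific
 * endpoint only the bound list is walked, and addresses are listed only
 * when more than one is bound (the NAT-friendly behavior noted below).
 */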
static struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to)
{
	struct sctp_vrf *vrf = NULL;
	int cnt, limit_out = 0, total_count;
	uint32_t vrf_id;

	vrf_id = inp->def_vrf_id;
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		SCTP_IPI_ADDR_RUNLOCK();
		return (m_at);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifa *sctp_ifap;
		struct sctp_ifn *sctp_ifnp;

		cnt = cnt_inits_to;
		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
			limit_out = 1;
			cnt = SCTP_ADDRESS_LIMIT;
			goto skip_count;
		}
		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
			if ((scope->loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
				/*
				 * Skip loopback devices if loopback_scope
				 * not set
				 */
				continue;
			}
			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
				if (sctp_is_address_in_scope(sctp_ifap,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 1) == 0) {
					continue;
				}
				cnt++;
				if (cnt > SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
			if (cnt > SCTP_ADDRESS_LIMIT) {
				break;
			}
		}
skip_count:
		if (cnt > 1) {
			total_count = 0;
			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
				cnt = 0;
				if ((scope->loopback_scope == 0) &&
				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
					/*
					 * Skip loopback devices if
					 * loopback_scope not set
					 */
					continue;
				}
				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
					if (sctp_is_address_in_scope(sctp_ifap,
					    scope->ipv4_addr_legal,
					    scope->ipv6_addr_legal,
					    scope->loopback_scope,
					    scope->ipv4_local_scope,
					    scope->local_scope,
					    scope->site_scope, 0) == 0) {
						continue;
					}
					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
					cnt++;
					total_count++;
				}
				if (total_count > SCTP_ADDRESS_LIMIT) {
					/* No more addresses */
					break;
				}
			}
		}
	} else {
		struct sctp_laddr *laddr;

		cnt = cnt_inits_to;
		/* First, how many ? */
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				continue;
			}
			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/*
				 * Address being deleted by the system; don't
				 * list.
				 */
				continue;
			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
				/*
				 * Address being deleted on this ep; don't
				 * list.
				 */
				continue;
			}
			if (sctp_is_address_in_scope(laddr->ifa,
			    scope->ipv4_addr_legal,
			    scope->ipv6_addr_legal,
			    scope->loopback_scope,
			    scope->ipv4_local_scope,
			    scope->local_scope,
			    scope->site_scope, 1) == 0) {
				continue;
			}
			cnt++;
			if (cnt > SCTP_ADDRESS_LIMIT) {
				break;
			}
		}
		/*
		 * To get through a NAT we only list addresses if we have
		 * more than one. That way if you just bind a single address
		 * we let the source of the init dictate our address.
		 */
		if (cnt > 1) {
			cnt = 0;
			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
				if (laddr->ifa == NULL) {
					continue;
				}
				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
					continue;
				if (sctp_is_address_in_scope(laddr->ifa,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 0) == 0) {
					continue;
				}
				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
				cnt++;
				if (cnt >= SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (m_at);
}
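
/*
 * Decide whether ifa is a "preferred" source for a destination with the
 * given scope flags, per the table in the comment below. Returns the ifa
 * when preferred, NULL otherwise.
 */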
static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	uint8_t dest_is_global = 0;

	/* dest_is_priv is true if destination is a private address */
	/* dest_is_loop is true if destination is a loopback address */

	/*-
	 * Here we determine if it is a preferred address. A preferred address
	 * means it is the same scope or higher scope than the destination.
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *    src    |  dest | result
	 * -----------------------------------------
	 *     L     |   L   | yes
	 * -----------------------------------------
	 *     P     |   L   | yes-v4 no-v6
	 * -----------------------------------------
	 *     G     |   L   | yes-v4 no-v6
	 * -----------------------------------------
	 *     L     |   P   | no
	 * -----------------------------------------
	 *     P     |   P   | yes
	 * -----------------------------------------
	 *     G     |   P   | no
	 * -----------------------------------------
	 *     L     |   G   | no
	 * -----------------------------------------
	 *     P     |   G   | no
	 * -----------------------------------------
	 *     G     |   G   | yes
	 * -----------------------------------------
	 */
	if (ifa->address.sa.sa_family != fam) {
		/* forget mis-matched family */
		return (NULL);
	}
	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
		dest_is_global = 1;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
	/* Ok the address may be ok */
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? no, let's not! */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
			return (NULL);
		}
		if (ifa->src_is_priv && !ifa->src_is_loop) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
				return (NULL);
			}
		}
		if (ifa->src_is_glob) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
				return (NULL);
			}
		}
	}
	/*
	 * Now that we know what is what, implement our table. This could in
	 * theory be done slicker (it used to be), but this is
	 * straightforward and easier to validate :-)
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
	    ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
	    dest_is_loop, dest_is_priv, dest_is_global);

	if ((ifa->src_is_loop) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
		return (NULL);
	}
	if ((ifa->src_is_glob) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
		return (NULL);
	}
	if ((ifa->src_is_loop) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
		return (NULL);
	}
	if ((ifa->src_is_priv) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
	/* it's a preferred address */
	return (ifa);
}
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	uint8_t dest_is_global = 0;

	/*-
	 * Here we determine if it is an acceptable address. An acceptable
	 * address means it is the same scope or higher scope, but we can
	 * allow for NAT, which means it is ok to have a global dest and a
	 * private src.
	 *
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *  src    |  dest | result
	 * -----------------------------------------
	 *   L     |   L   | yes
	 * -----------------------------------------
	 *   P     |   L   | yes-v4 no-v6
	 * -----------------------------------------
	 *   G     |   L   | yes
	 * -----------------------------------------
	 *   L     |   P   | no
	 * -----------------------------------------
	 *   P     |   P   | yes
	 * -----------------------------------------
	 *   G     |   P   | yes - May not work
	 * -----------------------------------------
	 *   L     |   G   | no
	 * -----------------------------------------
	 *   P     |   G   | yes - May not work
	 * -----------------------------------------
	 *   G     |   G   | yes
	 * -----------------------------------------
	 */
	if (ifa->address.sa.sa_family != fam) {
		/* forget non-matching family */
		return (NULL);
	}
	/* Ok the address may be ok */
	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
		dest_is_global = 1;
	}
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			return (NULL);
		}
		if (ifa->src_is_priv) {
			/* Special case, linklocal to loop */
			if (dest_is_loop)
				return (NULL);
		}
	}
	/*
	 * Now that we know what is what, implement our table. This could in
	 * theory be done slicker (it used to be), but this is
	 * straightforward and easier to validate :-)
	 */
	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
		return (NULL);
	}
	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
		return (NULL);
	}
	/* it's an acceptable address */
	return (ifa);
}
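
/*
 * Check whether ifa is on the association's restricted-address list; with
 * no TCB there are no restrictions.
 */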
static int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;

	if (stcb == NULL) {
		/* There are no restrictions, no TCB :-) */
		return (0);
	}
	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
			    __FUNCTION__);
			continue;
		}
		if (laddr->ifa == ifa) {
			/* Yes it is on the list */
			return (1);
		}
	}
	return (0);
}

static int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;

	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
			    __FUNCTION__);
			continue;
		}
		if ((laddr->ifa == ifa) && laddr->action == 0)
			return (1);
	}
	return (0);
}
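
/*
 * Source selection for a bound-specific endpoint with no association:
 * first try a preferred bound address on the interface we route out of,
 * then rotate through the bound list for a preferred address, and finally
 * settle for an acceptable one.
 */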
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
    sctp_route_t *ro,
    uint32_t vrf_id,
    int non_asoc_addr_ok,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	int resettotop = 0;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	/*
	 * first question, is the ifn we will emit on in our list? If so, we
	 * want such an address. Note that we first look for a preferred
	 * address.
	 */
	if (sctp_ifn) {
		/* is a preferred one on the interface we route out? */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
			    (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
			    dest_is_loop,
			    dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (sctp_is_addr_in_ep(inp, sifa)) {
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
	}
	/*
	 * ok, now we need to find one on the list of the addresses. We
	 * can't get one on the emitting interface, so let's find first a
	 * preferred one. If not that, an acceptable one; otherwise we give
	 * up.
	 */
	starting_point = inp->next_addr_touse;
once_again:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	for (laddr = inp->next_addr_touse; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again;
	}
	inp->next_addr_touse = starting_point;
	resettotop = 0;
once_again_too:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	/* ok, what about an acceptable address in the inp */
	for (laddr = inp->next_addr_touse; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again_too;
	}
	/*
	 * no address bound can be a source for the destination; we are in
	 * trouble
	 */
	return (NULL);
}
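
/*
 * Like the inp-only variant above, but association-aware: restricted or
 * pending addresses are skipped, and rotation resumes from the
 * association's last_used_address.
 */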
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t *ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint8_t start_at_beginning = 0;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);

	/*
	 * first question, is the ifn we will emit on in our list? If so,
	 * we want that one. First we look for a preferred. Second, we go
	 * for an acceptable.
	 */
	if (sctp_ifn) {
		/* first try for a preferred address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				    (sctp_is_addr_restricted(stcb, sifa)) &&
				    (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
		/* next try for an acceptable address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				    (sctp_is_addr_restricted(stcb, sifa)) &&
				    (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
	}
	/*
	 * if we can't find one like that then we must look at all addresses
	 * bound to pick one at first preferable then secondly acceptable.
	 */
	starting_point = stcb->asoc.last_used_address;
sctp_from_the_top:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		    (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		    (sctp_is_addr_restricted(stcb, sifa)) &&
		    (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top;
	}
	/* now try for any higher scope than the destination */
	stcb->asoc.last_used_address = starting_point;
	start_at_beginning = 0;
sctp_from_the_top2:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		    (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		    (sctp_is_addr_restricted(stcb, sifa)) &&
		    (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top2;
	}
	return (NULL);
}
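
/*
 * For a bound-all endpoint, walk the interface's address list and return
 * the addr_wanted'th eligible preferred address, applying the link-local
 * scope-id and mobility checks below.
 */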
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    int addr_wanted,
    sa_family_t fam,
    sctp_route_t *ro)
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;
	struct sockaddr_in6 sin6, lsa6;

	if (fam == AF_INET6) {
		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
		(void)sa6_recoverscope(&sin6);
	}
	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0))
			continue;
		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (fam == AF_INET6 &&
		    dest_is_loop &&
		    sifa->src_is_loop && sifa->src_is_priv) {
			/*
			 * don't allow fe80::1 to be a src on loop ::1, we
			 * don't list it to the peer so we will get an
			 * abort.
			 */
			continue;
		}
		if (fam == AF_INET6 &&
		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
			/*
			 * link-local <-> link-local must belong to the same
			 * scope.
			 */
			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
			(void)sa6_recoverscope(&lsa6);
			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
				continue;
			}
		}
		/*
		 * Check if the IPv6 address matches the next-hop. In the
		 * mobile case, an old IPv6 address may not be deleted from
		 * the interface. Then, the interface has previous and new
		 * addresses. We should use the one corresponding to the
		 * next-hop. (by micchie)
		 */
		if (stcb && fam == AF_INET6 &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
			    == 0) {
				continue;
			}
		}
		/* Avoid topologically incorrect IPv4 address */
		if (stcb && fam == AF_INET &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
				continue;
			}
		}
		if (stcb) {
			if (sctp_is_address_in_scope(ifa,
			    stcb->asoc.ipv4_addr_legal,
			    stcb->asoc.ipv6_addr_legal,
			    stcb->asoc.loopback_scope,
			    stcb->asoc.ipv4_local_scope,
			    stcb->asoc.local_scope,
			    stcb->asoc.site_scope, 0) == 0) {
				continue;
			}
			if (((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			    (sctp_is_addr_restricted(stcb, sifa)) &&
			    (!sctp_is_addr_pending(stcb, sifa)))) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		if (num_eligible_addr >= addr_wanted) {
			return (sifa);
		}
		num_eligible_addr++;
	}
	return (NULL);
}
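
/*
 * Companion to the selector above: count how many preferred, in-scope,
 * non-restricted addresses the interface offers for this destination.
 */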
static int
sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;

	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0)) {
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL) {
			continue;
		}
		if (stcb) {
			if (sctp_is_address_in_scope(ifa,
			    stcb->asoc.ipv4_addr_legal,
			    stcb->asoc.ipv6_addr_legal,
			    stcb->asoc.loopback_scope,
			    stcb->asoc.ipv4_local_scope,
			    stcb->asoc.local_scope,
			    stcb->asoc.site_scope, 0) == 0) {
				continue;
			}
			if (((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			    (sctp_is_addr_restricted(stcb, sifa)) &&
			    (!sctp_is_addr_pending(stcb, sifa)))) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		num_eligible_addr++;
	}
	return (num_eligible_addr);
}
static struct sctp_ifa *
sctp_choose_boundall(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t *ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
	int cur_addr_num = 0, num_preferred = 0;
	void *ifn;
	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint32_t ifn_index;
	struct sctp_vrf *vrf;

	/*-
	 * For boundall we can use any address in the association.
	 * If non_asoc_addr_ok is set we can use any address (at least in
	 * theory). So we look for preferred addresses first. If we find one,
	 * we use it. Otherwise we next try to get an address on the
	 * interface, which we should be able to do (unless non_asoc_addr_ok
	 * is false and we are routed out that way). In these cases where we
	 * can't use the address of the interface we go through all the
	 * ifn's looking for an address we can use and fill that in. Punting
	 * means we send back address 0, which will probably cause problems
	 * actually since then IP will fill in the address of the route ifn,
	 * which means we probably already rejected it.. i.e. here comes an
	 * abort :-<.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	if (sctp_ifn == NULL) {
		/* ?? We don't have this guy ?? */
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
		goto bound_all_plan_b;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
	    ifn_index, sctp_ifn->ifn_name);

	if (net) {
		cur_addr_num = net->indx_of_eligible_next_to_use;
	}
	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
	    stcb,
	    non_asoc_addr_ok,
	    dest_is_loop,
	    dest_is_priv, fam);
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
	    num_preferred, sctp_ifn->ifn_name);
	if (num_preferred == 0) {
		/*
		 * no eligible addresses, we must use some other interface
		 * address if we can find one.
		 */
		goto bound_all_plan_b;
	}
	/*
	 * Ok we have num_eligible_addr set with how many we can use, this
	 * may vary from call to call due to addresses being deprecated
	 * etc..
	 */
	if (cur_addr_num >= num_preferred) {
		cur_addr_num = 0;
	}
	/*
	 * select the nth address from the list (where cur_addr_num is the
	 * nth) and 0 is the first one, 1 is the second one etc...
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);

	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
	    dest_is_priv, cur_addr_num, fam, ro);

	/* if sctp_ifa is NULL something changed??, fall to plan b. */
	if (sctp_ifa) {
		atomic_add_int(&sctp_ifa->refcount, 1);
		if (net) {
			/* save off where the next one we will want */
			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
		}
		return (sctp_ifa);
	}
	/*
	 * plan_b: Look at all interfaces and find a preferred address. If
	 * no preferred fall through to plan_c.
	 */
bound_all_plan_b:
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
		    sctp_ifn->ifn_name);
		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
			/* wrong base scope */
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
			continue;
		}
		if ((sctp_ifn == looked_at) && looked_at) {
			/* already looked at this guy */
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
			continue;
		}
		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
		    dest_is_loop, dest_is_priv, fam);
		SCTPDBG(SCTP_DEBUG_OUTPUT2,
		    "Found ifn:%p %d preferred source addresses\n",
		    ifn, num_preferred);
		if (num_preferred == 0) {
			/* None on this interface. */
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
			continue;
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT2,
		    "num preferred:%d on interface:%p cur_addr_num:%d\n",
		    num_preferred, sctp_ifn, cur_addr_num);

		/*
		 * Ok we have num_eligible_addr set with how many we can
		 * use, this may vary from call to call due to addresses
		 * being deprecated etc..
		 */
		if (cur_addr_num >= num_preferred) {
			cur_addr_num = 0;
		}
		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
		    dest_is_priv, cur_addr_num, fam, ro);
		if (sifa == NULL)
			continue;
		if (net) {
			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
			    cur_addr_num);
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
		}
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}

	/* plan_c: do we have an acceptable address on the emit interface */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
	if (emit_ifn == NULL) {
		goto plan_d;
	}
	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0))
			continue;
		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (stcb) {
			if (sctp_is_address_in_scope(sifa,
			    stcb->asoc.ipv4_addr_legal,
			    stcb->asoc.ipv6_addr_legal,
			    stcb->asoc.loopback_scope,
			    stcb->asoc.ipv4_local_scope,
			    stcb->asoc.local_scope,
			    stcb->asoc.site_scope, 0) == 0) {
				continue;
			}
			if (((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			    (sctp_is_addr_restricted(stcb, sifa)) &&
			    (!sctp_is_addr_pending(stcb, sifa)))) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
plan_d:
	/*
	 * plan_d: We are in trouble. No preferred address on the emit
	 * interface. And not even a preferred address on all interfaces. Go
	 * out and see if we can find an acceptable address somewhere
	 * amongst all interfaces.
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D\n");
	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
			/* wrong base scope */
			continue;
		}
		if ((sctp_ifn == looked_at) && looked_at)
			/* already looked at this guy */
			continue;

		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
			    (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
			    dest_is_loop,
			    dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (stcb) {
				if (sctp_is_address_in_scope(sifa,
				    stcb->asoc.ipv4_addr_legal,
				    stcb->asoc.ipv6_addr_legal,
				    stcb->asoc.loopback_scope,
				    stcb->asoc.ipv4_local_scope,
				    stcb->asoc.local_scope,
				    stcb->asoc.site_scope, 0) == 0) {
					continue;
				}
				if (((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				    (sctp_is_addr_restricted(stcb, sifa)) &&
				    (!sctp_is_addr_pending(stcb, sifa)))) {
					/*
					 * It is restricted for some
					 * reason.. probably not yet added.
					 */
					continue;
				}
			}
			atomic_add_int(&sifa->refcount, 1);
			return (sifa);
		}
	}
	/*
	 * Ok we can find NO address to source from that is not on our
	 * restricted list and non_asoc_address is NOT ok, or it is on our
	 * restricted list. We can't source to it :-(
	 */
	return (NULL);
}
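
/*
 * Top-level source address selection: allocate/cache a route if needed,
 * classify the destination's scope, then dispatch to the bound-all or
 * bound-specific chooser above, all under the address-list read lock.
 */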
/* tcb may be NULL */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t *ro,
    struct sctp_nets *net,
    int non_asoc_addr_ok, uint32_t vrf_id)
{
	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
	struct sctp_ifa *answer;
	uint8_t dest_is_priv, dest_is_loop;
	sa_family_t fam;

	/*-
	 * Rules:
	 * - Find the route if needed, cache if I can.
	 * - Look at interface address in route. Is it in the bound list? If
	 *   so we have the best source.
	 * - If not we must rotate amongst the addresses.
	 *
	 * Caveats and issues:
	 *
	 * Do we need to pay attention to scope? We can have a private address
	 * or a global address we are sourcing or sending to. So if we draw
	 * it out
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 * For V4
	 * ------------------------------------------
	 *      source     *      dest   *  result
	 * -----------------------------------------
	 * <a>  Private    *    Global   *  NAT
	 * -----------------------------------------
	 * <b>  Private    *    Private  *  No problem
	 * -----------------------------------------
	 * <c>  Global     *    Private  *  Huh, How will this work?
	 * -----------------------------------------
	 * <d>  Global     *    Global   *  No Problem
	 *------------------------------------------
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 * For V6
	 *------------------------------------------
	 *      source     *      dest     *  result
	 * -----------------------------------------
	 * <a>  Linklocal  *    Global     *
	 * -----------------------------------------
	 * <b>  Linklocal  *    Linklocal  *  No problem
	 * -----------------------------------------
	 * <c>  Global     *    Linklocal  *  Huh, How will this work?
	 * -----------------------------------------
	 * <d>  Global     *    Global     *  No Problem
	 *------------------------------------------
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 *
	 * And then we add to that what happens if there are multiple addresses
	 * assigned to an interface. Remember the ifa on a ifn is a linked
	 * list of addresses. So one interface can have more than one IP
	 * address. What happens if we have both a private and a global
	 * address? Do we then use context of destination to sort out which
	 * one is best? And what about NATs? Sending P->G may get you a NAT
	 * translation, or should you select the G that's on the interface in
	 * preference?
	 *
	 * Decisions:
	 * - count the number of addresses on the interface.
	 * - if it is one, no problem except case <c>.
	 *   For <a> we will assume a NAT out there.
	 * - if there are more than one, then we need to worry about scope P
	 *   or G. We should prefer G -> G and P -> P if possible.
	 *   Then as a secondary fall back to mixed types G->P being a last
	 *   ditch one.
	 * - The above all works for bound all, but bound specific we need to
	 *   use the same concept but instead only consider the bound
	 *   addresses. If the bound set is NOT assigned to the interface then
	 *   we must use rotation amongst the bound addresses.
	 */
	if (ro->ro_rt == NULL) {
		/*
		 * Need a route to cache.
		 */
		SCTP_RTALLOC(ro, vrf_id);
	}
	if (ro->ro_rt == NULL) {
		return (NULL);
	}
	fam = to->sin_family;
	dest_is_priv = dest_is_loop = 0;
	/* Setup our scopes for the destination */
	switch (fam) {
	case AF_INET:
		/* Scope based on outbound address */
		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
			dest_is_priv = 1;
		}
		break;
	case AF_INET6:
		/* Scope based on outbound address */
		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
			/*
			 * If the address is a loopback address, which
			 * consists of "::1" OR "fe80::1%lo0", we are
			 * loopback scope. But we don't use dest_is_priv
			 * (link local addresses).
			 */
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
			dest_is_priv = 1;
		}
		break;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)to);
	SCTP_IPI_ADDR_RLOCK();
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * Bound all case
		 */
		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
		    dest_is_priv, dest_is_loop,
		    non_asoc_addr_ok, fam);
		SCTP_IPI_ADDR_RUNLOCK();
		return (answer);
	}
	/*
	 * Subset bound case
	 */
	if (stcb) {
		answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro,
		    vrf_id, dest_is_priv,
		    dest_is_loop,
		    non_asoc_addr_ok, fam);
	} else {
		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
		    non_asoc_addr_ok,
		    dest_is_priv,
		    dest_is_loop, fam);
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (answer);
}
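
/*
 * Scan a control mbuf chain for a cmsg of type c_type at level
 * IPPROTO_SCTP and copy up to cpsize bytes of its payload into data.
 *
 * Hypothetical usage sketch (not in the original file): pulling the
 * caller's send parameters out of a control chain; SCTP_SNDRCV with
 * struct sctp_sndrcvinfo is the usual cmsg type/payload pair.
 */
#if 0
	struct sctp_sndrcvinfo srcv;

	if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control, sizeof(srcv))) {
		/* srcv now holds the user's stream, ppid, flags, etc. */
	}
#endif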
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
{
	struct cmsghdr cmh;
	int tlen, at;

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	/*
	 * Independent of how many mbufs, find the c_type inside the control
	 * structure and copy out the data.
	 */
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* not enough room for one more, we are done. */
			return (0);
		}
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if (((int)cmh.cmsg_len + at) > tlen) {
			/*
			 * this is real messed up since there is not enough
			 * data here to cover the cmsg header. We are done.
			 */
			return (0);
		}
		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
		    (c_type == cmh.cmsg_type)) {
			/* found the one we want, copy it out */
			at += CMSG_ALIGN(sizeof(struct cmsghdr));
			if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
				/*
				 * space of cmsg_len after header not big
				 * enough
				 */
				return (0);
			}
			m_copydata(control, at, cpsize, data);
			return (1);
		} else {
			at += CMSG_ALIGN(cmh.cmsg_len);
			if (cmh.cmsg_len == 0) {
				break;
			}
		}
	}
	/* not found */
	return (0);
}
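
/*
 * Build the state-cookie parameter: a paramhdr plus the stc_in data,
 * followed by copies of the received INIT and our INIT-ACK, with space
 * for the signature reserved at the tail; *signature is pointed at that
 * space so the caller can sign the cookie.
 */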
static struct mbuf *
sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint8_t *foo;
	int sig_offset;
	uint16_t cookie_sz;

	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr)), 0,
	    M_DONTWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		mat = copy_init;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_DONTWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		mat = copy_initack;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
	/* easy side, we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end */
	/* Fill in the stc cookie data */
	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));

	/* tack the INIT and then the INIT-ACK onto the chain */
	cookie_sz = 0;
	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_init;
			break;
		}
	}
	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_initack;
			break;
		}
	}
	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			break;
		}
	}
	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
	if (sig == NULL) {
		/* no space, so free the entire chain */
		sctp_m_freem(mret);
		return (NULL);
	}
	SCTP_BUF_LEN(sig) = 0;
	SCTP_BUF_NEXT(m_at) = sig;
	sig_offset = 0;
	foo = (uint8_t *)(mtod(sig, caddr_t)+sig_offset);
	memset(foo, 0, SCTP_SIGNATURE_SIZE);
	*signature = foo;
	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
	cookie_sz += SCTP_SIGNATURE_SIZE;
	ph->param_length = htons(cookie_sz);
	return (mret);
}
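
/*
 * Return the ECT bits to set in the TOS/traffic-class field: ECT0 when
 * the association has ECN enabled, 0 otherwise.
 */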
static uint8_t
sctp_get_ect(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
		return (SCTP_ECT0_BIT);
	} else {
		return (0);
	}
}
3402 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3403 struct sctp_tcb *stcb, /* may be NULL */
3404 struct sctp_nets *net,
3405 struct sockaddr *to,
3407 uint32_t auth_offset,
3408 struct sctp_auth_chunk *auth,
3409 uint16_t auth_keyid,
3410 int nofragment_flag,
3412 struct sctp_tmit_chunk *chk,
3419 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3422 union sctp_sockstore *over_addr,
3425 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3428 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
3429 * header WITH an SCTPHDR but no IP header, endpoint inp and sa
3430 * structure: - fill in the HMAC digest of any AUTH chunk in the
3431 * packet. - calculate and fill in the SCTP checksum. - prepend an
3432 * IP address header. - if boundall use INADDR_ANY. - if
3433 * boundspecific do source address selection. - set fragmentation
3434 * option for ipV4. - On return from IP output, check/adjust mtu
3435 * size of output interface and smallest_mtu size as well.
3437 /* Will need ifdefs around this */
3440 struct sctphdr *sctphdr;
3444 sctp_route_t *ro = NULL;
3445 struct udphdr *udp = NULL;
3447 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3448 struct socket *so = NULL;
3452 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3453 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
3458 vrf_id = stcb->asoc.vrf_id;
3460 vrf_id = inp->def_vrf_id;
3463 /* fill in the HMAC digest for any AUTH chunk in the packet */
3464 if ((auth != NULL) && (stcb != NULL)) {
3465 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
3467 if (to->sa_family == AF_INET) {
3468 struct ip *ip = NULL;
3469 sctp_route_t iproute;
3473 len = sizeof(struct ip) + sizeof(struct sctphdr);
3475 len += sizeof(struct udphdr);
3477 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
3480 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3483 SCTP_ALIGN_TO_END(newm, len);
3484 SCTP_BUF_LEN(newm) = len;
3485 SCTP_BUF_NEXT(newm) = m;
3489 if (net->flowidset == 0) {
3490 panic("Flow ID not set");
3493 m->m_pkthdr.flowid = net->flowid;
3494 m->m_flags |= M_FLOWID;
3496 if ((init != NULL) && (init->m_flags & M_FLOWID)) {
3497 m->m_pkthdr.flowid = init->m_pkthdr.flowid;
3498 m->m_flags |= M_FLOWID;
3501 packet_length = sctp_calculate_len(m);
3502 ip = mtod(m, struct ip *);
3503 ip->ip_v = IPVERSION;
3504 ip->ip_hl = (sizeof(struct ip) >> 2);
3506 tos_value = net->tos_flowlabel & 0x000000ff;
3508 tos_value = inp->ip_inp.inp.inp_ip_tos;
3510 if ((nofragment_flag) && (port == 0)) {
3515 /* FreeBSD has a function for ip_id's */
3516 ip->ip_id = ip_newid();
3518 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
3519 ip->ip_len = packet_length;
3520 ip->ip_tos = tos_value & 0xfc;
3522 ip->ip_tos |= sctp_get_ect(stcb, chk);
3525 ip->ip_p = IPPROTO_UDP;
3527 ip->ip_p = IPPROTO_SCTP;
3532 memset(&iproute, 0, sizeof(iproute));
3533 memcpy(&ro->ro_dst, to, to->sa_len);
3535 ro = (sctp_route_t *) & net->ro;
3537 /* Now the address selection part */
3538 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
3540 /* call the routine to select the src address */
3541 if (net && out_of_asoc_ok == 0) {
3542 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
3543 sctp_free_ifa(net->ro._s_addr);
3544 net->ro._s_addr = NULL;
3545 net->src_addr_selected = 0;
3551 if (net->src_addr_selected == 0) {
3552 /* Cache the source address */
3553 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
3556 net->src_addr_selected = 1;
3558 if (net->ro._s_addr == NULL) {
3559 /* No route to host */
3560 net->src_addr_selected = 0;
3563 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
3565 if (over_addr == NULL) {
3566 struct sctp_ifa *_lsrc;
3568 _lsrc = sctp_source_address_selection(inp, stcb, ro,
3572 if (_lsrc == NULL) {
3575 ip->ip_src = _lsrc->address.sin.sin_addr;
3576 sctp_free_ifa(_lsrc);
3578 ip->ip_src = over_addr->sin.sin_addr;
3579 SCTP_RTALLOC(ro, vrf_id);
3583 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
3584 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
3585 udp->uh_dport = port;
3586 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
3587 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
3588 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
3590 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
3593 sctphdr->src_port = src_port;
3594 sctphdr->dest_port = dest_port;
3595 sctphdr->v_tag = v_tag;
3596 sctphdr->checksum = 0;
3599 * If source address selection fails and we find no route
3600 * then the ip_output should fail as well with a
3601 * NO_ROUTE_TO_HOST type error. We probably should catch
3602 * that somewhere and abort the association right away
3603 * (assuming this is an INIT being sent).
3605 if ((ro->ro_rt == NULL)) {
3607 * src addr selection failed to find a route (or
3608 * valid source addr), so we can't get there from
3612 SCTPDBG(SCTP_DEBUG_OUTPUT1,
3613 "%s: dropped packet - no valid source addr\n",
3616 SCTPDBG(SCTP_DEBUG_OUTPUT1,
3617 "Destination was ");
3618 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1,
3619 &net->ro._l_addr.sa);
3620 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3621 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3622 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
3623 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3625 SCTP_FAILED_THRESHOLD,
3628 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3629 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
3631 * JRS 5/14/07 - If a
3633 * unreachable, the PF bit
3634 * is turned off. This
3635 * allows an unambiguous use
3637 * destinations that are
3638 * reachable but potentially
3640 * destination is set to the
3641 * unreachable state, also
3642 * set the destination to
3646 * Add debug message here if
3647 * destination is not in PF
3651 * Stop any running T3
3654 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
3655 (stcb->asoc.sctp_cmt_pf > 0)) {
3656 net->dest_state &= ~SCTP_ADDR_PF;
3657 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
3663 if (net == stcb->asoc.primary_destination) {
3664 /* need a new primary */
3665 struct sctp_nets *alt;
3667 alt = sctp_find_alternate_net(stcb, net, 0);
3669 if (sctp_set_primary_addr(stcb,
3670 (struct sockaddr *)NULL,
3672 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
3673 if (net->ro._s_addr) {
3674 sctp_free_ifa(net->ro._s_addr);
3675 net->ro._s_addr = NULL;
3677 net->src_addr_selected = 0;
3683 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
3685 return (EHOSTUNREACH);
3687 if (ro != &iproute) {
3688 memcpy(&iproute, ro, sizeof(*ro));
3690 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
3691 (uint32_t) (ntohl(ip->ip_src.s_addr)));
3692 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
3693 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
3694 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
3697 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
3698 /* failed to prepend data, give up */
3699 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3703 #ifdef SCTP_PACKET_LOGGING
3704 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
3705 sctp_packet_log(m, packet_length);
3707 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
3709 #if defined(SCTP_WITH_NO_CSUM)
3710 SCTP_STAT_INCR(sctps_sendnocrc);
3712 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
3714 (stcb->asoc.loopback_scope))) {
3715 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
3716 SCTP_STAT_INCR(sctps_sendswcrc);
3718 SCTP_STAT_INCR(sctps_sendnocrc);
3721 SCTP_ENABLE_UDP_CSUM(o_pak);
3723 #if defined(SCTP_WITH_NO_CSUM)
3724 SCTP_STAT_INCR(sctps_sendnocrc);
3726 m->m_pkthdr.csum_flags = CSUM_SCTP;
3727 m->m_pkthdr.csum_data = 0;
3728 SCTP_STAT_INCR(sctps_sendhwcrc);
3731 /* send it out. table id is taken from stcb */
3732 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3733 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
3734 so = SCTP_INP_SO(inp);
3735 SCTP_SOCKET_UNLOCK(so, 0);
3738 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
3739 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3740 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
3741 atomic_add_int(&stcb->asoc.refcnt, 1);
3742 SCTP_TCB_UNLOCK(stcb);
3743 SCTP_SOCKET_LOCK(so, 0);
3744 SCTP_TCB_LOCK(stcb);
3745 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3748 SCTP_STAT_INCR(sctps_sendpackets);
3749 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
3751 SCTP_STAT_INCR(sctps_senderrors);
3753 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
3755 /* free tempy routes */
3761 /* PMTU check versus smallest asoc MTU goes here */
3762 if ((ro->ro_rt != NULL) &&
3763 (net->ro._s_addr)) {
3766 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
3768 mtu -= sizeof(struct udphdr);
3770 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
3771 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
3774 } else if (ro->ro_rt == NULL) {
3775 /* route was freed */
3776 if (net->ro._s_addr &&
3777 net->src_addr_selected) {
3778 sctp_free_ifa(net->ro._s_addr);
3779 net->ro._s_addr = NULL;
3781 net->src_addr_selected = 0;
3787 else if (to->sa_family == AF_INET6) {
3789 struct ip6_hdr *ip6h;
3790 struct route_in6 ip6route;
3793 uint16_t flowBottom;
3794 u_char tosBottom, tosTop;
3795 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
3797 struct sockaddr_in6 lsa6_storage;
3799 u_short prev_port = 0;
3803 flowlabel = net->tos_flowlabel;
3805 flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
3808 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
3810 len += sizeof(struct udphdr);
3812 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
3815 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3818 SCTP_ALIGN_TO_END(newm, len);
3819 SCTP_BUF_LEN(newm) = len;
3820 SCTP_BUF_NEXT(newm) = m;
3824 if (net->flowidset == 0) {
3825 panic("Flow ID not set");
3828 m->m_pkthdr.flowid = net->flowid;
3829 m->m_flags |= M_FLOWID;
3831 if ((init != NULL) && (init->m_flags & M_FLOWID)) {
3832 m->m_pkthdr.flowid = init->m_pkthdr.flowid;
3833 m->m_flags |= M_FLOWID;
3836 packet_length = sctp_calculate_len(m);
3838 ip6h = mtod(m, struct ip6_hdr *);
3840 * We assume here that inp_flow is in host byte order within
3843 flowBottom = flowlabel & 0x0000ffff;
3844 flowTop = ((flowlabel & 0x000f0000) >> 16);
3845 tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
3846 /* protect *sin6 from overwrite */
3847 sin6 = (struct sockaddr_in6 *)to;
3851 /* KAME hack: embed scopeid */
3852 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
3853 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3857 memset(&ip6route, 0, sizeof(ip6route));
3858 ro = (sctp_route_t *) & ip6route;
3859 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
3861 ro = (sctp_route_t *) & net->ro;
3863 tosBottom = (((struct in6pcb *)inp)->in6p_flowinfo & 0x0c);
3865 tosBottom |= sctp_get_ect(stcb, chk);
3868 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
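/*
 * Layout of the 32-bit ip6_flow word (RFC 2460): 4-bit version,
 * 8-bit traffic class, 20-bit flow label.  tosTop carries the
 * version nibble plus the high nibble of the traffic class,
 * (tosBottom | flowTop) supplies the low traffic-class bits and the
 * top 4 bits of the flow label, and flowBottom holds the low 16
 * bits of the flow label.
 */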
3870 ip6h->ip6_nxt = IPPROTO_UDP;
3872 ip6h->ip6_nxt = IPPROTO_SCTP;
3874 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
3875 ip6h->ip6_dst = sin6->sin6_addr;
3878 * Add SRC address selection here: we can only reuse the KAME
3879 * src-addr-sel to a limited degree, since we can try their
3880 * selection but it may not be bound.
3882 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
3883 lsa6_tmp.sin6_family = AF_INET6;
3884 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
3886 if (net && out_of_asoc_ok == 0) {
3887 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
3888 sctp_free_ifa(net->ro._s_addr);
3889 net->ro._s_addr = NULL;
3890 net->src_addr_selected = 0;
3896 if (net->src_addr_selected == 0) {
3897 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
3898 /* KAME hack: embed scopeid */
3899 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
3900 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3903 /* Cache the source address */
3904 net->ro._s_addr = sctp_source_address_selection(inp,
3910 (void)sa6_recoverscope(sin6);
3911 net->src_addr_selected = 1;
3913 if (net->ro._s_addr == NULL) {
3914 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
3915 net->src_addr_selected = 0;
3918 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
3920 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
3921 /* KAME hack: embed scopeid */
3922 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
3923 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3926 if (over_addr == NULL) {
3927 struct sctp_ifa *_lsrc;
3929 _lsrc = sctp_source_address_selection(inp, stcb, ro,
3933 if (_lsrc == NULL) {
3936 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
3937 sctp_free_ifa(_lsrc);
3939 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
3940 SCTP_RTALLOC(ro, vrf_id);
3942 (void)sa6_recoverscope(sin6);
3944 lsa6->sin6_port = inp->sctp_lport;
3946 if (ro->ro_rt == NULL) {
3948 * src addr selection failed to find a route (or
3949 * valid source addr), so we can't get there from here.
3955 * XXX: sa6 may not have a valid sin6_scope_id in the
3956 * non-SCOPEDROUTING case.
3958 bzero(&lsa6_storage, sizeof(lsa6_storage));
3959 lsa6_storage.sin6_family = AF_INET6;
3960 lsa6_storage.sin6_len = sizeof(lsa6_storage);
3961 lsa6_storage.sin6_addr = lsa6->sin6_addr;
3962 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
3963 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
3968 lsa6_storage.sin6_addr = lsa6->sin6_addr;
3969 lsa6_storage.sin6_port = inp->sctp_lport;
3970 lsa6 = &lsa6_storage;
3971 ip6h->ip6_src = lsa6->sin6_addr;
3974 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
3975 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
3976 udp->uh_dport = port;
3977 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
3979 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
3981 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
3984 sctphdr->src_port = src_port;
3985 sctphdr->dest_port = dest_port;
3986 sctphdr->v_tag = v_tag;
3987 sctphdr->checksum = 0;
3990 * We set the hop limit now since there is a good chance
3991 * that our ro pointer is now filled
3993 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
3994 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3997 /* Copy to be sure something bad is not happening */
3998 sin6->sin6_addr = ip6h->ip6_dst;
3999 lsa6->sin6_addr = ip6h->ip6_src;
4002 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4003 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4004 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4005 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4006 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4008 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4009 /* preserve the port and scope for link local send */
4010 prev_scope = sin6->sin6_scope_id;
4011 prev_port = sin6->sin6_port;
4013 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4014 /* failed to prepend data, give up */
4016 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4019 #ifdef SCTP_PACKET_LOGGING
4020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4021 sctp_packet_log(m, packet_length);
4023 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4025 #if defined(SCTP_WITH_NO_CSUM)
4026 SCTP_STAT_INCR(sctps_sendnocrc);
4028 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4030 (stcb->asoc.loopback_scope))) {
4031 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4032 SCTP_STAT_INCR(sctps_sendswcrc);
4034 SCTP_STAT_INCR(sctps_sendnocrc);
4037 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4038 udp->uh_sum = 0xffff;
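/*
 * A computed UDP checksum of zero must be transmitted as all ones
 * (RFC 768): a zero on the wire means "no checksum", and the
 * checksum is mandatory for UDP over IPv6.
 */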
4041 #if defined(SCTP_WITH_NO_CSUM)
4042 SCTP_STAT_INCR(sctps_sendnocrc);
4044 m->m_pkthdr.csum_flags = CSUM_SCTP;
4045 m->m_pkthdr.csum_data = 0;
4046 SCTP_STAT_INCR(sctps_sendhwcrc);
4049 /* send it out. table id is taken from stcb */
4050 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4051 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4052 so = SCTP_INP_SO(inp);
4053 SCTP_SOCKET_UNLOCK(so, 0);
4056 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4057 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4058 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4059 atomic_add_int(&stcb->asoc.refcnt, 1);
4060 SCTP_TCB_UNLOCK(stcb);
4061 SCTP_SOCKET_LOCK(so, 0);
4062 SCTP_TCB_LOCK(stcb);
4063 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4067 /* for link local this must be done */
4068 sin6->sin6_scope_id = prev_scope;
4069 sin6->sin6_port = prev_port;
4071 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4072 SCTP_STAT_INCR(sctps_sendpackets);
4073 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4075 SCTP_STAT_INCR(sctps_senderrors);
4078 /* Now if we had a temp route free it */
4083 /* PMTU check versus smallest asoc MTU goes here */
4084 if (ro->ro_rt == NULL) {
4085 /* Route was freed */
4086 if (net->ro._s_addr &&
4087 net->src_addr_selected) {
4088 sctp_free_ifa(net->ro._s_addr);
4089 net->ro._s_addr = NULL;
4091 net->src_addr_selected = 0;
4093 if ((ro->ro_rt != NULL) &&
4094 (net->ro._s_addr)) {
4097 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4099 (stcb->asoc.smallest_mtu > mtu)) {
4100 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4103 net->mtu -= sizeof(struct udphdr);
4107 if (ND_IFINFO(ifp)->linkmtu &&
4108 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4109 sctp_mtu_size_reset(inp,
4111 ND_IFINFO(ifp)->linkmtu);
4119 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4120 ((struct sockaddr *)to)->sa_family);
4122 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4129 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4130 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4135 struct mbuf *m, *m_at, *mp_last;
4136 struct sctp_nets *net;
4137 struct sctp_init_chunk *init;
4138 struct sctp_supported_addr_param *sup_addr;
4139 struct sctp_adaptation_layer_indication *ali;
4140 struct sctp_ecn_supported_param *ecn;
4141 struct sctp_prsctp_supported_param *prsctp;
4142 struct sctp_supported_chunk_types_param *pr_supported;
4143 int cnt_inits_to = 0;
4148 /* INITs always go to the primary (and usually the ONLY) address */
4150 net = stcb->asoc.primary_destination;
4152 net = TAILQ_FIRST(&stcb->asoc.nets);
4157 /* we confirm any address we send an INIT to */
4158 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4159 (void)sctp_set_primary_addr(stcb, NULL, net);
4161 /* we confirm any address we send an INIT to */
4162 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4164 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4166 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
4168 * special hook: if we are sending to a link-local address, it will
4169 * not show up in our private address count.
4171 struct sockaddr_in6 *sin6l;
4173 sin6l = &net->ro._l_addr.sin6;
4174 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
4178 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4179 /* This case should not happen */
4180 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4183 /* start the INIT timer */
4184 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4186 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
4188 /* No memory, INIT timer will re-attempt. */
4189 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4192 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
4194 * assume the peer supports ASCONF so we can queue local address
4195 * changes while an INIT is in flight, before the assoc is established.
4198 stcb->asoc.peer_supports_asconf = 1;
4199 /* Now let's put the INIT chunk in place */
4200 init = mtod(m, struct sctp_init_chunk *);
4201 /* now the chunk header */
4202 init->ch.chunk_type = SCTP_INITIATION;
4203 init->ch.chunk_flags = 0;
4204 /* fill in later from mbuf we build */
4205 init->ch.chunk_length = 0;
4206 /* place in my tag */
4207 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4208 /* set up some of the credits. */
4209 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4210 SCTP_MINIMAL_RWND));
4212 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4213 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4214 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4215 /* now the address restriction */
4216 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init +
4218 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4220 /* we support 2 types: IPv6/IPv4 */
4221 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint16_t));
4222 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4223 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
4225 /* we support 1 type: IPv4 */
4226 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint8_t));
4227 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4228 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4230 SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);
4231 /* adaptation layer indication parameter */
4232 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
4233 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4234 ali->ph.param_length = htons(sizeof(*ali));
4235 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4236 SCTP_BUF_LEN(m) += sizeof(*ali);
4237 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
4239 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4240 /* Add NAT friendly parameter */
4241 struct sctp_paramhdr *ph;
4243 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4244 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4245 ph->param_length = htons(sizeof(struct sctp_paramhdr));
4246 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
4247 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph));
4249 /* now any cookie time extensions */
4250 if (stcb->asoc.cookie_preserve_req) {
4251 struct sctp_cookie_perserve_param *cookie_preserve;
4253 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
4254 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4255 cookie_preserve->ph.param_length = htons(
4256 sizeof(*cookie_preserve));
4257 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4258 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
4259 ecn = (struct sctp_ecn_supported_param *)(
4260 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
4261 stcb->asoc.cookie_preserve_req = 0;
4264 if (stcb->asoc.ecn_allowed == 1) {
4265 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
4266 ecn->ph.param_length = htons(sizeof(*ecn));
4267 SCTP_BUF_LEN(m) += sizeof(*ecn);
4268 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
4271 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
4273 /* And now tell the peer we do pr-sctp */
4274 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
4275 prsctp->ph.param_length = htons(sizeof(*prsctp));
4276 SCTP_BUF_LEN(m) += sizeof(*prsctp);
4278 /* And now tell the peer we do all the extensions */
4279 pr_supported = (struct sctp_supported_chunk_types_param *)
4280 ((caddr_t)prsctp + sizeof(*prsctp));
4281 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4283 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4284 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4285 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4286 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4287 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4288 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4289 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4291 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
4292 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4294 p_len = sizeof(*pr_supported) + num_ext;
4295 pr_supported->ph.param_length = htons(p_len);
4296 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
4297 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
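/*
 * Note: SCTP_SIZE32() rounds a length up to the next multiple of
 * four, since every parameter must end on a 32-bit boundary.  For
 * example, assuming the parameter header alone is 4 bytes, with all
 * seven extensions listed p_len = 4 + 7 = 11, which pads to 12; the
 * bzero() above clears the pad byte(s).
 */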
4300 /* add authentication parameters */
4301 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4302 struct sctp_auth_random *randp;
4303 struct sctp_auth_hmac_algo *hmacs;
4304 struct sctp_auth_chunk_list *chunks;
4306 /* attach RANDOM parameter, if available */
4307 if (stcb->asoc.authinfo.random != NULL) {
4308 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4309 p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
4310 /* random key already contains the header */
4311 bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
4312 /* zero out any padding required */
4313 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
4314 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4316 /* add HMAC_ALGO parameter */
4317 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4318 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
4319 (uint8_t *) hmacs->hmac_ids);
4321 p_len += sizeof(*hmacs);
4322 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4323 hmacs->ph.param_length = htons(p_len);
4324 /* zero out any padding required */
4325 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
4326 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4328 /* add CHUNKS parameter */
4329 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4330 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
4331 chunks->chunk_types);
4333 p_len += sizeof(*chunks);
4334 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4335 chunks->ph.param_length = htons(p_len);
4336 /* zero out any padding required */
4337 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
4338 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4342 /* now the addresses */
4344 struct sctp_scoping scp;
4347 * To optimize this we could put the scoping stuff into a
4348 * structure and remove the individual uint8's from the
4349 * assoc structure. Then we could just stuff the address
4350 * into the stcb... but for now this is a quick hack to get
4351 * the address stuff teased apart.
4353 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
4354 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
4355 scp.loopback_scope = stcb->asoc.loopback_scope;
4356 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
4357 scp.local_scope = stcb->asoc.local_scope;
4358 scp.site_scope = stcb->asoc.site_scope;
4360 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
4363 /* calculate the size and update pkt header and chunk header */
4365 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4366 if (SCTP_BUF_NEXT(m_at) == NULL)
4368 p_len += SCTP_BUF_LEN(m_at);
4370 init->ch.chunk_length = htons(p_len);
4372 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
4373 * here since the timer will drive a retransmission.
4376 /* I don't expect this to execute but we will be safe here */
4378 if ((padval) && (mp_last)) {
4380 * The compiler worries that mp_last may not be set even
4381 * though I think it is impossible :-> however we add
4382 * mp_last here just in case.
4384 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
4386 /* Houston, we have a problem: no space */
4392 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4393 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4394 (struct sockaddr *)&net->ro._l_addr,
4395 m, 0, NULL, 0, 0, 0, NULL, 0,
4396 inp->sctp_lport, stcb->rport, htonl(0),
4397 net->port, so_locked, NULL, NULL);
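/*
 * Note: the htonl(0) passed above is the packet's verification tag.
 * A packet carrying an INIT chunk must go out with a zero v-tag
 * (RFC 4960), since the peer's tag is not yet known.
 */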
4398 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4399 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4400 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4404 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4405 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4408 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
4409 * equal to the beginning of the parameters, i.e. (iphlen +
4410 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4411 * end of the mbuf, verifying that all parameters are known.
4413 * For unknown parameters build and return an mbuf with
4414 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4415 * processing this chunk, stop and set *abort_processing to 1.
4417 * By having param_offset pre-set to where the parameters begin, it
4418 * is hoped that this routine may be reused in the future by new chunk types.
4421 struct sctp_paramhdr *phdr, params;
4423 struct mbuf *mat, *op_err;
4424 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4425 int at, limit, pad_needed;
4426 uint16_t ptype, plen, padded_size;
4429 *abort_processing = 0;
4432 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4435 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4436 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
4437 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4438 ptype = ntohs(phdr->param_type);
4439 plen = ntohs(phdr->param_length);
4440 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4441 /* whacked (malformed) parameter */
4442 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4445 limit -= SCTP_SIZE32(plen);
4447 * All parameters for all chunks that we know/understand are
4448 * listed here. We process them elsewhere and take the
4449 * appropriate stop actions per the upper bits. However, this
4450 * is the generic routine that processors can call to get back
4451 * an operational error to either incorporate (init-ack) or send.
4453 padded_size = SCTP_SIZE32(plen);
4455 /* Params with variable size */
4456 case SCTP_HEARTBEAT_INFO:
4457 case SCTP_STATE_COOKIE:
4458 case SCTP_UNRECOG_PARAM:
4459 case SCTP_ERROR_CAUSE_IND:
4463 /* Params with variable size within a range */
4464 case SCTP_CHUNK_LIST:
4465 case SCTP_SUPPORTED_CHUNK_EXT:
4466 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4467 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4472 case SCTP_SUPPORTED_ADDRTYPE:
4473 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4474 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4480 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4481 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4486 case SCTP_SET_PRIM_ADDR:
4487 case SCTP_DEL_IP_ADDRESS:
4488 case SCTP_ADD_IP_ADDRESS:
4489 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4490 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4491 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4496 /* Params with a fixed size */
4497 case SCTP_IPV4_ADDRESS:
4498 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4499 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4504 case SCTP_IPV6_ADDRESS:
4505 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4506 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
4511 case SCTP_COOKIE_PRESERVE:
4512 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
4513 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
4518 case SCTP_HAS_NAT_SUPPORT:
4521 case SCTP_PRSCTP_SUPPORTED:
4523 if (padded_size != sizeof(struct sctp_paramhdr)) {
4524 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
4529 case SCTP_ECN_CAPABLE:
4530 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
4531 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
4536 case SCTP_ULP_ADAPTATION:
4537 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
4538 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
4543 case SCTP_SUCCESS_REPORT:
4544 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
4545 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
4550 case SCTP_HOSTNAME_ADDRESS:
4552 /* We can NOT handle HOST NAME addresses!! */
4555 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
4556 *abort_processing = 1;
4557 if (op_err == NULL) {
4558 /* Ok, need to try to get an mbuf */
4560 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4562 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4565 l_len += sizeof(struct sctp_paramhdr);
4566 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4568 SCTP_BUF_LEN(op_err) = 0;
4570 * pre-reserve space for the IP
4571 * and SCTP header and chunk header
4575 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4577 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4579 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4580 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4584 /* If we have space */
4585 struct sctp_paramhdr s;
4588 uint32_t cpthis = 0;
4590 pad_needed = 4 - (err_at % 4);
4591 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4592 err_at += pad_needed;
4594 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
4595 s.param_length = htons(sizeof(s) + plen);
4596 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4597 err_at += sizeof(s);
4598 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
4600 sctp_m_freem(op_err);
4602 * we are out of memory but
4603 * we still need to have a
4604 * look at what to do (the
4605 * system is in trouble though)
4610 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
4618 * we do not recognize the parameter; figure out what we do.
4621 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
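/*
 * The top two bits of a parameter type encode what to do with an
 * unrecognized parameter (RFC 4960, Section 3.2.1): if 0x8000 is
 * clear we must stop processing; if it is set we skip and continue;
 * and if 0x4000 is set the parameter is reported back in an
 * Unrecognized Parameter cause, as done below.
 */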
4622 if ((ptype & 0x4000) == 0x4000) {
4623 /* Report bit is set?? */
4624 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
4625 if (op_err == NULL) {
4628 /* Ok need to try to get an mbuf */
4630 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4632 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4635 l_len += sizeof(struct sctp_paramhdr);
4636 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4638 SCTP_BUF_LEN(op_err) = 0;
4640 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4642 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4644 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4645 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4649 /* If we have space */
4650 struct sctp_paramhdr s;
4653 uint32_t cpthis = 0;
4655 pad_needed = 4 - (err_at % 4);
4656 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4657 err_at += pad_needed;
4659 s.param_type = htons(SCTP_UNRECOG_PARAM);
4660 s.param_length = htons(sizeof(s) + plen);
4661 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4662 err_at += sizeof(s);
4663 if (plen > sizeof(tempbuf)) {
4664 plen = sizeof(tempbuf);
4666 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
4668 sctp_m_freem(op_err);
4670 * we are out of memory but
4671 * we still need to have a
4672 * look at what to do (the
4673 * system is in trouble though)
4677 goto more_processing;
4679 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
4684 if ((ptype & 0x8000) == 0x0000) {
4685 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
4688 /* skip this chunk and continue processing */
4689 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
4690 at += SCTP_SIZE32(plen);
4695 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
4699 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
4700 *abort_processing = 1;
4701 if ((op_err == NULL) && phdr) {
4705 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4707 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4709 l_len += (2 * sizeof(struct sctp_paramhdr));
4710 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4712 SCTP_BUF_LEN(op_err) = 0;
4714 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4716 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4718 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4719 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4722 if ((op_err) && phdr) {
4723 struct sctp_paramhdr s;
4726 uint32_t cpthis = 0;
4728 pad_needed = 4 - (err_at % 4);
4729 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4730 err_at += pad_needed;
4732 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4733 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
4734 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4735 err_at += sizeof(s);
4736 /* Only copy back the p-hdr that caused the issue */
4737 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
4743 sctp_are_there_new_addresses(struct sctp_association *asoc,
4744 struct mbuf *in_initpkt, int iphlen, int offset)
4747 * Given an INIT packet, look through the packet to verify that there
4748 * are NO new addresses. As we go through the parameters, add reports
4749 * of any un-understood parameters that require an error. Also we
4750 * must return (1) to drop the packet if we see an un-understood
4751 * parameter that tells us to drop the chunk.
4753 struct sockaddr_in sin4, *sa4;
4756 struct sockaddr_in6 sin6, *sa6;
4759 struct sockaddr *sa_touse;
4760 struct sockaddr *sa;
4761 struct sctp_paramhdr *phdr, params;
4765 struct ip6_hdr *ip6h;
4769 uint16_t ptype, plen;
4772 struct sctp_nets *net;
4774 memset(&sin4, 0, sizeof(sin4));
4776 memset(&sin6, 0, sizeof(sin6));
4778 sin4.sin_family = AF_INET;
4779 sin4.sin_len = sizeof(sin4);
4781 sin6.sin6_family = AF_INET6;
4782 sin6.sin6_len = sizeof(sin6);
4785 /* First what about the src address of the pkt ? */
4786 iph = mtod(in_initpkt, struct ip *);
4787 switch (iph->ip_v) {
4789 /* source addr is IPv4 */
4790 sin4.sin_addr = iph->ip_src;
4791 sa_touse = (struct sockaddr *)&sin4;
4794 case IPV6_VERSION >> 4:
4795 /* source addr is IPv6 */
4796 ip6h = mtod(in_initpkt, struct ip6_hdr *);
4797 sin6.sin6_addr = ip6h->ip6_src;
4798 sa_touse = (struct sockaddr *)&sin6;
4806 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4807 sa = (struct sockaddr *)&net->ro._l_addr;
4808 if (sa->sa_family == sa_touse->sa_family) {
4809 if (sa->sa_family == AF_INET) {
4810 sa4 = (struct sockaddr_in *)sa;
4811 if (sa4->sin_addr.s_addr ==
4812 sin4.sin_addr.s_addr) {
4818 if (sa->sa_family == AF_INET6) {
4819 sa6 = (struct sockaddr_in6 *)sa;
4820 if (SCTP6_ARE_ADDR_EQUAL(sa6,
4830 /* New address added! no need to look further. */
4833 /* Ok so far; let's munge through the rest of the packet */
4837 offset += sizeof(struct sctp_init_chunk);
4838 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params));
4840 ptype = ntohs(phdr->param_type);
4841 plen = ntohs(phdr->param_length);
4842 if (ptype == SCTP_IPV4_ADDRESS) {
4843 struct sctp_ipv4addr_param *p4, p4_buf;
4845 phdr = sctp_get_next_param(mat, offset,
4846 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
4847 if (plen != sizeof(struct sctp_ipv4addr_param) ||
4851 p4 = (struct sctp_ipv4addr_param *)phdr;
4852 sin4.sin_addr.s_addr = p4->addr;
4853 sa_touse = (struct sockaddr *)&sin4;
4854 } else if (ptype == SCTP_IPV6_ADDRESS) {
4855 struct sctp_ipv6addr_param *p6, p6_buf;
4857 phdr = sctp_get_next_param(mat, offset,
4858 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
4859 if (plen != sizeof(struct sctp_ipv6addr_param) ||
4863 p6 = (struct sctp_ipv6addr_param *)phdr;
4865 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
4868 sa_touse = (struct sockaddr *)&sin6;
4871 /* ok, sa_touse points to one to check */
4873 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4874 sa = (struct sockaddr *)&net->ro._l_addr;
4875 if (sa->sa_family != sa_touse->sa_family) {
4878 if (sa->sa_family == AF_INET) {
4879 sa4 = (struct sockaddr_in *)sa;
4880 if (sa4->sin_addr.s_addr ==
4881 sin4.sin_addr.s_addr) {
4887 if (sa->sa_family == AF_INET6) {
4888 sa6 = (struct sockaddr_in6 *)sa;
4889 if (SCTP6_ARE_ADDR_EQUAL(
4898 /* New addr added! no need to look further */
4902 offset += SCTP_SIZE32(plen);
4903 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params));
4909 * Given an mbuf chain that was sent into us containing an INIT, build an
4910 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
4911 * done a pullup to include the IPv6/IPv4 header, SCTP header and initial
4912 * part of the INIT message (i.e. the struct sctp_init_msg).
4915 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4916 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
4917 struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock)
4919 struct sctp_association *asoc;
4920 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
4921 struct sctp_init_ack_chunk *initack;
4922 struct sctp_adaptation_layer_indication *ali;
4923 struct sctp_ecn_supported_param *ecn;
4924 struct sctp_prsctp_supported_param *prsctp;
4925 struct sctp_supported_chunk_types_param *pr_supported;
4926 union sctp_sockstore store, store1, *over_addr;
4927 struct sockaddr_in *sin, *to_sin;
4930 struct sockaddr_in6 *sin6, *to_sin6;
4936 struct ip6_hdr *ip6;
4939 struct sockaddr *to;
4940 struct sctp_state_cookie stc;
4941 struct sctp_nets *net = NULL;
4942 uint8_t *signature = NULL;
4943 int cnt_inits_to = 0;
4944 uint16_t his_limit, i_want;
4945 int abort_flag, padval;
4948 int nat_friendly = 0;
4956 if ((asoc != NULL) &&
4957 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
4958 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
4959 /* new addresses, out of here in non-cookie-wait states */
4961 * Send an ABORT; we don't add the new-address error clause,
4962 * though we even set the T bit and copy in the 0 tag. This
4963 * looks no different than if no listener were present.
4965 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port);
4969 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
4970 (offset + sizeof(struct sctp_init_chunk)),
4971 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
4974 sctp_send_abort(init_pkt, iphlen, sh,
4975 init_chk->init.initiate_tag, op_err, vrf_id, port);
4978 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
4980 /* No memory, INIT timer will re-attempt. */
4982 sctp_m_freem(op_err);
4985 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
4987 /* the time I built the cookie */
4988 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
4990 /* populate any tie tags */
4992 /* unlock before tag selections */
4993 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
4994 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
4995 stc.cookie_life = asoc->cookie_life;
4996 net = asoc->primary_destination;
4998 stc.tie_tag_my_vtag = 0;
4999 stc.tie_tag_peer_vtag = 0;
5000 /* life I will award this cookie */
5001 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5004 /* copy in the ports for later check */
5005 stc.myport = sh->dest_port;
5006 stc.peerport = sh->src_port;
5009 * If we wanted to honor cookie life extensions, we would add to
5010 * stc.cookie_life. For now we should NOT honor any extension
5012 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5013 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5014 struct inpcb *in_inp;
5016 /* It's a V6 socket */
5017 in_inp = (struct inpcb *)inp;
5018 stc.ipv6_addr_legal = 1;
5019 /* Now look at the binding flag to see if V4 will be legal */
5020 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
5021 stc.ipv4_addr_legal = 1;
5023 /* V4 addresses are NOT legal on the association */
5024 stc.ipv4_addr_legal = 0;
5027 /* It's a V4 socket, no V6 */
5028 stc.ipv4_addr_legal = 1;
5029 stc.ipv6_addr_legal = 0;
5032 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5037 /* now for scope setup */
5038 memset((caddr_t)&store, 0, sizeof(store));
5039 memset((caddr_t)&store1, 0, sizeof(store1));
5041 to_sin = &store1.sin;
5044 to_sin6 = &store1.sin6;
5046 iph = mtod(init_pkt, struct ip *);
5047 /* establish the to_addr's */
5048 switch (iph->ip_v) {
5050 to_sin->sin_port = sh->dest_port;
5051 to_sin->sin_family = AF_INET;
5052 to_sin->sin_len = sizeof(struct sockaddr_in);
5053 to_sin->sin_addr = iph->ip_dst;
5056 case IPV6_VERSION >> 4:
5057 ip6 = mtod(init_pkt, struct ip6_hdr *);
5058 to_sin6->sin6_addr = ip6->ip6_dst;
5059 to_sin6->sin6_scope_id = 0;
5060 to_sin6->sin6_port = sh->dest_port;
5061 to_sin6->sin6_family = AF_INET6;
5062 to_sin6->sin6_len = sizeof(struct sockaddr_in6);
5071 to = (struct sockaddr *)&store;
5072 switch (iph->ip_v) {
5075 sin->sin_family = AF_INET;
5076 sin->sin_len = sizeof(struct sockaddr_in);
5077 sin->sin_port = sh->src_port;
5078 sin->sin_addr = iph->ip_src;
5079 /* lookup address */
5080 stc.address[0] = sin->sin_addr.s_addr;
5084 stc.addr_type = SCTP_IPV4_ADDRESS;
5085 /* local from address */
5086 stc.laddress[0] = to_sin->sin_addr.s_addr;
5087 stc.laddress[1] = 0;
5088 stc.laddress[2] = 0;
5089 stc.laddress[3] = 0;
5090 stc.laddr_type = SCTP_IPV4_ADDRESS;
5091 /* scope_id is only for v6 */
5093 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5094 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
5099 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5100 /* Must use the address in this case */
5101 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
5102 stc.loopback_scope = 1;
5105 stc.local_scope = 0;
5110 case IPV6_VERSION >> 4:
5112 ip6 = mtod(init_pkt, struct ip6_hdr *);
5113 sin6->sin6_family = AF_INET6;
5114 sin6->sin6_len = sizeof(struct sockaddr_in6);
5115 sin6->sin6_port = sh->src_port;
5116 sin6->sin6_addr = ip6->ip6_src;
5117 /* lookup address */
5118 memcpy(&stc.address, &sin6->sin6_addr,
5119 sizeof(struct in6_addr));
5120 sin6->sin6_scope_id = 0;
5121 stc.addr_type = SCTP_IPV6_ADDRESS;
5123 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
5125 * FIX ME: does this have scope from
5128 (void)sa6_recoverscope(sin6);
5129 stc.scope_id = sin6->sin6_scope_id;
5130 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5131 stc.loopback_scope = 1;
5132 stc.local_scope = 0;
5135 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
5137 * If the new destination is a
5138 * LINK_LOCAL address, we must have
5139 * both site and local scope in
5140 * common. Don't set local scope,
5141 * though, since we must depend on
5142 * the source to be added implicitly.
5143 * We cannot assume, just because we
5144 * share one link, that all links are common.
5146 stc.local_scope = 0;
5150 * we start counting for the private
5151 * address stuff at 1, since the
5152 * link-local address we source from
5153 * won't show up in our scoped count.
5157 * pull out the scope_id from
5161 * FIX ME: does this have scope from
5164 (void)sa6_recoverscope(sin6);
5165 stc.scope_id = sin6->sin6_scope_id;
5166 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5167 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
5169 * If the new destination is
5170 * SITE_LOCAL, then we must have site scope in common.
5175 memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr));
5176 stc.laddr_type = SCTP_IPV6_ADDRESS;
5186 /* set the scope per the existing tcb */
5189 struct sctp_nets *lnet;
5193 stc.loopback_scope = asoc->loopback_scope;
5194 stc.ipv4_scope = asoc->ipv4_local_scope;
5195 stc.site_scope = asoc->site_scope;
5196 stc.local_scope = asoc->local_scope;
5198 /* Why do we not consider IPv4 LL addresses? */
5199 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5200 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5201 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5203 * if we have a LL address, start
5211 /* use the net pointer */
5212 to = (struct sockaddr *)&net->ro._l_addr;
5213 switch (to->sa_family) {
5215 sin = (struct sockaddr_in *)to;
5216 stc.address[0] = sin->sin_addr.s_addr;
5220 stc.addr_type = SCTP_IPV4_ADDRESS;
5221 if (net->src_addr_selected == 0) {
5223 * strange case here, the INIT should have
5224 * done the selection.
5226 net->ro._s_addr = sctp_source_address_selection(inp,
5227 stcb, (sctp_route_t *) & net->ro,
5229 if (net->ro._s_addr == NULL)
5232 net->src_addr_selected = 1;
5235 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5236 stc.laddress[1] = 0;
5237 stc.laddress[2] = 0;
5238 stc.laddress[3] = 0;
5239 stc.laddr_type = SCTP_IPV4_ADDRESS;
5243 sin6 = (struct sockaddr_in6 *)to;
5244 memcpy(&stc.address, &sin6->sin6_addr,
5245 sizeof(struct in6_addr));
5246 stc.addr_type = SCTP_IPV6_ADDRESS;
5247 if (net->src_addr_selected == 0) {
5249 * strange case here, the INIT should have
5250 * done the selection.
5252 net->ro._s_addr = sctp_source_address_selection(inp,
5253 stcb, (sctp_route_t *) & net->ro,
5255 if (net->ro._s_addr == NULL)
5258 net->src_addr_selected = 1;
5260 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5261 sizeof(struct in6_addr));
5262 stc.laddr_type = SCTP_IPV6_ADDRESS;
5267 /* Now let's put the INIT-ACK chunk in place */
5268 initack = mtod(m, struct sctp_init_ack_chunk *);
5269 /* Save it off for quick ref */
5270 stc.peers_vtag = init_chk->init.initiate_tag;
5272 memcpy(stc.identification, SCTP_VERSION_STRING,
5273 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5274 /* now the chunk header */
5275 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5276 initack->ch.chunk_flags = 0;
5277 /* fill in later from mbuf we build */
5278 initack->ch.chunk_length = 0;
5279 /* place in my tag */
5280 if ((asoc != NULL) &&
5281 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5282 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5283 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5284 /* re-use the v-tags and init-seq here */
5285 initack->init.initiate_tag = htonl(asoc->my_vtag);
5286 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5288 uint32_t vtag, itsn;
5290 if (hold_inp_lock) {
5291 SCTP_INP_INCR_REF(inp);
5292 SCTP_INP_RUNLOCK(inp);
5295 atomic_add_int(&asoc->refcnt, 1);
5296 SCTP_TCB_UNLOCK(stcb);
5298 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5299 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5301 * Got a duplicate vtag on a peer behind a
5302 * NAT; make sure we don't use it.
5306 initack->init.initiate_tag = htonl(vtag);
5307 /* get a TSN to use too */
5308 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5309 initack->init.initial_tsn = htonl(itsn);
5310 SCTP_TCB_LOCK(stcb);
5311 atomic_add_int(&asoc->refcnt, -1);
5313 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5314 initack->init.initiate_tag = htonl(vtag);
5315 /* get a TSN to use too */
5316 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5318 if (hold_inp_lock) {
5319 SCTP_INP_RLOCK(inp);
5320 SCTP_INP_DECR_REF(inp);
5323 /* save away my tag too */
5324 stc.my_vtag = initack->init.initiate_tag;
5326 /* set up some of the credits. */
5327 so = inp->sctp_socket;
5329 /* memory problem */
5333 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5335 /* set what I want */
5336 his_limit = ntohs(init_chk->init.num_inbound_streams);
5337 /* choose what I want */
5339 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5340 i_want = asoc->streamoutcnt;
5342 i_want = inp->sctp_ep.pre_open_stream_count;
5345 i_want = inp->sctp_ep.pre_open_stream_count;
5347 if (his_limit < i_want) {
5348 /* I Want more :< */
5349 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5351 /* I can have what I want :> */
5352 initack->init.num_outbound_streams = htons(i_want);
5354 /* tell him his limit. */
5355 initack->init.num_inbound_streams =
5356 htons(inp->sctp_ep.max_open_streams_intome);
5358 /* adaptation layer indication parameter */
5359 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5360 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5361 ali->ph.param_length = htons(sizeof(*ali));
5362 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5363 SCTP_BUF_LEN(m) += sizeof(*ali);
5364 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5367 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
5368 (inp->sctp_ecn_enable == 1)) {
5369 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5370 ecn->ph.param_length = htons(sizeof(*ecn));
5371 SCTP_BUF_LEN(m) += sizeof(*ecn);
5373 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5376 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5378 /* And now tell the peer we do pr-sctp */
5379 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5380 prsctp->ph.param_length = htons(sizeof(*prsctp));
5381 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5383 /* Add NAT friendly parameter */
5384 struct sctp_paramhdr *ph;
5386 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5387 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5388 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5389 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5391 /* And now tell the peer we do all the extensions */
5392 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5393 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5395 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5396 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5397 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5398 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5399 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5400 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5401 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5402 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5403 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5404 p_len = sizeof(*pr_supported) + num_ext;
5405 pr_supported->ph.param_length = htons(p_len);
5406 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5407 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5409 /* add authentication parameters */
5410 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5411 struct sctp_auth_random *randp;
5412 struct sctp_auth_hmac_algo *hmacs;
5413 struct sctp_auth_chunk_list *chunks;
5414 uint16_t random_len;
5416 /* generate and add RANDOM parameter */
5417 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5418 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5419 randp->ph.param_type = htons(SCTP_RANDOM);
5420 p_len = sizeof(*randp) + random_len;
5421 randp->ph.param_length = htons(p_len);
5422 SCTP_READ_RANDOM(randp->random_data, random_len);
5423 /* zero out any padding required */
5424 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5425 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5427 /* add HMAC_ALGO parameter */
5428 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5429 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5430 (uint8_t *) hmacs->hmac_ids);
5432 p_len += sizeof(*hmacs);
5433 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5434 hmacs->ph.param_length = htons(p_len);
5435 /* zero out any padding required */
5436 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5437 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5439 /* add CHUNKS parameter */
5440 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5441 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5442 chunks->chunk_types);
5444 p_len += sizeof(*chunks);
5445 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5446 chunks->ph.param_length = htons(p_len);
5447 /* zero out any padding required */
5448 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5449 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5453 /* now the addresses */
5455 struct sctp_scoping scp;
5458 * To optimize this we could put the scoping stuff into a
5459 * structure and remove the individual uint8's from the stc
5460 * structure. Then we could just stuff the address into
5461 * the stc... but for now this is a quick hack to get the
5462 * address stuff teased apart.
5464 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5465 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5466 scp.loopback_scope = stc.loopback_scope;
5467 scp.ipv4_local_scope = stc.ipv4_scope;
5468 scp.local_scope = stc.local_scope;
5469 scp.site_scope = stc.site_scope;
5470 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
5473 /* tack on the operational error if present */
5481 llen += SCTP_BUF_LEN(ol);
5482 ol = SCTP_BUF_NEXT(ol);
5485 /* must add a pad to the param */
5486 uint32_t cpthis = 0;
5489 padlen = 4 - (llen % 4);
5490 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
5492 while (SCTP_BUF_NEXT(m_at) != NULL) {
5493 m_at = SCTP_BUF_NEXT(m_at);
5495 SCTP_BUF_NEXT(m_at) = op_err;
5496 while (SCTP_BUF_NEXT(m_at) != NULL) {
5497 m_at = SCTP_BUF_NEXT(m_at);
5500 /* pre-calculate the size and update pkt header and chunk header */
5502 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5503 p_len += SCTP_BUF_LEN(m_tmp);
5504 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5505 /* m_tmp should now point to last one */
5510 /* Now we must build a cookie */
5511 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m, 0, &stc, &signature);
5512 if (m_cookie == NULL) {
5513 /* memory problem */
5517 /* Now append the cookie to the end and update the space/size */
5518 SCTP_BUF_NEXT(m_tmp) = m_cookie;
5520 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5521 p_len += SCTP_BUF_LEN(m_tmp);
5522 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5523 /* m_tmp should now point to last one */
5529 * Place in the size, but we don't include the last pad (if any) in
5532 initack->ch.chunk_length = htons(p_len);
5535 * Time to sign the cookie. We don't sign over the cookie signature
5536 * itself, thus we set the trailer.
5538 (void)sctp_hmac_m(SCTP_HMAC,
5539 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
5540 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
5541 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
5543 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
5544 * here since the timer will drive a retransmission.
5547 if ((padval) && (mp_last)) {
5548 /* see my previous comments on mp_last */
5551 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
5553 /* Houston, we have a problem: no space */
5559 if (stc.loopback_scope) {
5560 over_addr = &store1;
5565 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
5567 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
5568 port, SCTP_SO_NOT_LOCKED, over_addr, init_pkt);
5569 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5574 sctp_prune_prsctp(struct sctp_tcb *stcb,
5575 struct sctp_association *asoc,
5576 struct sctp_sndrcvinfo *srcv,
5580 struct sctp_tmit_chunk *chk, *nchk;
5582 SCTP_TCB_LOCK_ASSERT(stcb);
5583 if ((asoc->peer_supports_prsctp) &&
5584 (asoc->sent_queue_cnt_removeable > 0)) {
5585 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
5587 * Look for chunks marked with the PR_SCTP flag AND
5588 * the buffer space flag. If the one being sent is of
5589 * equal or greater priority, then purge the old one
5590 * and free some space.
5592 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
5594 * This one is PR-SCTP AND buffer space
5597 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5599 * Lower numbers equate to higher
5600 * priority, so if the one we are
5601 * looking at has a larger or equal
5602 * priority we want to drop the data
5603 * and NOT retransmit it.
5607 * We release the book_size
5608 * if the mbuf is here
5613 if (chk->sent > SCTP_DATAGRAM_UNSENT)
5614 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
5616 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
5617 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5620 freed_spc += ret_spc;
5621 if (freed_spc >= dataout) {
5624 } /* if chunk was present */
5625 } /* if of sufficient priority */
5626 } /* if chunk has enabled */
5627 } /* tailqforeach */
5629 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
5630 /* Here we must move to the sent queue and mark */
5631 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
5632 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5635 * We release the book_size
5636 * if the mbuf is here
5640 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5641 SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
5644 freed_spc += ret_spc;
5645 if (freed_spc >= dataout) {
5648 } /* end if chk->data */
5649 } /* end if right class */
5650 } /* end if chk pr-sctp */
5651 } /* tailqforeachsafe (chk) */
5652 } /* if enabled in asoc */
5656 sctp_get_frag_point(struct sctp_tcb *stcb,
5657 struct sctp_association *asoc)
5662 * For endpoints that have both v6 and v4 addresses we must reserve
5663 * room for the IPv6 header; for those that are only dealing with v4
5664 * we use a larger frag point.
5666 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5667 ovh = SCTP_MED_OVERHEAD;
5669 ovh = SCTP_MED_V4_OVERHEAD;
5672 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
5673 siz = asoc->smallest_mtu - ovh;
5675 siz = (stcb->asoc.sctp_frag_point - ovh);
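/*
 * In effect the usable frag point is min(asoc->smallest_mtu,
 * sctp_frag_point) minus the per-packet overhead: ovh reserves room
 * for the IP and SCTP headers, using the larger SCTP_MED_OVERHEAD
 * when IPv6 may be in play.
 */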
5677 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
5679 /* A data chunk MUST fit in a cluster */
5680 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
5683 /* adjust for an AUTH chunk if DATA requires auth */
5684 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
5685 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
5688 /* make it an even word boundary please */
5695 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
5699 * We assume that the user wants PR_SCTP_TTL if the user provides a
5700 * positive lifetime but does not specify any PR_SCTP policy. This
5701 * is a BAD assumption and causes problems at least with the
5702 * U-Vancouver MPI folks. I will change this to be no policy means NONE.
5705 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
5706 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
5711 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
5712 case CHUNK_FLAGS_PR_SCTP_BUF:
5714 * Time to live is a priority stored in tv_sec when doing
5715 * the buffer drop thing.
5717 sp->ts.tv_sec = sp->timetolive;
5720 case CHUNK_FLAGS_PR_SCTP_TTL:
5724 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
5725 tv.tv_sec = sp->timetolive / 1000;
5726 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
5728 * TODO sctp_constants.h needs alternative time
5729 * macros when _KERNEL is undefined.
5731 timevaladd(&sp->ts, &tv);
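/*
 * For example, timetolive = 1500 (ms) yields tv = {1, 500000}, so
 * the chunk's drop deadline lands 1.5 seconds from now.
 */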
5734 case CHUNK_FLAGS_PR_SCTP_RTX:
5736 * Time to live is the number of retransmissions, stored in tv_sec.
5739 sp->ts.tv_sec = sp->timetolive;
5743 SCTPDBG(SCTP_DEBUG_USRREQ1,
5744 "Unknown PR_SCTP policy %u.\n",
5745 PR_SCTP_POLICY(sp->sinfo_flags));
5751 sctp_msg_append(struct sctp_tcb *stcb,
5752 struct sctp_nets *net,
5754 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
5756 int error = 0, holds_lock;
5758 struct sctp_stream_queue_pending *sp = NULL;
5759 struct sctp_stream_out *strm;
5762 * Given an mbuf chain, put it into the association send queue and
5763 * place it on the wheel
5765 holds_lock = hold_stcb_lock;
5766 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
5767 /* Invalid stream number */
5768 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
5772 if ((stcb->asoc.stream_locked) &&
5773 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
5774 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
5778 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
5779 /* Now can we send this? */
5780 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
5781 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5782 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
5783 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
5784 /* got data while shutting down */
5785 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
5789 sctp_alloc_a_strmoq(stcb, sp);
5791 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
5795 sp->sinfo_flags = srcv->sinfo_flags;
5796 sp->timetolive = srcv->sinfo_timetolive;
5797 sp->ppid = srcv->sinfo_ppid;
5798 sp->context = srcv->sinfo_context;
5800 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
5802 atomic_add_int(&sp->net->ref_count, 1);
5806 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
5807 sp->stream = srcv->sinfo_stream;
5808 sp->msg_is_complete = 1;
5809 sp->sender_all_done = 1;
5812 sp->tail_mbuf = NULL;
5815 sctp_set_prsctp_policy(sp);
5817 * We could in theory (for sendall) pass the length in, but we would
5818 * still have to hunt through the chain since we need to set up the
5822 if (SCTP_BUF_NEXT(at) == NULL)
5824 sp->length += SCTP_BUF_LEN(at);
5825 at = SCTP_BUF_NEXT(at);
5827 SCTP_TCB_SEND_LOCK(stcb);
5828 sctp_snd_sb_alloc(stcb, sp->length);
5829 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
5830 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
5831 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
5832 sp->strseq = strm->next_sequence_sent;
5833 strm->next_sequence_sent++;
5835 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
5837 SCTP_TCB_SEND_UNLOCK(stcb);
5846 static struct mbuf *
5847 sctp_copy_mbufchain(struct mbuf *clonechain,
5848 struct mbuf *outchain,
5849 struct mbuf **endofchain,
5852 uint8_t copy_by_ref)
5855 struct mbuf *appendchain;
5859 if (endofchain == NULL) {
5863 sctp_m_freem(outchain);
5866 if (can_take_mbuf) {
5867 appendchain = clonechain;
5870 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
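/*
 * Note: small payloads -- those fitting within
 * sctp_mbuf_threshold_count mbufs, i.e. at most
 * ((count - 1) * MLEN) + MHLEN bytes -- are copied by value into
 * the tail of the existing chain below; larger ones fall through to
 * SCTP_M_COPYM(), which can share cluster data by reference.
 */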
5872 /* It's not in a cluster */
5873 if (*endofchain == NULL) {
5874 /* let's get an mbuf cluster */
5875 if (outchain == NULL) {
5876 /* This is the general case */
5878 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
5879 if (outchain == NULL) {
5882 SCTP_BUF_LEN(outchain) = 0;
5883 *endofchain = outchain;
5884 /* get the prepend space */
5885 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
5888 * We really should not get a NULL
5894 if (SCTP_BUF_NEXT(m) == NULL) {
5898 m = SCTP_BUF_NEXT(m);
5901 if (*endofchain == NULL) {
5903 * huh, TSNH XXX maybe we
5906 sctp_m_freem(outchain);
5910 /* get the new end of length */
5911 len = M_TRAILINGSPACE(*endofchain);
5913 /* how much is left at the end? */
5914 len = M_TRAILINGSPACE(*endofchain);
5916 /* Find the end of the data, for appending */
5917 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
5919 /* Now lets copy it out */
5920 if (len >= sizeofcpy) {
5921 /* It all fits, copy it in */
5922 m_copydata(clonechain, 0, sizeofcpy, cp);
5923 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
5925 /* fill up the end of the chain */
5927 m_copydata(clonechain, 0, len, cp);
5928 SCTP_BUF_LEN((*endofchain)) += len;
5929 /* now we need another one */
5932 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
5937 SCTP_BUF_NEXT((*endofchain)) = m;
5939 cp = mtod((*endofchain), caddr_t);
5940 m_copydata(clonechain, len, sizeofcpy, cp);
5941 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
5945	/* copy the old-fashioned way */
5946 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
5947 #ifdef SCTP_MBUF_LOGGING
5948 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5953 if (SCTP_BUF_IS_EXTENDED(mat)) {
5954 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5956 mat = SCTP_BUF_NEXT(mat);
5962 if (appendchain == NULL) {
5965 sctp_m_freem(outchain);
5969 /* tack on to the end */
5970 if (*endofchain != NULL) {
5971 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
5975 if (SCTP_BUF_NEXT(m) == NULL) {
5976 SCTP_BUF_NEXT(m) = appendchain;
5979 m = SCTP_BUF_NEXT(m);
5983	 * save off the end and update the end-chain position
5987 if (SCTP_BUF_NEXT(m) == NULL) {
5991 m = SCTP_BUF_NEXT(m);
5995	/* save off the end and update the end-chain position */
5998 if (SCTP_BUF_NEXT(m) == NULL) {
6002 m = SCTP_BUF_NEXT(m);
6004 return (appendchain);
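/*
 * Editorial sketch (not part of the original source): how the
 * copy-by-value threshold in sctp_copy_mbufchain() behaves. When the
 * clone chain cannot simply be taken over, payloads no larger than
 *
 *   ((sctp_mbuf_threshold_count - 1) * MLEN) + MHLEN
 *
 * are flat-copied into the tail of the outchain with m_copydata();
 * larger ones fall through to SCTP_M_COPYM() (a reference-counted
 * copy). Purely for illustration, assuming MLEN = 232, MHLEN = 208
 * and a threshold count of 5 (all platform/tuning dependent), the
 * cutoff would be 4 * 232 + 208 = 1136 bytes.
 */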
6009 sctp_med_chunk_output(struct sctp_inpcb *inp,
6010 struct sctp_tcb *stcb,
6011 struct sctp_association *asoc,
6014 int control_only, int from_where,
6015 struct timeval *now, int *now_filled, int frag_point, int so_locked
6016 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6022 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6025 struct sctp_copy_all *ca;
6028 int added_control = 0;
6029 int un_sent, do_chunk_output = 1;
6030 struct sctp_association *asoc;
6032 ca = (struct sctp_copy_all *)ptr;
6033 if (ca->m == NULL) {
6036 if (ca->inp != inp) {
6040 if ((ca->m) && ca->sndlen) {
6041 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
6043 /* can't copy so we are done */
6047 #ifdef SCTP_MBUF_LOGGING
6048 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6053 if (SCTP_BUF_IS_EXTENDED(mat)) {
6054 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6056 mat = SCTP_BUF_NEXT(mat);
6063 SCTP_TCB_LOCK_ASSERT(stcb);
6064 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6065	/* Abort this assoc with m as the user-defined reason */
6067 struct sctp_paramhdr *ph;
6069 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
6071 ph = mtod(m, struct sctp_paramhdr *);
6072 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6073 ph->param_length = htons(ca->sndlen);
6076 * We add one here to keep the assoc from
6077	 * disappearing on us.
6079 atomic_add_int(&stcb->asoc.refcnt, 1);
6080 sctp_abort_an_association(inp, stcb,
6081 SCTP_RESPONSE_TO_USER_REQ,
6082 m, SCTP_SO_NOT_LOCKED);
6084	 * sctp_abort_an_association calls sctp_free_asoc(),
6085	 * but freeing the association will NOT free it since
6086	 * we incremented the refcnt; we do this to prevent
6087	 * it being freed and things getting tricky, since we
6088	 * could end up (from free_asoc) calling inpcb_free,
6089	 * which would make a recursive lock call on the
6090	 * iterator lock. As a consequence of that, the
6091	 * stcb will be returned to us unlocked: since
6092	 * free_asoc returns with either no TCB or the TCB
6093	 * unlocked, we must relock here in order to
6094	 * unlock later in the iterator timer.
6096 SCTP_TCB_LOCK(stcb);
6097 atomic_add_int(&stcb->asoc.refcnt, -1);
6098 goto no_chunk_output;
6102 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
6106 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6107 /* shutdown this assoc */
6110 cnt = sctp_is_there_unsent_data(stcb);
6112 if (TAILQ_EMPTY(&asoc->send_queue) &&
6113 TAILQ_EMPTY(&asoc->sent_queue) &&
6115 if (asoc->locked_on_sending) {
6119 * there is nothing queued to send, so I'm
6122 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6123 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6124 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6126 * only send SHUTDOWN the first time
6129 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
6130 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6131 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6133 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6134 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6135 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6136 asoc->primary_destination);
6137 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6138 asoc->primary_destination);
6140 do_chunk_output = 0;
6144 * we still got (or just got) data to send,
6145 * so set SHUTDOWN_PENDING
6148 * XXX sockets draft says that SCTP_EOF
6149 * should be sent with no data. currently,
6150 * we will allow user data to be sent first
6151 * and move to SHUTDOWN-PENDING
6153 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6154 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6155 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6156 if (asoc->locked_on_sending) {
6158 * Locked to send out the
6161 struct sctp_stream_queue_pending *sp;
6163 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6165 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6166 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6169 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6170 if (TAILQ_EMPTY(&asoc->send_queue) &&
6171 TAILQ_EMPTY(&asoc->sent_queue) &&
6172 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6174 atomic_add_int(&stcb->asoc.refcnt, 1);
6175 sctp_abort_an_association(stcb->sctp_ep, stcb,
6176 SCTP_RESPONSE_TO_USER_REQ,
6177 NULL, SCTP_SO_NOT_LOCKED);
6178 atomic_add_int(&stcb->asoc.refcnt, -1);
6179 goto no_chunk_output;
6181 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6182 asoc->primary_destination);
6188 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6189 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6191 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6192 (stcb->asoc.total_flight > 0) &&
6193 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
6195 do_chunk_output = 0;
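/*
 * Editorial sketch (not part of the original source): the Nagle-style
 * check above with illustrative numbers. Assume smallest_mtu = 1500,
 * SCTP_MIN_OVERHEAD = 52 (an IPv6 header plus the common SCTP header;
 * an assumption here), 10000 bytes in flight, 10400 bytes in the
 * output queue, and one message on a stream queue:
 *
 *   un_sent = (10400 - 10000) + 1 * sizeof(struct sctp_data_chunk)
 *           = 400 + 16 = 416
 *
 * Since 416 < 1500 - 52 = 1448, NODELAY is off, and data is already
 * in flight, do_chunk_output is cleared and the small residue waits
 * to be bundled with a later send.
 */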
6197 if (do_chunk_output)
6198 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6199 else if (added_control) {
6200 int num_out = 0, reason = 0, now_filled = 0;
6204 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6205 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6206 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6217 sctp_sendall_completes(void *ptr, uint32_t val)
6219 struct sctp_copy_all *ca;
6221 ca = (struct sctp_copy_all *)ptr;
6223	 * Do a notify here? Kacheong suggests that the notify be done at
6224	 * send time, so you would push up a notification if any send
6225	 * failed. Don't know if this is feasible since the only failures we
6226	 * have are "memory" related, and if you cannot get an mbuf to send
6227	 * the data you surely can't get an mbuf to send up a notification
6228	 * that you can't send the data :->
6231 /* now free everything */
6232 sctp_m_freem(ca->m);
6233 SCTP_FREE(ca, SCTP_M_COPYAL);
6237 #define MC_ALIGN(m, len) do { \
6238	SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
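/*
 * Editorial worked example (not part of the original source): MC_ALIGN
 * reserves leading space so that len bytes land at the tail of a
 * cluster, rounded down to a long boundary. With MCLBYTES = 2048,
 * len = 100 and sizeof(long) = 8:
 *
 *   (2048 - 100) & ~7 = 1948 & ~7 = 1944
 *
 * so 1944 bytes are reserved up front and the payload starts at a
 * long-aligned offset near the end of the cluster.
 */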
6243 static struct mbuf *
6244 sctp_copy_out_all(struct uio *uio, int len)
6246 struct mbuf *ret, *at;
6247 int left, willcpy, cancpy, error;
6249 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
6255 SCTP_BUF_LEN(ret) = 0;
6256 /* save space for the data chunk header */
6257 cancpy = M_TRAILINGSPACE(ret);
6258 willcpy = min(cancpy, left);
6261 /* Align data to the end */
6262 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6268 SCTP_BUF_LEN(at) = willcpy;
6269	SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = NULL;
6272 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
6273 if (SCTP_BUF_NEXT(at) == NULL) {
6276 at = SCTP_BUF_NEXT(at);
6277 SCTP_BUF_LEN(at) = 0;
6278 cancpy = M_TRAILINGSPACE(at);
6279 willcpy = min(cancpy, left);
6286 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6287 struct sctp_sndrcvinfo *srcv)
6290 struct sctp_copy_all *ca;
6292 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6296 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6299 memset(ca, 0, sizeof(struct sctp_copy_all));
6302 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6304 * take off the sendall flag, it would be bad if we failed to do
6307 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6308 /* get length and mbuf chain */
6310 ca->sndlen = uio->uio_resid;
6311 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6312 if (ca->m == NULL) {
6313 SCTP_FREE(ca, SCTP_M_COPYAL);
6314 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6318 /* Gather the length of the send */
6324 ca->sndlen += SCTP_BUF_LEN(m);
6325 m = SCTP_BUF_NEXT(m);
6329 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6330 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6331 SCTP_ASOC_ANY_STATE,
6333 sctp_sendall_completes, inp, 1);
6335 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6336 SCTP_FREE(ca, SCTP_M_COPYAL);
6337 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6345 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6347 struct sctp_tmit_chunk *chk, *nchk;
6349 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6350 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6351 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6353 sctp_m_freem(chk->data);
6356 asoc->ctrl_queue_cnt--;
6357 sctp_free_a_chunk(stcb, chk);
6363 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6365 struct sctp_association *asoc;
6366 struct sctp_tmit_chunk *chk, *nchk;
6367 struct sctp_asconf_chunk *acp;
6370 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6371 /* find SCTP_ASCONF chunk in queue */
6372 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6374 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6375 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6380 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6382 sctp_m_freem(chk->data);
6385 asoc->ctrl_queue_cnt--;
6386 sctp_free_a_chunk(stcb, chk);
6393 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6394 struct sctp_association *asoc,
6395 struct sctp_tmit_chunk **data_list,
6397 struct sctp_nets *net)
6400 struct sctp_tmit_chunk *tp1;
6402 for (i = 0; i < bundle_at; i++) {
6403 /* off of the send queue */
6404 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6405 asoc->send_queue_cnt--;
6408	 * For any chunk other than chunk 0, zap the time; chunk 0's
6409	 * time gets zapped or set based on whether an RTO measurement is
6412 data_list[i]->do_rtt = 0;
6415 data_list[i]->sent_rcv_time = net->last_sent_time;
6416 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6417 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6418 if (data_list[i]->whoTo == NULL) {
6419 data_list[i]->whoTo = net;
6420 atomic_add_int(&net->ref_count, 1);
6422 /* on to the sent queue */
6423 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6424 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6425 struct sctp_tmit_chunk *tpp;
6427 /* need to move back */
6429 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6431 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6435 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6438 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6440 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6445 /* This does not lower until the cum-ack passes it */
6446 asoc->sent_queue_cnt++;
6447 if ((asoc->peers_rwnd <= 0) &&
6448 (asoc->total_flight == 0) &&
6450 /* Mark the chunk as being a window probe */
6451 SCTP_STAT_INCR(sctps_windowprobed);
6453 #ifdef SCTP_AUDITING_ENABLED
6454 sctp_audit_log(0xC2, 3);
6456 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6457 data_list[i]->snd_count = 1;
6458 data_list[i]->rec.data.chunk_was_revoked = 0;
6459 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6460 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6461 data_list[i]->whoTo->flight_size,
6462 data_list[i]->book_size,
6463 (uintptr_t) data_list[i]->whoTo,
6464 data_list[i]->rec.data.TSN_seq);
6466 sctp_flight_size_increase(data_list[i]);
6467 sctp_total_flight_increase(stcb, data_list[i]);
6468 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6469 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6470 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6472 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6473 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6474 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6475 /* SWS sender side engages */
6476 asoc->peers_rwnd = 0;
6482 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
6484 struct sctp_tmit_chunk *chk, *nchk;
6486 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6487 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6488 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
6489 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6490 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6491 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
6492 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6493 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6494 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6495 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6496 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
6497 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
6498 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
6499 /* Stray chunks must be cleaned up */
6501 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6503 sctp_m_freem(chk->data);
6506 asoc->ctrl_queue_cnt--;
6507 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
6508 asoc->fwd_tsn_cnt--;
6509 sctp_free_a_chunk(stcb, chk);
6510 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
6511 /* special handling, we must look into the param */
6512 if (chk != asoc->str_reset) {
6513 goto clean_up_anyway;
6521 sctp_can_we_split_this(struct sctp_tcb *stcb,
6523 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
6526	 * Make a decision on whether I should split a msg into multiple parts.
6527 * This is only asked of incomplete messages.
6531	 * If we are doing EEOR we need to always send it if it's the
6532 * entire thing, since it might be all the guy is putting in
6535 if (goal_mtu >= length) {
6537 * If we have data outstanding,
6538 * we get another chance when the sack
6539 * arrives to transmit - wait for more data
6541 if (stcb->asoc.total_flight == 0) {
6543 * If nothing is in flight, we zero the
6551 /* You can fill the rest */
6556 * For those strange folk that make the send buffer
6557 * smaller than our fragmentation point, we can't
6558 * get a full msg in so we have to allow splitting.
6560 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
6563 if ((length <= goal_mtu) ||
6564 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
6565	/* Sub-optimal residual, don't split in non-EEOR mode. */
6569	 * If we reach here, length is larger than the goal_mtu. Do we wish
6570	 * to split it for the sake of packing chunks together?
6572 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
6573	/* It's OK to split it */
6574 return (min(goal_mtu, frag_point));
6576 /* Nope, can't split */
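/*
 * Editorial worked example (not part of the original source) for
 * sctp_can_we_split_this() in non-EEOR mode. Assume goal_mtu =
 * frag_point = 1400 and, purely for illustration, sctp_min_residual =
 * 1452 and sctp_min_split_point <= 1400 (both sysctl-tunable):
 *
 *   - length = 1500: 1500 - 1400 = 100 < 1452, a sub-optimal
 *     residual, so we refuse to split and move nothing yet.
 *   - length = 4000: 4000 - 1400 = 2600 >= 1452 and goal_mtu >=
 *     min(sctp_min_split_point, frag_point), so we split and take
 *     min(goal_mtu, frag_point) = 1400 bytes now.
 */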
6582 sctp_move_to_outqueue(struct sctp_tcb *stcb,
6583 struct sctp_stream_out *strq,
6585 uint32_t frag_point,
6591 /* Move from the stream to the send_queue keeping track of the total */
6592 struct sctp_association *asoc;
6593 struct sctp_stream_queue_pending *sp;
6594 struct sctp_tmit_chunk *chk;
6595 struct sctp_data_chunk *dchkh;
6596 uint32_t to_move, length;
6597 uint8_t rcv_flags = 0;
6599 uint8_t send_lock_up = 0;
6601 SCTP_TCB_LOCK_ASSERT(stcb);
6604 /* sa_ignore FREED_MEMORY */
6605 sp = TAILQ_FIRST(&strq->outqueue);
6608 if (send_lock_up == 0) {
6609 SCTP_TCB_SEND_LOCK(stcb);
6612 sp = TAILQ_FIRST(&strq->outqueue);
6616 if (strq->last_msg_incomplete) {
6617 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
6619 strq->last_msg_incomplete);
6620 strq->last_msg_incomplete = 0;
6624 SCTP_TCB_SEND_UNLOCK(stcb);
6629 if ((sp->msg_is_complete) && (sp->length == 0)) {
6630 if (sp->sender_all_done) {
6632	 * We are doing deferred cleanup. Last time through
6633 * when we took all the data the sender_all_done was
6636 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
6637 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
6638 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
6639 sp->sender_all_done,
6641 sp->msg_is_complete,
6645 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
6646 SCTP_TCB_SEND_LOCK(stcb);
6649 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
6650 TAILQ_REMOVE(&strq->outqueue, sp, next);
6651 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
6653 sctp_free_remote_addr(sp->net);
6657 sctp_m_freem(sp->data);
6660 sctp_free_a_strmoq(stcb, sp);
6661 /* we can't be locked to it */
6663 stcb->asoc.locked_on_sending = NULL;
6665 SCTP_TCB_SEND_UNLOCK(stcb);
6668 /* back to get the next msg */
6672 * sender just finished this but still holds a
6681 /* is there some to get */
6682 if (sp->length == 0) {
6688 } else if (sp->discard_rest) {
6689 if (send_lock_up == 0) {
6690 SCTP_TCB_SEND_LOCK(stcb);
6693 /* Whack down the size */
6694 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
6695	if ((stcb->sctp_socket != NULL) &&
6696 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
6697 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
6698 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
6701 sctp_m_freem(sp->data);
6703 sp->tail_mbuf = NULL;
6713 some_taken = sp->some_taken;
6714 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6715 sp->msg_is_complete = 1;
6718 length = sp->length;
6719 if (sp->msg_is_complete) {
6720 /* The message is complete */
6721 to_move = min(length, frag_point);
6722 if (to_move == length) {
6723 /* All of it fits in the MTU */
6724 if (sp->some_taken) {
6725 rcv_flags |= SCTP_DATA_LAST_FRAG;
6726 sp->put_last_out = 1;
6728 rcv_flags |= SCTP_DATA_NOT_FRAG;
6729 sp->put_last_out = 1;
6732 /* Not all of it fits, we fragment */
6733 if (sp->some_taken == 0) {
6734 rcv_flags |= SCTP_DATA_FIRST_FRAG;
6739 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
6742 * We use a snapshot of length in case it
6743 * is expanding during the compare.
6748 if (to_move >= llen) {
6750 if (send_lock_up == 0) {
6752 * We are taking all of an incomplete msg
6753 * thus we need a send lock.
6755 SCTP_TCB_SEND_LOCK(stcb);
6757 if (sp->msg_is_complete) {
6759 * the sender finished the
6766 if (sp->some_taken == 0) {
6767 rcv_flags |= SCTP_DATA_FIRST_FRAG;
6771 /* Nothing to take. */
6772 if (sp->some_taken) {
6781 /* If we reach here, we can copy out a chunk */
6782 sctp_alloc_a_chunk(stcb, chk);
6784 /* No chunk memory */
6790	 * Set up for unordered if needed by looking at the user-sent info
6793 if (sp->sinfo_flags & SCTP_UNORDERED) {
6794 rcv_flags |= SCTP_DATA_UNORDERED;
6796 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
6797 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
6798 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
6800 /* clear out the chunk before setting up */
6801 memset(chk, 0, sizeof(*chk));
6802 chk->rec.data.rcv_flags = rcv_flags;
6804 if (to_move >= length) {
6805 /* we think we can steal the whole thing */
6806 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
6807 SCTP_TCB_SEND_LOCK(stcb);
6810 if (to_move < sp->length) {
6811 /* bail, it changed */
6814 chk->data = sp->data;
6815 chk->last_mbuf = sp->tail_mbuf;
6816 /* register the stealing */
6817 sp->data = sp->tail_mbuf = NULL;
6822 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
6823 chk->last_mbuf = NULL;
6824 if (chk->data == NULL) {
6825 sp->some_taken = some_taken;
6826 sctp_free_a_chunk(stcb, chk);
6831 #ifdef SCTP_MBUF_LOGGING
6832 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6837 if (SCTP_BUF_IS_EXTENDED(mat)) {
6838 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6840 mat = SCTP_BUF_NEXT(mat);
6844 /* Pull off the data */
6845 m_adj(sp->data, to_move);
6846	/* Now let's work our way down and compact it */
6848 while (m && (SCTP_BUF_LEN(m) == 0)) {
6849 sp->data = SCTP_BUF_NEXT(m);
6850 SCTP_BUF_NEXT(m) = NULL;
6851 if (sp->tail_mbuf == m) {
6853 * Freeing tail? TSNH since
6854 * we supposedly were taking less
6855 * than the sp->length.
6858 panic("Huh, freing tail? - TSNH");
6860 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
6861 sp->tail_mbuf = sp->data = NULL;
6870 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
6871 chk->copy_by_ref = 1;
6873 chk->copy_by_ref = 0;
6876	 * get last_mbuf and counts of mbuf usage. This is ugly but hopefully
6877	 * it's only one mbuf.
6879 if (chk->last_mbuf == NULL) {
6880 chk->last_mbuf = chk->data;
6881 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
6882 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
6885 if (to_move > length) {
6886 /*- This should not happen either
6887 * since we always lower to_move to the size
6888	 * of sp->length if it's larger.
6891 panic("Huh, how can to_move be larger?");
6893 SCTP_PRINTF("Huh, how can to_move be larger?\n");
6897 atomic_subtract_int(&sp->length, to_move);
6899 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
6900 /* Not enough room for a chunk header, get some */
6903 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
6906 * we're in trouble here. _PREPEND below will free
6907 * all the data if there is no leading space, so we
6908 * must put the data back and restore.
6910 if (send_lock_up == 0) {
6911 SCTP_TCB_SEND_LOCK(stcb);
6914 if (chk->data == NULL) {
6915 /* unsteal the data */
6916 sp->data = chk->data;
6917 sp->tail_mbuf = chk->last_mbuf;
6921 /* reassemble the data */
6923 sp->data = chk->data;
6924 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
6926 sp->some_taken = some_taken;
6927 atomic_add_int(&sp->length, to_move);
6930 sctp_free_a_chunk(stcb, chk);
6934 SCTP_BUF_LEN(m) = 0;
6935 SCTP_BUF_NEXT(m) = chk->data;
6937 M_ALIGN(chk->data, 4);
6940 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
6941 if (chk->data == NULL) {
6942 /* HELP, TSNH since we assured it would not above? */
6944 panic("prepend failes HELP?");
6946 SCTP_PRINTF("prepend fails HELP?\n");
6947 sctp_free_a_chunk(stcb, chk);
6953 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
6954 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
6955 chk->book_size_scale = 0;
6956 chk->sent = SCTP_DATAGRAM_UNSENT;
6959 chk->asoc = &stcb->asoc;
6960 chk->pad_inplace = 0;
6961 chk->no_fr_allowed = 0;
6962 chk->rec.data.stream_seq = sp->strseq;
6963 chk->rec.data.stream_number = sp->stream;
6964 chk->rec.data.payloadtype = sp->ppid;
6965 chk->rec.data.context = sp->context;
6966 chk->rec.data.doing_fast_retransmit = 0;
6968 chk->rec.data.timetodrop = sp->ts;
6969 chk->flags = sp->act_flags;
6972 chk->whoTo = sp->net;
6973 atomic_add_int(&chk->whoTo->ref_count, 1);
6977 if (sp->holds_key_ref) {
6978 chk->auth_keyid = sp->auth_keyid;
6979 sctp_auth_key_acquire(stcb, chk->auth_keyid);
6980 chk->holds_key_ref = 1;
6982 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
6983 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
6984 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
6985 (uintptr_t) stcb, sp->length,
6986 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
6987 chk->rec.data.TSN_seq);
6989 dchkh = mtod(chk->data, struct sctp_data_chunk *);
6991 * Put the rest of the things in place now. Size was done earlier in
6992 * previous loop prior to padding.
6995 #ifdef SCTP_ASOCLOG_OF_TSNS
6996 SCTP_TCB_LOCK_ASSERT(stcb);
6997 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
6998 asoc->tsn_out_at = 0;
6999 asoc->tsn_out_wrapped = 1;
7001 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7002 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7003 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7004 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7005 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7006 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7007 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7008 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7012 dchkh->ch.chunk_type = SCTP_DATA;
7013 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7014 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7015 dchkh->dp.stream_id = htons(strq->stream_no);
7016 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7017 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7018 dchkh->ch.chunk_length = htons(chk->send_size);
7019 /* Now advance the chk->send_size by the actual pad needed. */
7020 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7025 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7026 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7027 chk->pad_inplace = 1;
7029 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7030 /* pad added an mbuf */
7031 chk->last_mbuf = lm;
7033 chk->send_size += pads;
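/*
 * Editorial worked example (not part of the original source): padding
 * a short chunk. If to_move = 10 user bytes, then book_size =
 * send_size = 10 + sizeof(struct sctp_data_chunk) = 26. SCTP_SIZE32()
 * rounds 26 up to 28, so pads = 2 and two pad bytes are appended (in
 * place when the last mbuf has room, otherwise via a new mbuf),
 * giving a 4-byte-aligned wire size of 28.
 */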
7035 /* We only re-set the policy if it is on */
7036 if (sp->pr_sctp_on) {
7037 sctp_set_prsctp_policy(sp);
7038 asoc->pr_sctp_cnt++;
7039 chk->pr_sctp_on = 1;
7041 chk->pr_sctp_on = 0;
7043 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7044 /* All done pull and kill the message */
7045 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7046 if (sp->put_last_out == 0) {
7047 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7048 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7049 sp->sender_all_done,
7051 sp->msg_is_complete,
7055 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7056 SCTP_TCB_SEND_LOCK(stcb);
7059 TAILQ_REMOVE(&strq->outqueue, sp, next);
7060 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7062 sctp_free_remote_addr(sp->net);
7066 sctp_m_freem(sp->data);
7069 sctp_free_a_strmoq(stcb, sp);
7071 /* we can't be locked to it */
7073 stcb->asoc.locked_on_sending = NULL;
7075 /* more to go, we are locked */
7078 asoc->chunks_on_out_queue++;
7079 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7080 asoc->send_queue_cnt++;
7083 SCTP_TCB_SEND_UNLOCK(stcb);
7091 sctp_fill_outqueue(struct sctp_tcb *stcb,
7092 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now)
7094 struct sctp_association *asoc;
7095 struct sctp_stream_out *strq, *strqn;
7096 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7099 SCTP_TCB_LOCK_ASSERT(stcb);
7102 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
7103 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7105 /* ?? not sure what else to do */
7106 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7109 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7111 /* Need an allowance for the data chunk header too */
7112 goal_mtu -= sizeof(struct sctp_data_chunk);
7114 /* must make even word boundary */
7115 goal_mtu &= 0xfffffffc;
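/*
 * Editorial worked example (not part of the original source): for an
 * IPv4 destination with net->mtu = 1500 and SCTP_MIN_V4_OVERHEAD
 * assumed to be 32 (IPv4 header plus common SCTP header):
 *
 *   goal_mtu = 1500 - 32 - sizeof(struct sctp_data_chunk)
 *            = 1468 - 16 = 1452
 *
 * which is already a multiple of 4, so the & 0xfffffffc alignment
 * step leaves it unchanged.
 */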
7116 if (asoc->locked_on_sending) {
7117 /* We are stuck on one stream until the message completes. */
7118 strq = asoc->locked_on_sending;
7121 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7125 while ((goal_mtu > 0) && strq) {
7128 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7129 &giveup, eeor_mode, &bail);
7131 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7134 asoc->locked_on_sending = strq;
7135 if ((moved_how_much == 0) || (giveup) || bail)
7136 /* no more to move for now */
7139 asoc->locked_on_sending = NULL;
7140 if ((giveup) || bail) {
7143 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7148 total_moved += moved_how_much;
7149 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7150 goal_mtu &= 0xfffffffc;
7155 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7157 if (total_moved == 0) {
7158 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7159 (net == stcb->asoc.primary_destination)) {
7160	/* ran dry for the primary network */
7161 SCTP_STAT_INCR(sctps_primary_randry);
7162 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7163 /* ran dry with CMT on */
7164 SCTP_STAT_INCR(sctps_cmt_randry);
7170 sctp_fix_ecn_echo(struct sctp_association *asoc)
7172 struct sctp_tmit_chunk *chk;
7174 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7175 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7176 chk->sent = SCTP_DATAGRAM_UNSENT;
7182 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7184 struct sctp_association *asoc;
7185 struct sctp_tmit_chunk *chk;
7186 struct sctp_stream_queue_pending *sp;
7193 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7194 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7195 if (sp->net == net) {
7196 sctp_free_remote_addr(sp->net);
7201 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7202 if (chk->whoTo == net) {
7203 sctp_free_remote_addr(chk->whoTo);
7210 sctp_med_chunk_output(struct sctp_inpcb *inp,
7211 struct sctp_tcb *stcb,
7212 struct sctp_association *asoc,
7215 int control_only, int from_where,
7216 struct timeval *now, int *now_filled, int frag_point, int so_locked
7217 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7223	 * OK, this is the generic chunk service queue. We must do the
7224	 * following: - Service the stream queue that is next, moving any
7225	 * message (note I must get a complete message, i.e. FIRST/MIDDLE and
7226	 * LAST, to the out queue in one pass) and assigning TSNs. - Check to
7227	 * see if the cwnd/rwnd allows any output; if so, we go ahead and
7228	 * formulate and send the low-level chunks, making sure to combine
7229	 * any control in the control chunk queue also.
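/*
 * Editorial roadmap (not part of the original source): the body below
 * proceeds in phases for each destination net: (1) prime the
 * send_queue from the stream queues, (2) ASCONF transmission, (3)
 * control chunk transmission, and (4) data transmission, bundling up
 * to SCTP_MAX_DATA_BUNDLING DATA chunks per packet before handing the
 * chain to sctp_lowlevel_chunk_output().
 */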
7231 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7232 struct mbuf *outchain, *endoutchain;
7233 struct sctp_tmit_chunk *chk, *nchk;
7235 /* temp arrays for unlinking */
7236 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7237 int no_fragmentflg, error;
7238 unsigned int max_rwnd_per_dest, max_send_per_dest;
7239 int one_chunk, hbflag, skip_data_for_this_net;
7240 int asconf, cookie, no_out_cnt;
7241 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7242 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7244 uint32_t auth_offset = 0;
7245 struct sctp_auth_chunk *auth = NULL;
7246 uint16_t auth_keyid;
7247 int override_ok = 1;
7248 int data_auth_reqd = 0;
7251 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7258 auth_keyid = stcb->asoc.authinfo.active_keyid;
7260 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7261 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7262 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7267 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7269	 * First let's prime the pump. For each destination, if there is room
7270 * in the flight size, attempt to pull an MTU's worth out of the
7271 * stream queues into the general send_queue
7273 #ifdef SCTP_AUDITING_ENABLED
7274 sctp_audit_log(0xC2, 2);
7276 SCTP_TCB_LOCK_ASSERT(stcb);
7278 if ((control_only) || (asoc->stream_reset_outstanding))
7283	/* Nothing possible to send? */
7284 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7285 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7286 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7287 TAILQ_EMPTY(&asoc->send_queue) &&
7288 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
7293 if (asoc->peers_rwnd == 0) {
7294 /* No room in peers rwnd */
7296 if (asoc->total_flight > 0) {
7297 /* we are allowed one chunk in flight */
7301 if (stcb->asoc.ecn_echo_cnt_onq) {
7302 /* Record where a sack goes, if any */
7303 if (no_data_chunks &&
7304 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7305 /* Nothing but ECNe to send - we don't do that */
7306 goto nothing_to_send;
7308 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7309 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7310 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7311 sack_goes_to = chk->whoTo;
7316 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7317 if (stcb->sctp_socket)
7318 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7320 max_send_per_dest = 0;
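/*
 * Editorial worked example (not part of the original source): with
 * peers_rwnd = 64000, total_flight = 16000 and numnets = 2,
 * max_rwnd_per_dest = (64000 + 16000) / 2 = 40000 bytes; with an
 * illustrative send-buffer limit of 32768, max_send_per_dest =
 * 32768 / 2 = 16384. These per-destination caps are consulted below
 * when CMT buffer splitting is enabled.
 */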
7321 if ((no_data_chunks == 0) &&
7322 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7323 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7325	 * This for loop we are in takes in each net; if
7326	 * it's got space in cwnd and has data sent to it
7327 * (when CMT is off) then it calls
7328 * sctp_fill_outqueue for the net. This gets data on
7329 * the send queue for that network.
7331 * In sctp_fill_outqueue TSN's are assigned and data is
7332 * copied out of the stream buffers. Note mostly
7333 * copy by reference (we hope).
7335 net->window_probe = 0;
7336 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ||
7337 (net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
7338 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7339 sctp_log_cwnd(stcb, net, 1,
7340 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7344 if ((asoc->sctp_cmt_on_off == 0) &&
7345 (asoc->primary_destination != net) &&
7346 (net->ref_count < 2)) {
7347 /* nothing can be in queue for this guy */
7348 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7349 sctp_log_cwnd(stcb, net, 2,
7350 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7354 if (net->flight_size >= net->cwnd) {
7355 /* skip this network, no room - can't fill */
7356 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7357 sctp_log_cwnd(stcb, net, 3,
7358 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7362 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7363 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7365 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now);
7367 /* memory alloc failure */
7373 /* now service each destination and send out what we can for it */
7374 /* Nothing to send? */
7375 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7376 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7377 TAILQ_EMPTY(&asoc->send_queue)) {
7381 if (asoc->sctp_cmt_on_off > 0) {
7382 /* get the last start point */
7383 start_at = asoc->last_net_cmt_send_started;
7384 if (start_at == NULL) {
7385	/* NULL, so start at the beginning */
7386 start_at = TAILQ_FIRST(&asoc->nets);
7388 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7389 if (start_at == NULL) {
7390 start_at = TAILQ_FIRST(&asoc->nets);
7393 asoc->last_net_cmt_send_started = start_at;
7395 start_at = TAILQ_FIRST(&asoc->nets);
7397 old_start_at = NULL;
7398 again_one_more_time:
7399 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7400 /* how much can we send? */
7401 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7402 if (old_start_at && (old_start_at == net)) {
7403	/* went through the list completely. */
7407 if ((asoc->sctp_cmt_on_off == 0) &&
7408 (asoc->primary_destination != net) &&
7409 (net->ref_count < 2)) {
7411 * Ref-count of 1 so we cannot have data or control
7412 * queued to this address. Skip it (non-CMT).
7416 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7417 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7418 (net->flight_size >= net->cwnd)) {
7420 * Nothing on control or asconf and flight is full,
7421 * we can skip even in the CMT case.
7425 ctl_cnt = bundle_at = 0;
7426 endoutchain = outchain = NULL;
7429 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7430 skip_data_for_this_net = 1;
7432 skip_data_for_this_net = 0;
7434 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7436	 * if we have a route and an ifp, check to see if we
7437 * have room to send to this guy
7441 ifp = net->ro.ro_rt->rt_ifp;
7442 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7443 SCTP_STAT_INCR(sctps_ifnomemqueued);
7444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7445 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7450 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7452 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7456 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7466 if (mtu > asoc->peers_rwnd) {
7467 if (asoc->total_flight > 0) {
7468 /* We have a packet in flight somewhere */
7469 r_mtu = asoc->peers_rwnd;
7471 /* We are always allowed to send one MTU out */
7478 /************************/
7479 /* ASCONF transmission */
7480 /************************/
7481	/* Now first let's go through the asconf queue */
7482 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7483 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
7486 if (chk->whoTo != net) {
7488 * No, not sent to the network we are
7493 if (chk->data == NULL) {
7496 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
7497 chk->sent != SCTP_DATAGRAM_RESEND) {
7501 * if no AUTH is yet included and this chunk
7502 * requires it, make sure to account for it. We
7503 * don't apply the size until the AUTH chunk is
7504 * actually added below in case there is no room for
7505 * this chunk. NOTE: we overload the use of "omtu"
7508 if ((auth == NULL) &&
7509 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7510 stcb->asoc.peer_auth_chunks)) {
7511 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7514 /* Here we do NOT factor the r_mtu */
7515 if ((chk->send_size < (int)(mtu - omtu)) ||
7516 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7518 * We probably should glom the mbuf chain
7519 * from the chk->data for control but the
7520 * problem is it becomes yet one more level
7521 * of tracking to do if for some reason
7522 * output fails. Then I have got to
7523 * reconstruct the merged control chain.. el
7524 * yucko.. for now we take the easy way and
7528 * Add an AUTH chunk, if chunk requires it
7529 * save the offset into the chain for AUTH
7531 if ((auth == NULL) &&
7532 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7533 stcb->asoc.peer_auth_chunks))) {
7534 outchain = sctp_add_auth_chunk(outchain,
7539 chk->rec.chunk_id.id);
7540 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7542 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
7543 (int)chk->rec.chunk_id.can_take_data,
7544 chk->send_size, chk->copy_by_ref);
7545 if (outchain == NULL) {
7547 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7550 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7551 /* update our MTU size */
7552 if (mtu > (chk->send_size + omtu))
7553 mtu -= (chk->send_size + omtu);
7556 to_out += (chk->send_size + omtu);
7557 /* Do clear IP_DF ? */
7558 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7561 if (chk->rec.chunk_id.can_take_data)
7564 * set hb flag since we can use these for
7570 * should sysctl this: don't bundle data
7571 * with ASCONF since it requires AUTH
7574 chk->sent = SCTP_DATAGRAM_SENT;
7578 * Ok we are out of room but we can
7579	 * output without affecting the
7580 * flight size since this little guy
7581 * is a control only packet.
7583 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
7585 * do NOT clear the asconf flag as
7586 * it is used to do appropriate
7587 * source address selection.
7589 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
7590 (struct sockaddr *)&net->ro._l_addr,
7591 outchain, auth_offset, auth,
7592 stcb->asoc.authinfo.active_keyid,
7593 no_fragmentflg, 0, NULL, asconf,
7594 inp->sctp_lport, stcb->rport,
7595 htonl(stcb->asoc.peer_vtag),
7596 net->port, so_locked, NULL, NULL))) {
7597 if (error == ENOBUFS) {
7598 asoc->ifp_had_enobuf = 1;
7599 SCTP_STAT_INCR(sctps_lowlevelerr);
7601 if (from_where == 0) {
7602 SCTP_STAT_INCR(sctps_lowlevelerrusr);
7604 if (*now_filled == 0) {
7605 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7607 *now = net->last_sent_time;
7609 net->last_sent_time = *now;
7612 /* error, could not output */
7613 if (error == EHOSTUNREACH) {
7619 sctp_move_chunks_from_net(stcb, net);
7624 asoc->ifp_had_enobuf = 0;
7625 if (*now_filled == 0) {
7626 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7628 *now = net->last_sent_time;
7630 net->last_sent_time = *now;
7634 * increase the number we sent, if a
7635 * cookie is sent we don't tell them
7638 outchain = endoutchain = NULL;
7642 *num_out += ctl_cnt;
7643 /* recalc a clean slate and setup */
7644 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
7645 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
7647 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
7654 /************************/
7655 /* Control transmission */
7656 /************************/
7657	/* Now first let's go through the control queue */
7658 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7659 if ((sack_goes_to) &&
7660 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
7661 (chk->whoTo != sack_goes_to)) {
7663 * if we have a sack in queue, and we are
7664 * looking at an ecn echo that is NOT queued
7665 * to where the sack is going..
7667 if (chk->whoTo == net) {
7669 * Don't transmit it to where its
7670 * going (current net)
7673 } else if (sack_goes_to == net) {
7675 * But do transmit it to this
7678 goto skip_net_check;
7681 if (chk->whoTo != net) {
7683 * No, not sent to the network we are
7689 if (chk->data == NULL) {
7692 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
7694 * It must be unsent. Cookies and ASCONF's
7695	 * hang around, but their timers will force
7696	 * them out when marked for resend.
7701 * if no AUTH is yet included and this chunk
7702 * requires it, make sure to account for it. We
7703 * don't apply the size until the AUTH chunk is
7704 * actually added below in case there is no room for
7705 * this chunk. NOTE: we overload the use of "omtu"
7708 if ((auth == NULL) &&
7709 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7710 stcb->asoc.peer_auth_chunks)) {
7711 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7714 /* Here we do NOT factor the r_mtu */
7715 if ((chk->send_size <= (int)(mtu - omtu)) ||
7716 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7718 * We probably should glom the mbuf chain
7719 * from the chk->data for control but the
7720 * problem is it becomes yet one more level
7721 * of tracking to do if for some reason
7722 * output fails. Then I have got to
7723 * reconstruct the merged control chain.. el
7724 * yucko.. for now we take the easy way and
7728 * Add an AUTH chunk, if chunk requires it
7729 * save the offset into the chain for AUTH
7731 if ((auth == NULL) &&
7732 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7733 stcb->asoc.peer_auth_chunks))) {
7734 outchain = sctp_add_auth_chunk(outchain,
7739 chk->rec.chunk_id.id);
7740 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7742 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
7743 (int)chk->rec.chunk_id.can_take_data,
7744 chk->send_size, chk->copy_by_ref);
7745 if (outchain == NULL) {
7747 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7750 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7751 /* update our MTU size */
7752 if (mtu > (chk->send_size + omtu))
7753 mtu -= (chk->send_size + omtu);
7756 to_out += (chk->send_size + omtu);
7757 /* Do clear IP_DF ? */
7758 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7761 if (chk->rec.chunk_id.can_take_data)
7763 /* Mark things to be removed, if needed */
7764 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7765 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7766 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7767 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7768 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7769 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7770 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7771 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7772 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7773 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7774 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7776 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
7779 * JRS 5/14/07 - Set the
7780 * flag to say a heartbeat
7785 /* remove these chunks at the end */
7786 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7787 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7788 /* turn off the timer */
7789 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
7790 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7791 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
7797 * Other chunks, since they have
7798 * timers running (i.e. COOKIE) we
7799 * just "trust" that it gets sent or
7803 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7806 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7808 * Increment ecne send count
7809	 * here; this means we may be
7810 * over-zealous in our
7811 * counting if the send
7812	 * fails, but it's the best
7813 * place to do it (we used
7814 * to do it in the queue of
7815 * the chunk, but that did
7816 * not tell how many times
7819 SCTP_STAT_INCR(sctps_sendecne);
7821 chk->sent = SCTP_DATAGRAM_SENT;
7826 * Ok we are out of room but we can
7827	 * output without affecting the
7828 * flight size since this little guy
7829 * is a control only packet.
7832 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
7834 * do NOT clear the asconf
7835 * flag as it is used to do
7836 * appropriate source
7837 * address selection.
7841 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
7844 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
7845 (struct sockaddr *)&net->ro._l_addr,
7848 stcb->asoc.authinfo.active_keyid,
7849 no_fragmentflg, 0, NULL, asconf,
7850 inp->sctp_lport, stcb->rport,
7851 htonl(stcb->asoc.peer_vtag),
7852 net->port, so_locked, NULL, NULL))) {
7853 if (error == ENOBUFS) {
7854 asoc->ifp_had_enobuf = 1;
7855 SCTP_STAT_INCR(sctps_lowlevelerr);
7857 if (from_where == 0) {
7858 SCTP_STAT_INCR(sctps_lowlevelerrusr);
7860 /* error, could not output */
7862 if (*now_filled == 0) {
7863 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7865 *now = net->last_sent_time;
7867 net->last_sent_time = *now;
7871 if (error == EHOSTUNREACH) {
7877 sctp_move_chunks_from_net(stcb, net);
7882 asoc->ifp_had_enobuf = 0;
7883 /* Only HB or ASCONF advances time */
7885 if (*now_filled == 0) {
7886 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7888 *now = net->last_sent_time;
7890 net->last_sent_time = *now;
7895 * increase the number we sent, if a
7896 * cookie is sent we don't tell them
7899 outchain = endoutchain = NULL;
7903 *num_out += ctl_cnt;
7904 /* recalc a clean slate and setup */
7905 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
7906 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
7908 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
7915 /* JRI: if dest is in PF state, do not send data to it */
7916 if ((asoc->sctp_cmt_on_off > 0) &&
7917 (asoc->sctp_cmt_pf > 0) &&
7918 (net->dest_state & SCTP_ADDR_PF)) {
7921 if (net->flight_size >= net->cwnd) {
7924 if ((asoc->sctp_cmt_on_off > 0) &&
7925 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
7926 (net->flight_size > max_rwnd_per_dest)) {
7930 * We need a specific accounting for the usage of the send
7931 * buffer. We also need to check the number of messages per
7932	 * net. For now, this is better than nothing and it is disabled
7935 if ((asoc->sctp_cmt_on_off > 0) &&
7936 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
7937 (max_send_per_dest > 0) &&
7938 (net->flight_size > max_send_per_dest)) {
7941 /*********************/
7942 /* Data transmission */
7943 /*********************/
7945 * if AUTH for DATA is required and no AUTH has been added
7946 * yet, account for this in the mtu now... if no data can be
7947	 * bundled, this adjustment won't matter anyway since the
7948 * packet will be going out...
7950 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
7951 stcb->asoc.peer_auth_chunks);
7952 if (data_auth_reqd && (auth == NULL)) {
7953 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7955	/* now let's add any data within the MTU constraints */
7956 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7958 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
7959 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7965 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
7966 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7976 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
7977 (skip_data_for_this_net == 0)) ||
7979 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
7980 if (no_data_chunks) {
7981 /* let only control go out */
7985 if (net->flight_size >= net->cwnd) {
7986 /* skip this net, no room for data */
7990 if ((chk->whoTo != NULL) &&
7991 (chk->whoTo != net)) {
7992 /* Don't send the chunk on this net */
7995 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
7997 * strange, we have a chunk that is
7998	 * too big for its destination and
7999 * yet no fragment ok flag.
8000 * Something went wrong when the
8001 * PMTU changed...we did not mark
8002 * this chunk for some reason?? I
8003 * will fix it here by letting IP
8004 * fragment it for now and printing
8005 * a warning. This really should not
8008 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8009 chk->send_size, mtu);
8010 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8012 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8013 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8014 struct sctp_data_chunk *dchkh;
8016 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8017 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8019 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8020 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8021 /* ok we will add this one */
8024 * Add an AUTH chunk, if chunk
8025 * requires it, save the offset into
8026 * the chain for AUTH
8028 if (data_auth_reqd) {
8030 outchain = sctp_add_auth_chunk(outchain,
8036 auth_keyid = chk->auth_keyid;
8038 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8039 } else if (override_ok) {
8044 auth_keyid = chk->auth_keyid;
8046 } else if (auth_keyid != chk->auth_keyid) {
8054 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8055 chk->send_size, chk->copy_by_ref);
8056 if (outchain == NULL) {
8057 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8058 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8059 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8062 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8065	/* update our MTU size */
8066 /* Do clear IP_DF ? */
8067 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8070 /* unsigned subtraction of mtu */
8071 if (mtu > chk->send_size)
8072 mtu -= chk->send_size;
8075 /* unsigned subtraction of r_mtu */
8076 if (r_mtu > chk->send_size)
8077 r_mtu -= chk->send_size;
8081 to_out += chk->send_size;
8082 if ((to_out > mx_mtu) && no_fragmentflg) {
8084 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8086 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8090 chk->window_probe = 0;
8091 data_list[bundle_at++] = chk;
8092 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8096 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8097 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8098 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8100 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8102 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8103 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8113 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8115 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8116 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8117 data_list[0]->window_probe = 1;
8118 net->window_probe = 1;
8124 * Must be sent in order of the
8125 * TSN's (on a network)
8129 } /* for (chunk gather loop for this net) */
8130 } /* if asoc.state OPEN */
8132 /* Is there something to send for this destination? */
8134 /* We may need to start a control timer or two */
8136 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8139 * do NOT clear the asconf flag as it is
8140 * used to do appropriate source address
8145 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8148 /* must start a send timer if data is being sent */
8149 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8151 * no timer running on this destination
8154 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8155 } else if ((asoc->sctp_cmt_on_off > 0) &&
8156 (asoc->sctp_cmt_pf > 0) &&
8158 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) &&
8159 (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8161 * JRS 5/14/07 - If a HB has been sent to a
8162 * PF destination and no T3 timer is
8163 * currently running, start the T3 timer to
8164 * track the HBs that were sent.
8166 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8168 /* Now send it, if there is anything to send :> */
8169 if ((error = sctp_lowlevel_chunk_output(inp,
8172 (struct sockaddr *)&net->ro._l_addr,
8181 inp->sctp_lport, stcb->rport,
8182 htonl(stcb->asoc.peer_vtag),
8183 net->port, so_locked, NULL, NULL))) {
8184 /* error, we could not output */
8185 if (error == ENOBUFS) {
8186 SCTP_STAT_INCR(sctps_lowlevelerr);
8187 asoc->ifp_had_enobuf = 1;
8189 if (from_where == 0) {
8190 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8192 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8194 if (*now_filled == 0) {
8195 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8197 *now = net->last_sent_time;
8199 net->last_sent_time = *now;
8203 if (error == EHOSTUNREACH) {
8205 * Destination went unreachable
8208 sctp_move_chunks_from_net(stcb, net);
8212 * I add this line to be paranoid. As far as
8213	 * I can tell, the continue takes us back to
8214 * the top of the for, but just to make sure
8215 * I will reset these again here.
8217 ctl_cnt = bundle_at = 0;
8218 continue; /* This takes us back to the
8219 * for() for the nets. */
8221 asoc->ifp_had_enobuf = 0;
8223 outchain = endoutchain = NULL;
8226 if (bundle_at || hbflag) {
8227 /* For data/asconf and hb set time */
8228 if (*now_filled == 0) {
8229 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8231 *now = net->last_sent_time;
8233 net->last_sent_time = *now;
8237 *num_out += (ctl_cnt + bundle_at);
8240	/* set up for an RTO measurement */
8241 tsns_sent = data_list[0]->rec.data.TSN_seq;
8242 /* fill time if not already filled */
8243 if (*now_filled == 0) {
8244 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8246 *now = asoc->time_last_sent;
8248 asoc->time_last_sent = *now;
8250 data_list[0]->do_rtt = 1;
8251 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8252 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8253 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8254 if (net->flight_size < net->cwnd) {
8255 /* start or restart it */
8256 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8257 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
8258 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
8260 SCTP_STAT_INCR(sctps_earlyfrstrout);
8261 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
8263	/* stop it if it's running */
8264 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8265 SCTP_STAT_INCR(sctps_earlyfrstpout);
8266 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
8267 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
8276 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8277 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8280 if (old_start_at == NULL) {
8281 old_start_at = start_at;
8282 start_at = TAILQ_FIRST(&asoc->nets);
8284 goto again_one_more_time;
8287 * At the end there should be no NON timed chunks hanging on this
8290 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8291 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8293 if ((*num_out == 0) && (*reason_code == 0)) {
8298 sctp_clean_up_ctl(stcb, asoc);
8303 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8306	 * Prepend an OPERATION_ERROR chunk header and put it on the end of
8307 * the control chunk queue.
8309 struct sctp_chunkhdr *hdr;
8310 struct sctp_tmit_chunk *chk;
8313 SCTP_TCB_LOCK_ASSERT(stcb);
8314 sctp_alloc_a_chunk(stcb, chk);
8317 sctp_m_freem(op_err);
8320 chk->copy_by_ref = 0;
8321 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
8322 if (op_err == NULL) {
8323 sctp_free_a_chunk(stcb, chk);
8328 while (mat != NULL) {
8329 chk->send_size += SCTP_BUF_LEN(mat);
8330 mat = SCTP_BUF_NEXT(mat);
8332 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8333 chk->rec.chunk_id.can_take_data = 1;
8334 chk->sent = SCTP_DATAGRAM_UNSENT;
8337 chk->asoc = &stcb->asoc;
8339 chk->whoTo = chk->asoc->primary_destination;
8340 atomic_add_int(&chk->whoTo->ref_count, 1);
8341 hdr = mtod(op_err, struct sctp_chunkhdr *);
8342 hdr->chunk_type = SCTP_OPERATION_ERROR;
8343 hdr->chunk_flags = 0;
8344 hdr->chunk_length = htons(chk->send_size);
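/*
 * Resulting wire image (a sketch; sizes per RFC 4960): a 4-byte chunk
 * header (type 0x09 OPERATION-ERROR, flags 0, length covering header
 * plus causes) followed by the error causes already carried in op_err.
 */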
8345 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8348 chk->asoc->ctrl_queue_cnt++;
8352 sctp_send_cookie_echo(struct mbuf *m,
8354 struct sctp_tcb *stcb,
8355 struct sctp_nets *net)
8356 {
8357 /*-
8358 * pull out the cookie and put it at the front of the control chunk
8359 * queue.
8360 */
8361 int at;
8362 struct mbuf *cookie;
8363 struct sctp_paramhdr parm, *phdr;
8364 struct sctp_chunkhdr *hdr;
8365 struct sctp_tmit_chunk *chk;
8366 uint16_t ptype, plen;
8368 /* First find the cookie in the param area */
8370 at = offset + sizeof(struct sctp_init_chunk);
8372 SCTP_TCB_LOCK_ASSERT(stcb);
8374 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8378 ptype = ntohs(phdr->param_type);
8379 plen = ntohs(phdr->param_length);
8380 if (ptype == SCTP_STATE_COOKIE) {
8383 /* found the cookie */
8384 if ((pad = (plen % 4))) {
8385 plen += 4 - pad;
8386 }
8387 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
8388 if (cookie == NULL) {
8392 #ifdef SCTP_MBUF_LOGGING
8393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8398 if (SCTP_BUF_IS_EXTENDED(mat)) {
8399 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8401 mat = SCTP_BUF_NEXT(mat);
8407 at += SCTP_SIZE32(plen);
8409 if (cookie == NULL) {
8410 /* Did not find the cookie */
8413 /* ok, we got the cookie; let's change it into a COOKIE-ECHO chunk */
8415 /* first the change from param to cookie */
8416 hdr = mtod(cookie, struct sctp_chunkhdr *);
8417 hdr->chunk_type = SCTP_COOKIE_ECHO;
8418 hdr->chunk_flags = 0;
8419 /* get the chunk stuff now and place it in the FRONT of the queue */
8420 sctp_alloc_a_chunk(stcb, chk);
8423 sctp_m_freem(cookie);
8426 chk->copy_by_ref = 0;
8427 chk->send_size = plen;
8428 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8429 chk->rec.chunk_id.can_take_data = 0;
8430 chk->sent = SCTP_DATAGRAM_UNSENT;
8432 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8433 chk->asoc = &stcb->asoc;
8435 chk->whoTo = chk->asoc->primary_destination;
8436 atomic_add_int(&chk->whoTo->ref_count, 1);
8437 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
8438 chk->asoc->ctrl_queue_cnt++;
8443 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
8447 struct sctp_nets *net)
8450 * take a HB request and make it into a HB ack and send it.
8452 struct mbuf *outchain;
8453 struct sctp_chunkhdr *chdr;
8454 struct sctp_tmit_chunk *chk;
8458 /* must have a net pointer */
8461 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
8462 if (outchain == NULL) {
8463 /* gak out of memory */
8466 #ifdef SCTP_MBUF_LOGGING
8467 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8472 if (SCTP_BUF_IS_EXTENDED(mat)) {
8473 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8475 mat = SCTP_BUF_NEXT(mat);
8479 chdr = mtod(outchain, struct sctp_chunkhdr *);
8480 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
8481 chdr->chunk_flags = 0;
8482 if (chk_length % 4) {
8484 uint32_t cpthis = 0;
8487 padlen = 4 - (chk_length % 4);
8488 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
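/*
 * Example (illustrative numbers): a 46-byte heartbeat chunk gets
 * padlen = 4 - (46 % 4) = 2 bytes of zeros appended, so the chunk
 * ends on the 4-byte boundary RFC 4960 requires; the chunk_length
 * field itself still carries the unpadded length.
 */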
8490 sctp_alloc_a_chunk(stcb, chk);
8493 sctp_m_freem(outchain);
8496 chk->copy_by_ref = 0;
8497 chk->send_size = chk_length;
8498 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
8499 chk->rec.chunk_id.can_take_data = 1;
8500 chk->sent = SCTP_DATAGRAM_UNSENT;
8503 chk->asoc = &stcb->asoc;
8504 chk->data = outchain;
8506 atomic_add_int(&chk->whoTo->ref_count, 1);
8507 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8508 chk->asoc->ctrl_queue_cnt++;
8512 sctp_send_cookie_ack(struct sctp_tcb *stcb)
8514 /* formulate and queue a cookie-ack back to sender */
8515 struct mbuf *cookie_ack;
8516 struct sctp_chunkhdr *hdr;
8517 struct sctp_tmit_chunk *chk;
8520 SCTP_TCB_LOCK_ASSERT(stcb);
8522 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
8523 if (cookie_ack == NULL) {
8527 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
8528 sctp_alloc_a_chunk(stcb, chk);
8531 sctp_m_freem(cookie_ack);
8534 chk->copy_by_ref = 0;
8535 chk->send_size = sizeof(struct sctp_chunkhdr);
8536 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
8537 chk->rec.chunk_id.can_take_data = 1;
8538 chk->sent = SCTP_DATAGRAM_UNSENT;
8541 chk->asoc = &stcb->asoc;
8542 chk->data = cookie_ack;
8543 if (chk->asoc->last_control_chunk_from != NULL) {
8544 chk->whoTo = chk->asoc->last_control_chunk_from;
8546 chk->whoTo = chk->asoc->primary_destination;
8548 atomic_add_int(&chk->whoTo->ref_count, 1);
8549 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
8550 hdr->chunk_type = SCTP_COOKIE_ACK;
8551 hdr->chunk_flags = 0;
8552 hdr->chunk_length = htons(chk->send_size);
8553 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
8554 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8555 chk->asoc->ctrl_queue_cnt++;
8561 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
8563 /* formulate and queue a SHUTDOWN-ACK back to the sender */
8564 struct mbuf *m_shutdown_ack;
8565 struct sctp_shutdown_ack_chunk *ack_cp;
8566 struct sctp_tmit_chunk *chk;
8568 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
8569 if (m_shutdown_ack == NULL) {
8573 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
8574 sctp_alloc_a_chunk(stcb, chk);
8577 sctp_m_freem(m_shutdown_ack);
8580 chk->copy_by_ref = 0;
8581 chk->send_size = sizeof(struct sctp_chunkhdr);
8582 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
8583 chk->rec.chunk_id.can_take_data = 1;
8584 chk->sent = SCTP_DATAGRAM_UNSENT;
8587 chk->asoc = &stcb->asoc;
8588 chk->data = m_shutdown_ack;
8590 atomic_add_int(&net->ref_count, 1);
8592 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
8593 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
8594 ack_cp->ch.chunk_flags = 0;
8595 ack_cp->ch.chunk_length = htons(chk->send_size);
8596 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
8597 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8598 chk->asoc->ctrl_queue_cnt++;
8603 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
8605 /* formulate and queue a SHUTDOWN to the sender */
8606 struct mbuf *m_shutdown;
8607 struct sctp_shutdown_chunk *shutdown_cp;
8608 struct sctp_tmit_chunk *chk;
8610 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
8611 if (m_shutdown == NULL) {
8615 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
8616 sctp_alloc_a_chunk(stcb, chk);
8619 sctp_m_freem(m_shutdown);
8622 chk->copy_by_ref = 0;
8623 chk->send_size = sizeof(struct sctp_shutdown_chunk);
8624 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
8625 chk->rec.chunk_id.can_take_data = 1;
8626 chk->sent = SCTP_DATAGRAM_UNSENT;
8629 chk->asoc = &stcb->asoc;
8630 chk->data = m_shutdown;
8632 atomic_add_int(&net->ref_count, 1);
8634 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
8635 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
8636 shutdown_cp->ch.chunk_flags = 0;
8637 shutdown_cp->ch.chunk_length = htons(chk->send_size);
8638 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
8639 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
8640 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8641 chk->asoc->ctrl_queue_cnt++;
8646 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
8649 * formulate and queue an ASCONF to the peer. ASCONF parameters
8650 * should be queued on the assoc queue.
8652 struct sctp_tmit_chunk *chk;
8653 struct mbuf *m_asconf;
8656 SCTP_TCB_LOCK_ASSERT(stcb);
8658 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
8659 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
8660 /* can't send a new one if there is one in flight already */
8663 /* compose an ASCONF chunk, maximum length is PMTU */
8664 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
8665 if (m_asconf == NULL) {
8668 sctp_alloc_a_chunk(stcb, chk);
8671 sctp_m_freem(m_asconf);
8674 chk->copy_by_ref = 0;
8675 chk->data = m_asconf;
8676 chk->send_size = len;
8677 chk->rec.chunk_id.id = SCTP_ASCONF;
8678 chk->rec.chunk_id.can_take_data = 0;
8679 chk->sent = SCTP_DATAGRAM_UNSENT;
8681 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8682 chk->asoc = &stcb->asoc;
8684 atomic_add_int(&chk->whoTo->ref_count, 1);
8685 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
8686 chk->asoc->ctrl_queue_cnt++;
8691 sctp_send_asconf_ack(struct sctp_tcb *stcb)
8694 * formulate and queue a asconf-ack back to sender. the asconf-ack
8695 * must be stored in the tcb.
8697 struct sctp_tmit_chunk *chk;
8698 struct sctp_asconf_ack *ack, *latest_ack;
8699 struct mbuf *m_ack, *m;
8700 struct sctp_nets *net = NULL;
8702 SCTP_TCB_LOCK_ASSERT(stcb);
8703 /* Get the latest ASCONF-ACK */
8704 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
8705 if (latest_ack == NULL) {
8708 if (latest_ack->last_sent_to != NULL &&
8709 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
8710 /* we're doing a retransmission */
8711 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
8714 if (stcb->asoc.last_control_chunk_from == NULL)
8715 net = stcb->asoc.primary_destination;
8717 net = stcb->asoc.last_control_chunk_from;
8721 if (stcb->asoc.last_control_chunk_from == NULL)
8722 net = stcb->asoc.primary_destination;
8724 net = stcb->asoc.last_control_chunk_from;
8726 latest_ack->last_sent_to = net;
8728 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
8729 if (ack->data == NULL) {
8732 /* copy the asconf_ack */
8733 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
8734 if (m_ack == NULL) {
8735 /* couldn't copy it */
8738 #ifdef SCTP_MBUF_LOGGING
8739 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8744 if (SCTP_BUF_IS_EXTENDED(mat)) {
8745 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8747 mat = SCTP_BUF_NEXT(mat);
8752 sctp_alloc_a_chunk(stcb, chk);
8756 sctp_m_freem(m_ack);
8759 chk->copy_by_ref = 0;
8766 chk->send_size = ack->len;
8767 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
8768 chk->rec.chunk_id.can_take_data = 1;
8769 chk->sent = SCTP_DATAGRAM_UNSENT;
8771 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
8772 chk->asoc = &stcb->asoc;
8773 atomic_add_int(&chk->whoTo->ref_count, 1);
8775 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8776 chk->asoc->ctrl_queue_cnt++;
8783 sctp_chunk_retransmission(struct sctp_inpcb *inp,
8784 struct sctp_tcb *stcb,
8785 struct sctp_association *asoc,
8786 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
8787 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8793 * send out one MTU of retransmission. If fast_retransmit is
8794 * happening we ignore the cwnd. Otherwise we obey the cwnd and
8795 * rwnd. For a Cookie or Asconf in the control chunk queue we
8796 * retransmit them by themselves.
8798 * For data chunks we will pick out the lowest TSNs in the sent_queue
8799 * marked for resend and bundle them all together (up to one MTU of
8800 * the destination). The address to send to should have been
8801 * selected/changed where the retransmission was marked (i.e. in FR
8802 * or t3-timeout routines).
8803 */
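/*
 * Worked sketch (numbers assumed for illustration): with a 1500-byte
 * path MTU, roughly 1448 bytes remain after IPv6 and SCTP common
 * headers, so several small chunks marked SCTP_DATAGRAM_RESEND can be
 * bundled into data_list[], bounded by SCTP_MAX_DATA_BUNDLING.
 */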
8804 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8805 struct sctp_tmit_chunk *chk, *fwd;
8806 struct mbuf *m, *endofchain;
8807 struct sctp_nets *net = NULL;
8808 uint32_t tsns_sent = 0;
8809 int no_fragmentflg, bundle_at, cnt_thru;
8811 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
8812 struct sctp_auth_chunk *auth = NULL;
8813 uint32_t auth_offset = 0;
8814 uint16_t auth_keyid;
8815 int override_ok = 1;
8816 int data_auth_reqd = 0;
8819 SCTP_TCB_LOCK_ASSERT(stcb);
8820 tmr_started = ctl_cnt = bundle_at = error = 0;
8825 endofchain = m = NULL;
8826 auth_keyid = stcb->asoc.authinfo.active_keyid;
8827 #ifdef SCTP_AUDITING_ENABLED
8828 sctp_audit_log(0xC3, 1);
8830 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
8831 (TAILQ_EMPTY(&asoc->control_send_queue))) {
8832 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
8833 asoc->sent_queue_retran_cnt);
8834 asoc->sent_queue_cnt = 0;
8835 asoc->sent_queue_cnt_removeable = 0;
8836 /* send back 0/0 so we enter normal transmission */
8840 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8841 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
8842 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
8843 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
8844 if (chk->sent != SCTP_DATAGRAM_RESEND) {
8845 continue;
8846 }
8847 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
8848 if (chk != asoc->str_reset) {
8849 /*
8850 * not eligible for retran if it's
8851 * not ours
8852 */
8853 continue;
8854 }
8855 }
8857 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
8858 fwd_tsn = 1;
8859 fwd = chk;
8860 }
8861 /*
8862 * Add an AUTH chunk if this chunk requires it, and save the
8863 * offset into the chain for AUTH.
8864 */
8865 if ((auth == NULL) &&
8866 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8867 stcb->asoc.peer_auth_chunks))) {
8868 m = sctp_add_auth_chunk(m, &endofchain,
8869 &auth, &auth_offset,
8871 chk->rec.chunk_id.id);
8872 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8874 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
8880 /* do we have control chunks to retransmit? */
8882 /* Start a timer no matter if we succeed or fail */
8883 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8884 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
8885 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
8886 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
8887 chk->snd_count++; /* update our count */
8888 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
8889 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
8890 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
8891 no_fragmentflg, 0, NULL, 0,
8892 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
8893 chk->whoTo->port, so_locked, NULL, NULL))) {
8894 SCTP_STAT_INCR(sctps_lowlevelerr);
8897 m = endofchain = NULL;
8901 * We don't want to mark the net->last_sent_time here, since we
8902 * use that for HB and retransmissions cannot measure RTT.
8903 */
8904 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
8906 chk->sent = SCTP_DATAGRAM_SENT;
8907 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
8911 /* Clean up the fwd-tsn list */
8912 sctp_clean_up_ctl(stcb, asoc);
8917 * Ok, it is just data retransmission we need to do or that and a
8918 * fwd-tsn with it all.
8920 if (TAILQ_EMPTY(&asoc->sent_queue)) {
8921 return (SCTP_RETRAN_DONE);
8923 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
8924 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
8925 /* not yet open, resend the cookie and that is it */
8928 #ifdef SCTP_AUDITING_ENABLED
8929 sctp_auditing(20, inp, stcb, NULL);
8931 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
8932 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
8933 if (chk->sent != SCTP_DATAGRAM_RESEND) {
8934 /* No, not sent to this net or not ready for rtx */
8937 if (chk->data == NULL) {
8938 printf("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
8939 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
8942 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
8943 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
8944 /* Gak, we have exceeded max unlucky retran, abort! */
8945 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
8947 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
8948 atomic_add_int(&stcb->asoc.refcnt, 1);
8949 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
8950 SCTP_TCB_LOCK(stcb);
8951 atomic_subtract_int(&stcb->asoc.refcnt, 1);
8952 return (SCTP_RETRAN_EXIT);
8954 /* pick up the net */
8956 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
8957 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
8958 } else {
8959 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8960 }
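/*
 * Example (assuming the usual 1500-byte Ethernet MTU and the common
 * definitions, IPv6 40 + SCTP 12 = 52 bytes, IPv4 20 + SCTP 12 = 32
 * bytes): the V6-bound case leaves about 1448 bytes of chunk space
 * per packet, the V4 case about 1468.
 */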
8962 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
8963 /* No room in peers rwnd */
8966 tsn = asoc->last_acked_seq + 1;
8967 if (tsn == chk->rec.data.TSN_seq) {
8969 * we make a special exception for this
8970 * case. The peer has no rwnd but is missing
8971 * the lowest chunk.. which is probably what
8972 * is holding up the rwnd.
8974 goto one_chunk_around;
8978 one_chunk_around:
8979 if (asoc->peers_rwnd < mtu) {
8980 one_chunk = 1;
8981 if ((asoc->peers_rwnd == 0) &&
8982 (asoc->total_flight == 0)) {
8983 chk->window_probe = 1;
8984 chk->whoTo->window_probe = 1;
8987 #ifdef SCTP_AUDITING_ENABLED
8988 sctp_audit_log(0xC3, 2);
8992 net->fast_retran_ip = 0;
8993 if (chk->rec.data.doing_fast_retransmit == 0) {
8995 * if no FR is in progress, skip destinations that have
8996 * flight_size > cwnd.
8998 if (net->flight_size >= net->cwnd) {
9003 * Mark the destination net to have FR recovery
9007 net->fast_retran_ip = 1;
9011 * if no AUTH is yet included and this chunk requires it,
9012 * make sure to account for it. We don't apply the size
9013 * until the AUTH chunk is actually added below in case
9014 * there is no room for this chunk.
9016 if (data_auth_reqd && (auth == NULL)) {
9017 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9021 if ((chk->send_size <= (mtu - dmtu)) ||
9022 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9023 /* ok we will add this one */
9024 if (data_auth_reqd) {
9026 m = sctp_add_auth_chunk(m,
9032 auth_keyid = chk->auth_keyid;
9034 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9035 } else if (override_ok) {
9036 auth_keyid = chk->auth_keyid;
9038 } else if (chk->auth_keyid != auth_keyid) {
9039 /* different keyid, so done bundling */
9043 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9045 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9048 /* Do clear IP_DF ? */
9049 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9052 /* update our MTU size */
9053 if (mtu > (chk->send_size + dmtu))
9054 mtu -= (chk->send_size + dmtu);
9057 data_list[bundle_at++] = chk;
9058 if (one_chunk && (asoc->total_flight <= 0)) {
9059 SCTP_STAT_INCR(sctps_windowprobed);
9062 if (one_chunk == 0) {
9063 /*
9064 * now, are there any more forward from chk to pick
9065 * up?
9066 */
9067 fwd = TAILQ_NEXT(chk, sctp_next);
9069 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9070 /* Nope, not for retran */
9071 fwd = TAILQ_NEXT(fwd, sctp_next);
9072 continue;
9073 }
9074 if (fwd->whoTo != net) {
9075 /* Nope, not the net in question */
9076 fwd = TAILQ_NEXT(fwd, sctp_next);
9077 continue;
9078 }
9079 if (data_auth_reqd && (auth == NULL)) {
9080 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9083 if (fwd->send_size <= (mtu - dmtu)) {
9084 if (data_auth_reqd) {
9086 m = sctp_add_auth_chunk(m,
9092 auth_keyid = fwd->auth_keyid;
9094 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9095 } else if (override_ok) {
9096 auth_keyid = fwd->auth_keyid;
9098 } else if (fwd->auth_keyid != auth_keyid) {
9106 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9108 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9111 /* Do clear IP_DF ? */
9112 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9115 /* update our MTU size */
9116 if (mtu > (fwd->send_size + dmtu))
9117 mtu -= (fwd->send_size + dmtu);
9120 data_list[bundle_at++] = fwd;
9121 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9124 fwd = TAILQ_NEXT(fwd, sctp_next);
9126 /* can't fit so we are done */
9131 /* Is there something to send for this destination? */
9134 * No matter if we fail or succeed, we should start a
9135 * timer. A failure is like a lost IP packet :-)
9136 */
9137 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9139 * no timer running on this destination
9142 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9145 /* Now lets send it, if there is anything to send :> */
9146 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9147 (struct sockaddr *)&net->ro._l_addr, m,
9148 auth_offset, auth, auth_keyid,
9149 no_fragmentflg, 0, NULL, 0,
9150 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9151 net->port, so_locked, NULL, NULL))) {
9152 /* error, we could not output */
9153 SCTP_STAT_INCR(sctps_lowlevelerr);
9156 m = endofchain = NULL;
9161 * We don't want to mark the net->last_sent_time here,
9162 * since we use that for HB and retransmissions cannot
9163 * measure RTT.
9164 */
9165 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9167 /* For auto-close */
9169 if (*now_filled == 0) {
9170 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9171 *now = asoc->time_last_sent;
9174 asoc->time_last_sent = *now;
9176 *cnt_out += bundle_at;
9177 #ifdef SCTP_AUDITING_ENABLED
9178 sctp_audit_log(0xC4, bundle_at);
9181 tsns_sent = data_list[0]->rec.data.TSN_seq;
9183 for (i = 0; i < bundle_at; i++) {
9184 SCTP_STAT_INCR(sctps_sendretransdata);
9185 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9187 * When we have a revoked data, and we
9188 * retransmit it, then we clear the revoked
9189 * flag since this flag dictates if we
9190 * subtracted from the fs
9192 if (data_list[i]->rec.data.chunk_was_revoked) {
9193 /* Deflate the cwnd */
9194 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9195 data_list[i]->rec.data.chunk_was_revoked = 0;
9197 data_list[i]->snd_count++;
9198 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9199 /* record the time */
9200 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9201 if (data_list[i]->book_size_scale) {
9203 * need to double the book size on
9204 * this one.
9205 */
9206 data_list[i]->book_size_scale = 0;
9207 /*-
9208 * Since we double the book size, we
9209 * must also double the output queue
9210 * size, since this gets shrunk when
9211 * we free by this amount.
9212 */
9213 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9214 data_list[i]->book_size *= 2;
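/*
 * Accounting sketch (sizes assumed): a chunk booked at 512 bytes with
 * book_size_scale set is re-booked at 1024; total_output_queue_size
 * grows by the old book_size first, so the eventual free, which
 * subtracts the doubled book_size, leaves the queue count balanced.
 */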
9218 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9219 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9220 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9222 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9223 (uint32_t) (data_list[i]->send_size +
9224 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9226 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9227 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9228 data_list[i]->whoTo->flight_size,
9229 data_list[i]->book_size,
9230 (uintptr_t) data_list[i]->whoTo,
9231 data_list[i]->rec.data.TSN_seq);
9233 sctp_flight_size_increase(data_list[i]);
9234 sctp_total_flight_increase(stcb, data_list[i]);
9235 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9236 /* SWS sender side engages */
9237 asoc->peers_rwnd = 0;
9240 (data_list[i]->rec.data.doing_fast_retransmit)) {
9241 SCTP_STAT_INCR(sctps_sendfastretrans);
9242 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9243 (tmr_started == 0)) {
9245 * ok we just fast-retrans'd
9246 * the lowest TSN, i.e. the
9247 * first on the list. In
9248 * this case we want to give
9249 * some more time to get a
9250 * SACK back without a
9251 * t3-expiring.
9252 */
9253 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9254 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9255 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9259 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9260 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9262 #ifdef SCTP_AUDITING_ENABLED
9263 sctp_auditing(21, inp, stcb, NULL);
9269 if (asoc->sent_queue_retran_cnt <= 0) {
9270 /* all done we have no more to retran */
9271 asoc->sent_queue_retran_cnt = 0;
9275 /* No more room in rwnd */
9278 /* stop the for loop here. we sent out a packet */
9286 sctp_timer_validation(struct sctp_inpcb *inp,
9287 struct sctp_tcb *stcb,
9288 struct sctp_association *asoc,
9291 struct sctp_nets *net;
9293 /* Validate that a timer is running somewhere */
9294 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9295 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9296 /* Here is a timer */
9300 SCTP_TCB_LOCK_ASSERT(stcb);
9301 /* Gak, we did not have a timer somewhere */
9302 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9303 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9308 sctp_chunk_output(struct sctp_inpcb *inp,
9309 struct sctp_tcb *stcb,
9312 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9318 * Ok this is the generic chunk service queue. We must do the
9319 * following:
9320 * - See if there are retransmits pending, if so we must
9321 * do these first.
9322 * - Service the stream queue that is next, moving any
9323 * message (note I must get a complete message i.e.
9324 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9325 * TSNs.
9326 * - Check to see if the cwnd/rwnd allows any output, if so we
9327 * go ahead and formulate and send the low level chunks. Making sure
9328 * to combine any control in the control chunk queue also.
9329 */
9330 struct sctp_association *asoc;
9331 struct sctp_nets *net;
9332 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
9333 unsigned int burst_cnt = 0;
9337 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9340 unsigned int tot_frs = 0;
9343 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9344 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9350 SCTP_TCB_LOCK_ASSERT(stcb);
9352 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9354 if ((un_sent <= 0) &&
9355 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9356 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9357 (asoc->sent_queue_retran_cnt == 0)) {
9358 /* Nothing to do unless there is something to be sent left */
9362 * Do we have something to send, data or control AND a sack timer
9363 * running, if so piggy-back the sack.
9365 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9366 sctp_send_sack(stcb);
9367 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
9369 while (asoc->sent_queue_retran_cnt) {
9371 * Ok, it is retransmission time only, we send out only ONE
9372 * packet with a single call off to the retran code.
9374 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9376 * Special hook for handling cookies discarded
9377 * by a peer that carried data. Send a cookie-ack only,
9378 * and then the next call will get the retrans.
9379 */
9380 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9382 &now, &now_filled, frag_point, so_locked);
9384 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9385 /* if its not from a HB then do it */
9387 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9393 * it's from any other place, we don't allow retran
9394 * output (only control)
9399 /* Can't send anymore */
9401 * now lets push out control by calling med-level
9402 * output once. this assures that we WILL send HB's
9405 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9407 &now, &now_filled, frag_point, so_locked);
9408 #ifdef SCTP_AUDITING_ENABLED
9409 sctp_auditing(8, inp, stcb, NULL);
9411 (void)sctp_timer_validation(inp, stcb, asoc, ret);
9416 * The count was off.. retran is not happening so do
9417 * the normal retransmission.
9419 #ifdef SCTP_AUDITING_ENABLED
9420 sctp_auditing(9, inp, stcb, NULL);
9422 if (ret == SCTP_RETRAN_EXIT) {
9427 if (from_where == SCTP_OUTPUT_FROM_T3) {
9428 /* Only one transmission allowed out of a timeout */
9429 #ifdef SCTP_AUDITING_ENABLED
9430 sctp_auditing(10, inp, stcb, NULL);
9432 /* Push out any control */
9433 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
9434 &now, &now_filled, frag_point, so_locked);
9437 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
9438 /* Hit FR burst limit */
9441 if ((num_out == 0) && (ret == 0)) {
9442 /* No more retrans to send */
9446 #ifdef SCTP_AUDITING_ENABLED
9447 sctp_auditing(12, inp, stcb, NULL);
9449 /* Check for bad destinations, if they exist move chunks around. */
9450 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9451 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
9452 SCTP_ADDR_NOT_REACHABLE) {
9454 * if possible, move things off of this address. We
9455 * still may send below due to the dormant state, but
9456 * we try to find an alternate address to send to,
9457 * and if we have one we move all queued data on the
9458 * out wheel to this alternate address.
9459 */
9460 if (net->ref_count > 1)
9461 sctp_move_chunks_from_net(stcb, net);
9462 } else if ((asoc->sctp_cmt_on_off > 0) &&
9463 (asoc->sctp_cmt_pf > 0) &&
9464 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
9466 * JRS 5/14/07 - If CMT PF is on and the current
9467 * destination is in PF state, move all queued data
9468 * to an alternate destination.
9470 if (net->ref_count > 1)
9471 sctp_move_chunks_from_net(stcb, net);
9474 * if ((asoc->sat_network) || (net->addr_is_local))
9475 * { burst_limit = asoc->max_burst *
9476 * SCTP_SAT_NETWORK_BURST_INCR; }
9478 if (asoc->max_burst > 0) {
9479 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
9480 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
9482 * JRS - Use the congestion
9483 * control given in the
9484 * congestion control module
9486 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
9487 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9488 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
9490 SCTP_STAT_INCR(sctps_maxburstqueued);
9492 net->fast_retran_ip = 0;
9494 if (net->flight_size == 0) {
9495 /*
9496 * Should be decaying the
9497 * cwnd here.
9498 */
9508 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
9509 &reason_code, 0, from_where,
9510 &now, &now_filled, frag_point, so_locked);
9512 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
9513 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9514 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
9516 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9517 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
9518 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
9522 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
9526 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9527 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
9529 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
9534 * When Nagle is on, we look at how much is un_sent, then
9535 * if it's smaller than an MTU and we have data in
9536 * flight we stop.
9537 */
9538 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
9539 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
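/*
 * Worked example (numbers assumed): three queued 100-byte messages
 * give un_sent = 300 + 3 * sizeof(struct sctp_data_chunk) (16 bytes
 * each) = 348 bytes. With data in flight and 348 < smallest_mtu minus
 * overhead, the Nagle check below defers the send until a SACK
 * arrives.
 */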
9540 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
9541 (stcb->asoc.total_flight > 0)) {
9545 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
9546 TAILQ_EMPTY(&asoc->send_queue) &&
9547 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
9548 /* Nothing left to send */
9551 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
9552 /* Nothing left to send */
9556 ((asoc->max_burst == 0) ||
9557 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
9558 (burst_cnt < asoc->max_burst)));
9560 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
9561 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
9562 SCTP_STAT_INCR(sctps_maxburstqueued);
9563 asoc->burst_limit_applied = 1;
9564 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9565 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
9568 asoc->burst_limit_applied = 0;
9571 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9572 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
9574 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
9578 * Now we need to clean up the control chunk chain if an ECNE is on
9579 * it. It must be marked as UNSENT again so the next call will continue
9580 * to send it until such time as we get a CWR to remove it.
9581 */
9582 if (stcb->asoc.ecn_echo_cnt_onq)
9583 sctp_fix_ecn_echo(asoc);
9589 sctp_output(inp, m, addr, control, p, flags)
9590 struct sctp_inpcb *inp;
9592 struct sockaddr *addr;
9593 struct mbuf *control;
9598 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
9601 if (inp->sctp_socket == NULL) {
9602 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
9605 return (sctp_sosend(inp->sctp_socket,
9615 send_forward_tsn(struct sctp_tcb *stcb,
9616 struct sctp_association *asoc)
9618 struct sctp_tmit_chunk *chk;
9619 struct sctp_forward_tsn_chunk *fwdtsn;
9620 uint32_t advance_peer_ack_point;
9622 SCTP_TCB_LOCK_ASSERT(stcb);
9623 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9624 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9625 /* mark it as unsent */
9626 chk->sent = SCTP_DATAGRAM_UNSENT;
9628 /* Do we correct its output location? */
9629 if (chk->whoTo != asoc->primary_destination) {
9630 sctp_free_remote_addr(chk->whoTo);
9631 chk->whoTo = asoc->primary_destination;
9632 atomic_add_int(&chk->whoTo->ref_count, 1);
9634 goto sctp_fill_in_rest;
9637 /* Ok if we reach here we must build one */
9638 sctp_alloc_a_chunk(stcb, chk);
9642 asoc->fwd_tsn_cnt++;
9643 chk->copy_by_ref = 0;
9644 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
9645 chk->rec.chunk_id.can_take_data = 0;
9648 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
9649 if (chk->data == NULL) {
9650 sctp_free_a_chunk(stcb, chk);
9653 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
9654 chk->sent = SCTP_DATAGRAM_UNSENT;
9656 chk->whoTo = asoc->primary_destination;
9657 atomic_add_int(&chk->whoTo->ref_count, 1);
9658 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
9659 asoc->ctrl_queue_cnt++;
9662 * Here we go through and fill out the part that deals with
9663 * stream/seq of the ones we skip.
9665 SCTP_BUF_LEN(chk->data) = 0;
9667 struct sctp_tmit_chunk *at, *tp1, *last;
9668 struct sctp_strseq *strseq;
9669 unsigned int cnt_of_space, i, ovh;
9670 unsigned int space_needed;
9671 unsigned int cnt_of_skipped = 0;
9673 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
9674 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
9675 /* no more to look at */
9678 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
9679 /* We don't report these */
9684 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
9685 (cnt_of_skipped * sizeof(struct sctp_strseq)));
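/*
 * Example (on-wire sizes assumed): skipping 25 ordered chunks costs
 * an 8-byte FORWARD-TSN header plus 25 * 4-byte stream/sequence
 * pairs, i.e. space_needed = 108 bytes.
 */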
9687 cnt_of_space = M_TRAILINGSPACE(chk->data);
9689 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
9690 ovh = SCTP_MIN_OVERHEAD;
9692 ovh = SCTP_MIN_V4_OVERHEAD;
9694 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
9695 /* trim to a mtu size */
9696 cnt_of_space = asoc->smallest_mtu - ovh;
9698 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
9699 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
9700 0xff, 0, cnt_of_skipped,
9701 asoc->advanced_peer_ack_point);
9704 advance_peer_ack_point = asoc->advanced_peer_ack_point;
9705 if (cnt_of_space < space_needed) {
9707 * ok we must trim down the chunk by lowering the
9708 * advance peer ack point.
9710 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
9711 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
9712 0xff, 0xff, cnt_of_space,
9715 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
9716 cnt_of_skipped /= sizeof(struct sctp_strseq);
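/*
 * E.g. (assumed numbers): with cnt_of_space = 200 bytes and the
 * 8-byte chunk header, (200 - 8) / 4 = 48 stream/seq entries fit, so
 * the report is trimmed to the first 48 skipped TSNs and the ack
 * point is lowered accordingly.
 */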
9718 * Go through and find the TSN that will be the one
9719 * we report.
9720 */
9721 at = TAILQ_FIRST(&asoc->sent_queue);
9722 for (i = 0; i < cnt_of_skipped; i++) {
9723 tp1 = TAILQ_NEXT(at, sctp_next);
9724 if (tp1 == NULL) {
9725 break;
9726 }
9727 at = tp1;
9728 }
9729 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
9730 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
9731 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
9732 asoc->advanced_peer_ack_point);
9733 }
9734 last = at;
9735 /*-
9736 * last now points to the last one I can report, update
9737 * the peer ack point.
9738 */
9739 if (last)
9740 advance_peer_ack_point = last->rec.data.TSN_seq;
9741 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
9742 cnt_of_skipped * sizeof(struct sctp_strseq);
9744 chk->send_size = space_needed;
9745 /* Setup the chunk */
9746 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
9747 fwdtsn->ch.chunk_length = htons(chk->send_size);
9748 fwdtsn->ch.chunk_flags = 0;
9749 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
9750 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
9751 SCTP_BUF_LEN(chk->data) = chk->send_size;
9752 fwdtsn++;
9753 /*-
9754 * Move the pointer to after the fwdtsn and transfer it to the
9755 * strseq pointer.
9756 */
9757 strseq = (struct sctp_strseq *)fwdtsn;
9759 * Now populate the strseq list. This is done blindly
9760 * without pulling out duplicate stream info. This is
9761 * inefficient but won't harm the process since the peer will
9762 * look at these in sequence and will thus release anything.
9763 * It could mean we exceed the PMTU and chop off some that
9764 * we could have included... but this is unlikely (aka 1432/4
9765 * would mean 300+ stream seqs would have to be reported in
9766 * one FWD-TSN). With a bit of work we can later FIX this to
9767 * optimize and pull out duplicates... but it does add more
9768 * overhead. So for now... not!
9769 */
9770 at = TAILQ_FIRST(&asoc->sent_queue);
9771 for (i = 0; i < cnt_of_skipped; i++) {
9772 tp1 = TAILQ_NEXT(at, sctp_next);
9775 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
9776 /* We don't report these */
9781 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
9782 at->rec.data.fwd_tsn_cnt = 0;
9783 }
9784 strseq->stream = htons(at->rec.data.stream_number);
9785 strseq->sequence = htons(at->rec.data.stream_seq);
9795 sctp_send_sack(struct sctp_tcb *stcb)
9798 * Queue up a SACK or NR-SACK in the control queue.
9799 * We must first check to see if a SACK or NR-SACK is
9800 * somehow on the control queue.
9801 * If so, we will find and remove the old one.
9803 struct sctp_association *asoc;
9804 struct sctp_tmit_chunk *chk, *a_chk;
9805 struct sctp_sack_chunk *sack;
9806 struct sctp_nr_sack_chunk *nr_sack;
9807 struct sctp_gap_ack_block *gap_descriptor;
9808 struct sack_track *selector;
9813 int limit_reached = 0;
9814 unsigned int i, siz, j;
9815 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
9818 uint32_t highest_tsn;
9823 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
9824 (stcb->asoc.peer_supports_nr_sack == 1)) {
9825 type = SCTP_NR_SELECTIVE_ACK;
9827 type = SCTP_SELECTIVE_ACK;
9831 SCTP_TCB_LOCK_ASSERT(stcb);
9832 if (asoc->last_data_chunk_from == NULL) {
9833 /* Hmm we never received anything */
9836 sctp_slide_mapping_arrays(stcb);
9837 sctp_set_rwnd(stcb, asoc);
9838 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9839 if (chk->rec.chunk_id.id == type) {
9840 /* Hmm, found a sack already on queue, remove it */
9841 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
9842 asoc->ctrl_queue_cnt--;
9845 sctp_m_freem(a_chk->data);
9848 sctp_free_remote_addr(a_chk->whoTo);
9849 a_chk->whoTo = NULL;
9853 if (a_chk == NULL) {
9854 sctp_alloc_a_chunk(stcb, a_chk);
9855 if (a_chk == NULL) {
9856 /* No memory so we drop the idea, and set a timer */
9857 if (stcb->asoc.delayed_ack) {
9858 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
9859 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
9860 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
9861 stcb->sctp_ep, stcb, NULL);
9863 stcb->asoc.send_sack = 1;
9867 a_chk->copy_by_ref = 0;
9868 a_chk->rec.chunk_id.id = type;
9869 a_chk->rec.chunk_id.can_take_data = 1;
9871 /* Clear our pkt counts */
9872 asoc->data_pkts_seen = 0;
9875 a_chk->snd_count = 0;
9876 a_chk->send_size = 0; /* fill in later */
9877 a_chk->sent = SCTP_DATAGRAM_UNSENT;
9878 a_chk->whoTo = NULL;
9880 if ((asoc->numduptsns) ||
9881 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)) {
9883 * Ok, we have some duplicates or the destination for the
9884 * sack is unreachable, let's see if we can select an
9885 * alternate to asoc->last_data_chunk_from.
9886 */
9887 if ((!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)) &&
9888 (asoc->used_alt_onsack > asoc->numnets)) {
9889 /* We used an alt last time, don't use one this time */
9890 a_chk->whoTo = NULL;
9892 asoc->used_alt_onsack++;
9893 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
9895 if (a_chk->whoTo == NULL) {
9896 /* Nope, no alternate */
9897 a_chk->whoTo = asoc->last_data_chunk_from;
9898 asoc->used_alt_onsack = 0;
9902 * No duplicates, so we use the last place we received data
9903 * from.
9904 */
9905 asoc->used_alt_onsack = 0;
9906 a_chk->whoTo = asoc->last_data_chunk_from;
9909 atomic_add_int(&a_chk->whoTo->ref_count, 1);
9911 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
9912 highest_tsn = asoc->highest_tsn_inside_map;
9914 highest_tsn = asoc->highest_tsn_inside_nr_map;
9916 if (highest_tsn == asoc->cumulative_tsn) {
9918 if (type == SCTP_SELECTIVE_ACK) {
9919 space_req = sizeof(struct sctp_sack_chunk);
9921 space_req = sizeof(struct sctp_nr_sack_chunk);
9924 /* gaps get a cluster */
9925 space_req = MCLBYTES;
9927 /* Ok now let's formulate an mbuf with our sack */
9928 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
9929 if ((a_chk->data == NULL) ||
9930 (a_chk->whoTo == NULL)) {
9931 /* rats, no mbuf memory */
9933 /* was a problem with the destination */
9934 sctp_m_freem(a_chk->data);
9937 sctp_free_a_chunk(stcb, a_chk);
9938 /* sa_ignore NO_NULL_CHK */
9939 if (stcb->asoc.delayed_ack) {
9940 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
9941 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
9942 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
9943 stcb->sctp_ep, stcb, NULL);
9945 stcb->asoc.send_sack = 1;
9949 /* ok, let's go through and fill it in */
9950 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
9951 space = M_TRAILINGSPACE(a_chk->data);
9952 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
9953 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
9955 limit = mtod(a_chk->data, caddr_t);
9960 if ((asoc->sctp_cmt_on_off > 0) &&
9961 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
9963 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
9964 * received, then set high bit to 1, else 0. Reset
9965 * pkts_rcvd afterwards.
9966 */
9967 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
9968 asoc->cmt_dac_pkts_rcvd = 0;
9969 }
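/*
 * Illustration: with cmt_dac_pkts_rcvd == 2, (2 << 6) sets 0x80 in
 * the chunk flags; a DAC-aware sender reads that bit to learn that
 * two packets are covered by this SACK.
 */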
9970 #ifdef SCTP_ASOCLOG_OF_TSNS
9971 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
9972 stcb->asoc.cumack_log_atsnt++;
9973 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
9974 stcb->asoc.cumack_log_atsnt = 0;
9977 /* reset the readers interpretation */
9978 stcb->freed_by_sorcv_sincelast = 0;
9980 if (type == SCTP_SELECTIVE_ACK) {
9981 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
9983 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
9984 if (highest_tsn > asoc->mapping_array_base_tsn) {
9985 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
9987 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + highest_tsn + 7) / 8;
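/*
 * Example (TSN values assumed): base TSN 1000 and highest TSN 1063
 * in the non-wrapped branch gives siz = ((64) + 7) / 8 = 8 mapping
 * array bytes to scan for gaps; the wrapped branch counts the span
 * across the 32-bit TSN wrap instead.
 */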
9991 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
9992 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
9993 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
9994 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
9996 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10000 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10003 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10005 if (((type == SCTP_SELECTIVE_ACK) &&
10006 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10007 ((type == SCTP_NR_SELECTIVE_ACK) &&
10008 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10009 /* we have a gap .. maybe */
10010 for (i = 0; i < siz; i++) {
10011 tsn_map = asoc->mapping_array[i];
10012 if (type == SCTP_SELECTIVE_ACK) {
10013 tsn_map |= asoc->nr_mapping_array[i];
10014 }
10015 if (i == 0) {
10016 /*
10017 * Clear all bits corresponding to TSNs
10018 * smaller than or equal to the cumulative TSN.
10019 */
10020 tsn_map &= (~0 << (1 - offset));
10021 }
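/*
 * Worked example (TSN values assumed): base TSN 1000, cumulative TSN
 * 1002 gives offset = -2, so the mask ~0 << 3 clears bits 0-2, i.e.
 * TSNs 1000-1002, which the cumulative ack already covers.
 */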
10022 selector = &sack_array[tsn_map];
10023 if (mergeable && selector->right_edge) {
10024 /*
10025 * Backup, left and right edges were ok to
10026 * merge.
10027 */
10028 num_gap_blocks--;
10029 gap_descriptor--;
10030 }
10031 if (selector->num_entries == 0)
10032 mergeable = 0;
10033 else {
10034 for (j = 0; j < selector->num_entries; j++) {
10035 if (mergeable && selector->right_edge) {
10036 /*
10037 * do a merge by NOT setting
10038 * the left side
10039 */
10040 mergeable = 0;
10041 } else {
10042 /*
10043 * no merge, set the left
10044 * side
10045 */
10046 mergeable = 0;
10047 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10048 }
10049 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10050 num_gap_blocks++;
10051 gap_descriptor++;
10052 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10053 /* no more room */
10054 limit_reached = 1;
10055 break;
10056 }
10057 }
10058 if (selector->left_edge) {
10059 mergeable = 1;
10060 }
10061 }
10062 if (limit_reached) {
10063 /* Reached the limit, stop */
10064 break;
10065 }
10069 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10070 (limit_reached == 0)) {
10074 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10075 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10077 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10080 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10083 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10085 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10086 /* we have a gap .. maybe */
10087 for (i = 0; i < siz; i++) {
10088 tsn_map = asoc->nr_mapping_array[i];
10089 if (i == 0) {
10090 /*
10091 * Clear all bits corresponding to
10092 * TSNs smaller than or equal to the
10093 * cumulative TSN.
10094 */
10095 tsn_map &= (~0 << (1 - offset));
10096 }
10097 selector = &sack_array[tsn_map];
10098 if (mergeable && selector->right_edge) {
10099 /*
10100 * Backup, left and right edges were
10101 * ok to merge.
10102 */
10103 num_nr_gap_blocks--;
10104 gap_descriptor--;
10105 }
10106 if (selector->num_entries == 0)
10107 mergeable = 0;
10108 else {
10109 for (j = 0; j < selector->num_entries; j++) {
10110 if (mergeable && selector->right_edge) {
10111 /*
10112 * do a merge by NOT
10113 * setting the left
10114 * side
10115 */
10116 mergeable = 0;
10117 } else {
10118 /*
10119 * no merge, set the
10120 * left side
10121 */
10122 mergeable = 0;
10123 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10124 }
10125 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10126 num_nr_gap_blocks++;
10127 gap_descriptor++;
10128 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10129 /* no more room */
10130 limit_reached = 1;
10131 break;
10132 }
10133 }
10134 if (selector->left_edge) {
10135 mergeable = 1;
10136 }
10137 }
10138 if (limit_reached) {
10139 /* Reached the limit, stop */
10140 break;
10141 }
10146 /* now we must add any dups we are going to report. */
10147 if ((limit_reached == 0) && (asoc->numduptsns)) {
10148 dup = (uint32_t *) gap_descriptor;
10149 for (i = 0; i < asoc->numduptsns; i++) {
10150 *dup = htonl(asoc->dup_tsns[i]);
10153 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10158 asoc->numduptsns = 0;
10161 * now that the chunk is prepared queue it to the control chunk
10164 if (type == SCTP_SELECTIVE_ACK) {
10165 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
10166 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10167 num_dups * sizeof(int32_t);
10168 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10169 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10170 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10171 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10172 sack->sack.num_dup_tsns = htons(num_dups);
10173 sack->ch.chunk_type = type;
10174 sack->ch.chunk_flags = flags;
10175 sack->ch.chunk_length = htons(a_chk->send_size);
10177 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
10178 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10179 num_dups * sizeof(int32_t);
10180 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10181 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10182 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10183 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10184 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10185 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10186 nr_sack->nr_sack.reserved = 0;
10187 nr_sack->ch.chunk_type = type;
10188 nr_sack->ch.chunk_flags = flags;
10189 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10191 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10192 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10193 asoc->ctrl_queue_cnt++;
10194 asoc->send_sack = 0;
10195 SCTP_STAT_INCR(sctps_sendsacks);
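/*
 * Size sketch for the chunk queued above (counts assumed): a plain
 * SACK with 3 gap blocks and 2 duplicate TSNs occupies 16 + 3 * 4 +
 * 2 * 4 = 36 bytes; the NR-SACK header is 4 bytes larger.
 */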
10200 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10201 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10206 struct mbuf *m_abort;
10207 struct mbuf *m_out = NULL, *m_end = NULL;
10208 struct sctp_abort_chunk *abort = NULL;
10210 uint32_t auth_offset = 0;
10211 struct sctp_auth_chunk *auth = NULL;
10214 * Add an AUTH chunk, if chunk requires it and save the offset into
10215 * the chain for AUTH
10217 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10218 stcb->asoc.peer_auth_chunks)) {
10219 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
10220 stcb, SCTP_ABORT_ASSOCIATION);
10221 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10223 SCTP_TCB_LOCK_ASSERT(stcb);
10224 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
10225 if (m_abort == NULL) {
10228 sctp_m_freem(m_out);
10231 /* link in any error */
10232 SCTP_BUF_NEXT(m_abort) = operr;
10239 sz += SCTP_BUF_LEN(n);
10240 n = SCTP_BUF_NEXT(n);
10243 SCTP_BUF_LEN(m_abort) = sizeof(*abort);
10244 if (m_out == NULL) {
10245 /* NO Auth chunk prepended, so reserve space in front */
10246 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10249 /* Put AUTH chunk at the front of the chain */
10250 SCTP_BUF_NEXT(m_end) = m_abort;
10253 /* fill in the ABORT chunk */
10254 abort = mtod(m_abort, struct sctp_abort_chunk *);
10255 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10256 abort->ch.chunk_flags = 0;
10257 abort->ch.chunk_length = htons(sizeof(*abort) + sz);
10259 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
10260 stcb->asoc.primary_destination,
10261 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
10262 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, NULL, 0,
10263 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10264 stcb->asoc.primary_destination->port, so_locked, NULL, NULL);
10265 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10269 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10270 struct sctp_nets *net,
10273 /* formulate and SEND a SHUTDOWN-COMPLETE */
10274 struct mbuf *m_shutdown_comp;
10275 struct sctp_shutdown_complete_chunk *shutdown_complete;
10279 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
10280 if (m_shutdown_comp == NULL) {
10284 if (reflect_vtag) {
10285 flags = SCTP_HAD_NO_TCB;
10286 vtag = stcb->asoc.my_vtag;
10289 vtag = stcb->asoc.peer_vtag;
10291 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10292 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10293 shutdown_complete->ch.chunk_flags = flags;
10294 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10295 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
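/*
 * The SCTP_HAD_NO_TCB flag is the chunk's T bit (RFC 4960, section
 * 8.5.1): when set, it tells the receiver that the verification tag
 * is the sender's own tag reflected back rather than the receiver's.
 */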
10296 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10297 (struct sockaddr *)&net->ro._l_addr,
10298 m_shutdown_comp, 0, NULL, 0, 1, 0, NULL, 0,
10299 stcb->sctp_ep->sctp_lport, stcb->rport,
10301 net->port, SCTP_SO_NOT_LOCKED, NULL, NULL);
10302 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10307 sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
10308 uint32_t vrf_id, uint16_t port)
10310 /* formulate and SEND a SHUTDOWN-COMPLETE */
10311 struct mbuf *o_pak;
10313 struct ip *iph, *iph_out;
10314 struct udphdr *udp = NULL;
10317 struct ip6_hdr *ip6, *ip6_out;
10320 int offset_out, len, mlen;
10321 struct sctp_shutdown_complete_msg *comp_cp;
10323 iph = mtod(m, struct ip *);
10324 switch (iph->ip_v) {
10326 len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
10329 case IPV6_VERSION >> 4:
10330 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
10337 len += sizeof(struct udphdr);
10339 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
10340 if (mout == NULL) {
10343 SCTP_BUF_RESV_UF(mout, max_linkhdr);
10344 SCTP_BUF_LEN(mout) = len;
10345 SCTP_BUF_NEXT(mout) = NULL;
10346 if (m->m_flags & M_FLOWID) {
10347 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
10348 mout->m_flags |= M_FLOWID;
10356 switch (iph->ip_v) {
10358 iph_out = mtod(mout, struct ip *);
10360 /* Fill in the IP header for the SHUTDOWN-COMPLETE */
10361 iph_out->ip_v = IPVERSION;
10362 iph_out->ip_hl = (sizeof(struct ip) / 4);
10363 iph_out->ip_tos = (u_char)0;
10364 iph_out->ip_id = 0;
10365 iph_out->ip_off = 0;
10366 iph_out->ip_ttl = MAXTTL;
10368 iph_out->ip_p = IPPROTO_UDP;
10370 iph_out->ip_p = IPPROTO_SCTP;
10372 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
10373 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
10375 /* let IP layer calculate this */
10376 iph_out->ip_sum = 0;
10377 offset_out += sizeof(*iph_out);
10378 comp_cp = (struct sctp_shutdown_complete_msg *)(
10379 (caddr_t)iph_out + offset_out);
10382 case IPV6_VERSION >> 4:
10383 ip6 = (struct ip6_hdr *)iph;
10384 ip6_out = mtod(mout, struct ip6_hdr *);
10386 /* Fill in the IPv6 header for the SHUTDOWN-COMPLETE */
10387 ip6_out->ip6_flow = ip6->ip6_flow;
10388 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
10390 ip6_out->ip6_nxt = IPPROTO_UDP;
10392 ip6_out->ip6_nxt = IPPROTO_SCTP;
10394 ip6_out->ip6_src = ip6->ip6_dst;
10395 ip6_out->ip6_dst = ip6->ip6_src;
10397 * ?? The old code had both the iph len + payload, I think
10398 * this is wrong and would never have worked.
10399 */
10400 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
10401 offset_out += sizeof(*ip6_out);
10402 comp_cp = (struct sctp_shutdown_complete_msg *)(
10403 (caddr_t)ip6_out + offset_out);
10407 /* Currently not supported. */
10408 sctp_m_freem(mout);
10412 udp = (struct udphdr *)comp_cp;
10413 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
10414 udp->uh_dport = port;
10415 udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
10417 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
10418 offset_out += sizeof(struct udphdr);
10419 comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
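/*
 * Resulting layout for the UDP-encapsulated case (tunneling in the
 * style of what later became RFC 6951; the source port comes from the
 * sysctl, the destination from the inbound packet): IP | UDP | SCTP
 * common header | SHUTDOWN-COMPLETE chunk, with uh_ulen covering the
 * UDP header plus payload.
 */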
10421 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
10423 sctp_m_freem(mout);
10426 /* Now copy in and fill in the SHUTDOWN-COMPLETE tags etc. */
10427 comp_cp->sh.src_port = sh->dest_port;
10428 comp_cp->sh.dest_port = sh->src_port;
10429 comp_cp->sh.checksum = 0;
10430 comp_cp->sh.v_tag = sh->v_tag;
10431 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
10432 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10433 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10435 if (iph_out != NULL) {
10439 mlen = SCTP_BUF_LEN(mout);
10440 bzero(&ro, sizeof ro);
10441 /* set IPv4 length */
10442 iph_out->ip_len = mlen;
10443 #ifdef SCTP_PACKET_LOGGING
10444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
10445 sctp_packet_log(mout, mlen);
10448 #if defined(SCTP_WITH_NO_CSUM)
10449 SCTP_STAT_INCR(sctps_sendnocrc);
10451 comp_cp->sh.checksum = sctp_calculate_cksum(mout, offset_out);
10452 SCTP_STAT_INCR(sctps_sendswcrc);
10454 SCTP_ENABLE_UDP_CSUM(mout);
10456 #if defined(SCTP_WITH_NO_CSUM)
10457 SCTP_STAT_INCR(sctps_sendnocrc);
10459 mout->m_pkthdr.csum_flags = CSUM_SCTP;
10460 mout->m_pkthdr.csum_data = 0;
10461 SCTP_STAT_INCR(sctps_sendhwcrc);
10464 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
10466 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
10468 /* Free the route if we got one back */
10473 if (ip6_out != NULL) {
10474 struct route_in6 ro;
10476 struct ifnet *ifp = NULL;
10478 bzero(&ro, sizeof(ro));
10479 mlen = SCTP_BUF_LEN(mout);
10480 #ifdef SCTP_PACKET_LOGGING
10481 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
10482 sctp_packet_log(mout, mlen);
10484 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
10486 #if defined(SCTP_WITH_NO_CSUM)
10487 SCTP_STAT_INCR(sctps_sendnocrc);
10489 comp_cp->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
10490 SCTP_STAT_INCR(sctps_sendswcrc);
10492 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), mlen - sizeof(struct ip6_hdr))) == 0) {
10493 udp->uh_sum = 0xffff;
10496 #if defined(SCTP_WITH_NO_CSUM)
10497 SCTP_STAT_INCR(sctps_sendnocrc);
10499 mout->m_pkthdr.csum_flags = CSUM_SCTP;
10500 mout->m_pkthdr.csum_data = 0;
10501 SCTP_STAT_INCR(sctps_sendhwcrc);
10504 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
10506 /* Free the route if we got one back */
10511 SCTP_STAT_INCR(sctps_sendpackets);
10512 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
10513 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10518 static struct sctp_nets *
10519 sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
10521 struct sctp_nets *net, *hnet;
10522 int ms_goneby, highest_ms, state_overide = 0;
10524 (void)SCTP_GETTIME_TIMEVAL(now);
10527 SCTP_TCB_LOCK_ASSERT(stcb);
10528 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
10530 ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
10531 (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
10534 * Skip this guy from consideration if HB is off AND the address is confirmed.
10539 if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
10540 /* skip this dest net from consideration */
10543 if (net->last_sent_time.tv_sec) {
10544 /* Sent to before, so we subtract */
10545 ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
10547 /* Never been sent to */
10548 ms_goneby = 0x7fffffff;
10550 * When the address state is unconfirmed but still
10551 * considered reachable, we HB at a higher rate. Once it
10552 * goes confirmed OR reaches the "unreachable" state, then
10553 * we cut back to HB at a more normal pace.
10555 if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
10561 if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
10562 (ms_goneby > highest_ms)) {
10563 highest_ms = ms_goneby;
10568 ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
10574 if (hnet && highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
10576 * Found the one with longest delay bounds OR it is
10577 * unconfirmed and still not marked unreachable.
10579 SCTPDBG(SCTP_DEBUG_OUTPUT4, "net:%p is the hb winner -", hnet);
10582 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT4,
10583 (struct sockaddr *)&hnet->ro._l_addr);
10585 SCTPDBG(SCTP_DEBUG_OUTPUT4, " none\n");
10588 /* update the timer now */
10589 hnet->last_sent_time = *now;
10592 /* Nothing to HB */
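/*
 * Illustrative sketch (not compiled): the selection loop above measures
 * idle time in whole seconds only.  A hypothetical helper that also
 * folds in the microsecond part would look like this:
 */
#if 0
static int
tv_delta_ms(const struct timeval *now, const struct timeval *then)
{
	return ((now->tv_sec - then->tv_sec) * 1000 +
	    (now->tv_usec - then->tv_usec) / 1000);
}
#endif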
10597 sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
10599 struct sctp_tmit_chunk *chk;
10600 struct sctp_nets *net;
10601 struct sctp_heartbeat_chunk *hb;
10602 struct timeval now;
10603 struct sockaddr_in *sin;
10604 struct sockaddr_in6 *sin6;
10606 SCTP_TCB_LOCK_ASSERT(stcb);
10607 if (user_req == 0) {
10608 net = sctp_select_hb_destination(stcb, &now);
10611 * All are busy or there is none to send to; just start the timer again.
10614 if (stcb->asoc.state == 0) {
10617 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
10628 (void)SCTP_GETTIME_TIMEVAL(&now);
10630 sin = (struct sockaddr_in *)&net->ro._l_addr;
10631 if (sin->sin_family != AF_INET) {
10632 if (sin->sin_family != AF_INET6) {
10637 sctp_alloc_a_chunk(stcb, chk);
10639 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
10642 chk->copy_by_ref = 0;
10643 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
10644 chk->rec.chunk_id.can_take_data = 1;
10645 chk->asoc = &stcb->asoc;
10646 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
10648 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
10649 if (chk->data == NULL) {
10650 sctp_free_a_chunk(stcb, chk);
10653 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10654 SCTP_BUF_LEN(chk->data) = chk->send_size;
10655 chk->sent = SCTP_DATAGRAM_UNSENT;
10656 chk->snd_count = 0;
10658 atomic_add_int(&chk->whoTo->ref_count, 1);
10659 /* Now we have an mbuf that we can fill in with the details */
10660 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
10661 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
10662 /* fill out chunk header */
10663 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
10664 hb->ch.chunk_flags = 0;
10665 hb->ch.chunk_length = htons(chk->send_size);
10666 /* Fill out hb parameter */
10667 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
10668 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
10669 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
10670 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
10671 /* Did our user request this one? Put it in. */
10672 hb->heartbeat.hb_info.user_req = user_req;
10673 hb->heartbeat.hb_info.addr_family = sin->sin_family;
10674 hb->heartbeat.hb_info.addr_len = sin->sin_len;
10675 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
10677 * we only take from the entropy pool if the address is not yet confirmed.
10680 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
10681 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
10683 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
10684 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
10686 if (sin->sin_family == AF_INET) {
10687 memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
10688 } else if (sin->sin_family == AF_INET6) {
10689 /* We leave the scope the way it is in our lookup table. */
10690 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
10691 memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
10693 /* huh compiler bug */
10698 * JRS 5/14/07 - In CMT PF, the T3 timer is used to track
10699 * PF-heartbeats. Because of this, threshold management is done by
10700 * the t3 timer handler, and does not need to be done upon the send
10701 * of a PF-heartbeat. If CMT PF is on and the destination to which a
10702 * heartbeat is being sent is in PF state, do NOT do threshold
10705 if ((stcb->asoc.sctp_cmt_pf == 0) ||
10706 ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) {
10707 /* ok we have a destination that needs a beat */
10708 /* let's do the threshold management Qiaobing style */
10709 if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
10710 stcb->asoc.max_send_times)) {
10712 * we have lost the association; in a way this is
10713 * quite bad, since we are effectively down one retry
10714 * that we never actually sent. This is the downside
10715 * to Q's style as defined in the RFC, as opposed to
10716 * the alternate style.
10718 if (chk->data != NULL) {
10719 sctp_m_freem(chk->data);
10723 * Here we do NOT use the macro since the
10724 * association is now gone.
10727 sctp_free_remote_addr(chk->whoTo);
10730 sctp_free_a_chunk((struct sctp_tcb *)NULL, chk);
10734 net->hb_responded = 0;
10735 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
10736 stcb->asoc.ctrl_queue_cnt++;
10737 SCTP_STAT_INCR(sctps_sendheartbeat);
10739 * Call the medium-level routine directly to put out the chunk. It will
10740 * always tumble out control chunks (aka HB), but it may even tumble out data too.
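/*
 * Illustrative sketch (not compiled): time_value_1/2 filled in above are
 * echoed back by the peer in the HEARTBEAT-ACK, so the RTT can be
 * recovered by subtracting them from the arrival time.  Hypothetical
 * helper; host byte order assumed, as in the code above:
 */
#if 0
static long
hb_rtt_ms(const struct sctp_heartbeat_info_param *info,
    const struct timeval *arrival)
{
	return ((arrival->tv_sec - (time_t)info->time_value_1) * 1000 +
	    (arrival->tv_usec - (long)info->time_value_2) / 1000);
}
#endif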
10747 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
10750 struct sctp_association *asoc;
10751 struct sctp_ecne_chunk *ecne;
10752 struct sctp_tmit_chunk *chk;
10754 asoc = &stcb->asoc;
10755 SCTP_TCB_LOCK_ASSERT(stcb);
10756 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10757 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
10758 /* found a previous ECN_ECHO; update it if needed */
10759 uint32_t cnt, ctsn;
10761 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
10762 ctsn = ntohl(ecne->tsn);
10763 if (SCTP_TSN_GT(high_tsn, ctsn)) {
10764 ecne->tsn = htonl(high_tsn);
10765 SCTP_STAT_INCR(sctps_queue_upd_ecne);
10767 cnt = ntohl(ecne->num_pkts_since_cwr);
10769 ecne->num_pkts_since_cwr = htonl(cnt);
10773 /* nope, could not find one to update, so we must build one */
10774 sctp_alloc_a_chunk(stcb, chk);
10778 chk->copy_by_ref = 0;
10779 SCTP_STAT_INCR(sctps_queue_upd_ecne);
10780 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
10781 chk->rec.chunk_id.can_take_data = 0;
10782 chk->asoc = &stcb->asoc;
10783 chk->send_size = sizeof(struct sctp_ecne_chunk);
10784 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
10785 if (chk->data == NULL) {
10786 sctp_free_a_chunk(stcb, chk);
10789 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10790 SCTP_BUF_LEN(chk->data) = chk->send_size;
10791 chk->sent = SCTP_DATAGRAM_UNSENT;
10792 chk->snd_count = 0;
10794 atomic_add_int(&chk->whoTo->ref_count, 1);
10795 stcb->asoc.ecn_echo_cnt_onq++;
10796 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
10797 ecne->ch.chunk_type = SCTP_ECN_ECHO;
10798 ecne->ch.chunk_flags = 0;
10799 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
10800 ecne->tsn = htonl(high_tsn);
10801 ecne->num_pkts_since_cwr = htonl(1);
10802 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
10803 asoc->ctrl_queue_cnt++;
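/*
 * Illustrative sketch (not compiled): TSNs are 32-bit serial numbers, so
 * the SCTP_TSN_GT() test above must compare modulo 2^32.  One common
 * wraparound-safe formulation (a sketch, not necessarily the macro's
 * exact definition):
 */
#if 0
static int
tsn_gt(uint32_t a, uint32_t b)
{
	/* true when a is "ahead of" b on the 32-bit circle */
	return (((int32_t)(a - b)) > 0);
}
#endif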
10807 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
10808 struct mbuf *m, int iphlen, int bad_crc)
10810 struct sctp_association *asoc;
10811 struct sctp_pktdrop_chunk *drp;
10812 struct sctp_tmit_chunk *chk;
10819 struct ip6_hdr *ip6h;
10822 int fullsz = 0, extra = 0;
10825 struct sctp_chunkhdr *ch, chunk_buf;
10826 unsigned int chk_length;
10831 asoc = &stcb->asoc;
10832 SCTP_TCB_LOCK_ASSERT(stcb);
10833 if (asoc->peer_supports_pktdrop == 0) {
10835 * peer must declare support before I send one.
10839 if (stcb->sctp_socket == NULL) {
10842 sctp_alloc_a_chunk(stcb, chk);
10846 chk->copy_by_ref = 0;
10847 iph = mtod(m, struct ip *);
10849 sctp_free_a_chunk(stcb, chk);
10852 switch (iph->ip_v) {
10855 len = chk->send_size = iph->ip_len;
10858 case IPV6_VERSION >> 4:
10860 ip6h = mtod(m, struct ip6_hdr *);
10861 len = chk->send_size = htons(ip6h->ip6_plen);
10867 /* Validate that we do not have an ABORT in here. */
10868 offset = iphlen + sizeof(struct sctphdr);
10869 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
10870 sizeof(*ch), (uint8_t *) & chunk_buf);
10871 while (ch != NULL) {
10872 chk_length = ntohs(ch->chunk_length);
10873 if (chk_length < sizeof(*ch)) {
10874 /* break to abort land */
10877 switch (ch->chunk_type) {
10878 case SCTP_PACKET_DROPPED:
10879 case SCTP_ABORT_ASSOCIATION:
10880 case SCTP_INITIATION_ACK:
10882 * We don't respond with a PKT-DROP to an ABORT
10883 * or PKT-DROP. We also do not respond to an
10884 * INIT-ACK, because we can't know if the initiation
10885 * tag is correct or not.
10887 sctp_free_a_chunk(stcb, chk);
10892 offset += SCTP_SIZE32(chk_length);
10893 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
10894 sizeof(*ch), (uint8_t *) & chunk_buf);
10897 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
10898 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
10900 * only send one MTU's worth; trim off the excess at the end.
10902 fullsz = len - extra;
10903 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
10906 chk->asoc = &stcb->asoc;
10907 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
10908 if (chk->data == NULL) {
10910 sctp_free_a_chunk(stcb, chk);
10913 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10914 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
10916 sctp_m_freem(chk->data);
10920 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
10921 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
10922 chk->book_size_scale = 0;
10924 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
10925 drp->trunc_len = htons(fullsz);
10927 * len was already adjusted above to the size minus overhead;
10928 * now take the pkt_drop chunk itself out of it.
10930 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
10931 len = chk->send_size;
10933 /* no truncation needed */
10934 drp->ch.chunk_flags = 0;
10935 drp->trunc_len = htons(0);
10938 drp->ch.chunk_flags |= SCTP_BADCRC;
10940 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
10941 SCTP_BUF_LEN(chk->data) = chk->send_size;
10942 chk->sent = SCTP_DATAGRAM_UNSENT;
10943 chk->snd_count = 0;
10945 /* we should hit here */
10948 chk->whoTo = asoc->primary_destination;
10950 atomic_add_int(&chk->whoTo->ref_count, 1);
10951 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
10952 chk->rec.chunk_id.can_take_data = 1;
10953 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
10954 drp->ch.chunk_length = htons(chk->send_size);
10955 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
10959 drp->bottle_bw = htonl(spc);
10960 if (asoc->my_rwnd) {
10961 drp->current_onq = htonl(asoc->size_on_reasm_queue +
10962 asoc->size_on_all_streams +
10963 asoc->my_rwnd_control_len +
10964 stcb->sctp_socket->so_rcv.sb_cc);
10967 * If my rwnd is 0, possibly from mbuf depletion as well as
10968 * space used, tell the peer there is NO space, i.e. onq == bottle_bw.
10970 drp->current_onq = htonl(spc);
10974 m_copydata(m, iphlen, len, (caddr_t)datap);
10975 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
10976 asoc->ctrl_queue_cnt++;
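/*
 * Illustrative sketch (not compiled) of the truncation rule applied
 * above: a PKT-DROP report larger than one MTU budget is clipped, and
 * trunc_len advertises the original length to the peer.  Hypothetical
 * helper:
 */
#if 0
static int
clip_drop_report(int len, int mtu_budget, int *fullsz)
{
	if (len > mtu_budget) {
		*fullsz = len;		/* untruncated length for trunc_len */
		return (mtu_budget);	/* send only one MTU's worth */
	}
	*fullsz = 0;			/* no truncation needed */
	return (len);
}
#endif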
10980 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
10982 struct sctp_association *asoc;
10983 struct sctp_cwr_chunk *cwr;
10984 struct sctp_tmit_chunk *chk;
10986 asoc = &stcb->asoc;
10987 SCTP_TCB_LOCK_ASSERT(stcb);
10990 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10991 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
10993 * found a previous CWR queued to the same destination;
10994 * update it if needed
10998 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
10999 ctsn = ntohl(cwr->tsn);
11000 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11001 cwr->tsn = htonl(high_tsn);
11003 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11004 /* Make sure override is carried */
11005 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11010 sctp_alloc_a_chunk(stcb, chk);
11014 chk->copy_by_ref = 0;
11015 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11016 chk->rec.chunk_id.can_take_data = 1;
11017 chk->asoc = &stcb->asoc;
11018 chk->send_size = sizeof(struct sctp_cwr_chunk);
11019 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11020 if (chk->data == NULL) {
11021 sctp_free_a_chunk(stcb, chk);
11024 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11025 SCTP_BUF_LEN(chk->data) = chk->send_size;
11026 chk->sent = SCTP_DATAGRAM_UNSENT;
11027 chk->snd_count = 0;
11029 atomic_add_int(&chk->whoTo->ref_count, 1);
11030 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11031 cwr->ch.chunk_type = SCTP_ECN_CWR;
11032 cwr->ch.chunk_flags = override;
11033 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11034 cwr->tsn = htonl(high_tsn);
11035 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11036 asoc->ctrl_queue_cnt++;
11040 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11041 int number_entries, uint16_t * list,
11042 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11044 int len, old_len, i;
11045 struct sctp_stream_reset_out_request *req_out;
11046 struct sctp_chunkhdr *ch;
11048 ch = mtod(chk->data, struct sctp_chunkhdr *);
11051 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11053 /* get to new offset for the param. */
11054 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11055 /* now how long will this param be? */
11056 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11057 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11058 req_out->ph.param_length = htons(len);
11059 req_out->request_seq = htonl(seq);
11060 req_out->response_seq = htonl(resp_seq);
11061 req_out->send_reset_at_tsn = htonl(last_sent);
11062 if (number_entries) {
11063 for (i = 0; i < number_entries; i++) {
11064 req_out->list_of_streams[i] = htons(list[i]);
11067 if (SCTP_SIZE32(len) > len) {
11069 * Need to worry about the pad we may end up adding to the
11070 * end. This is easy since the struct is either aligned to 4
11071 * bytes or 2 bytes off.
11073 req_out->list_of_streams[number_entries] = 0;
11075 /* now fix the chunk length */
11076 ch->chunk_length = htons(len + old_len);
11077 chk->book_size = len + old_len;
11078 chk->book_size_scale = 0;
11079 chk->send_size = SCTP_SIZE32(chk->book_size);
11080 SCTP_BUF_LEN(chk->data) = chk->send_size;
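/*
 * Illustrative sketch (not compiled): param_length carries the unpadded
 * size while the chunk itself is laid out on 32-bit boundaries; with a
 * 16-bit stream list the parameter is either aligned or exactly 2 bytes
 * short, so the one zeroed spare entry written above supplies the pad.
 * The rounding involved is the usual:
 */
#if 0
static int
pad_to_4(int len)
{
	return ((len + 3) & ~3);	/* equivalent to SCTP_SIZE32(len) */
}
#endif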
11086 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11087 int number_entries, uint16_t * list,
11090 int len, old_len, i;
11091 struct sctp_stream_reset_in_request *req_in;
11092 struct sctp_chunkhdr *ch;
11094 ch = mtod(chk->data, struct sctp_chunkhdr *);
11097 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11099 /* get to new offset for the param. */
11100 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11101 /* now how long will this param be? */
11102 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11103 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11104 req_in->ph.param_length = htons(len);
11105 req_in->request_seq = htonl(seq);
11106 if (number_entries) {
11107 for (i = 0; i < number_entries; i++) {
11108 req_in->list_of_streams[i] = htons(list[i]);
11111 if (SCTP_SIZE32(len) > len) {
11113 * Need to worry about the pad we may end up adding to the
11114 * end. This is easy since the struct is either aligned to 4
11115 * bytes or 2 bytes off.
11117 req_in->list_of_streams[number_entries] = 0;
11119 /* now fix the chunk length */
11120 ch->chunk_length = htons(len + old_len);
11121 chk->book_size = len + old_len;
11122 chk->book_size_scale = 0;
11123 chk->send_size = SCTP_SIZE32(chk->book_size);
11124 SCTP_BUF_LEN(chk->data) = chk->send_size;
11130 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11134 struct sctp_stream_reset_tsn_request *req_tsn;
11135 struct sctp_chunkhdr *ch;
11137 ch = mtod(chk->data, struct sctp_chunkhdr *);
11140 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11142 /* get to new offset for the param. */
11143 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11144 /* now how long will this param be? */
11145 len = sizeof(struct sctp_stream_reset_tsn_request);
11146 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11147 req_tsn->ph.param_length = htons(len);
11148 req_tsn->request_seq = htonl(seq);
11150 /* now fix the chunk length */
11151 ch->chunk_length = htons(len + old_len);
11152 chk->send_size = len + old_len;
11153 chk->book_size = SCTP_SIZE32(chk->send_size);
11154 chk->book_size_scale = 0;
11155 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11160 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11161 uint32_t resp_seq, uint32_t result)
11164 struct sctp_stream_reset_response *resp;
11165 struct sctp_chunkhdr *ch;
11167 ch = mtod(chk->data, struct sctp_chunkhdr *);
11170 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11172 /* get to new offset for the param. */
11173 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11174 /* now how long will this param be? */
11175 len = sizeof(struct sctp_stream_reset_response);
11176 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11177 resp->ph.param_length = htons(len);
11178 resp->response_seq = htonl(resp_seq);
11179 resp->result = htonl(result);
11181 /* now fix the chunk length */
11182 ch->chunk_length = htons(len + old_len);
11183 chk->book_size = len + old_len;
11184 chk->book_size_scale = 0;
11185 chk->send_size = SCTP_SIZE32(chk->book_size);
11186 SCTP_BUF_LEN(chk->data) = chk->send_size;
11193 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11194 uint32_t resp_seq, uint32_t result,
11195 uint32_t send_una, uint32_t recv_next)
11198 struct sctp_stream_reset_response_tsn *resp;
11199 struct sctp_chunkhdr *ch;
11201 ch = mtod(chk->data, struct sctp_chunkhdr *);
11204 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11206 /* get to new offset for the param. */
11207 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11208 /* now how long will this param be? */
11209 len = sizeof(struct sctp_stream_reset_response_tsn);
11210 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11211 resp->ph.param_length = htons(len);
11212 resp->response_seq = htonl(resp_seq);
11213 resp->result = htonl(result);
11214 resp->senders_next_tsn = htonl(send_una);
11215 resp->receivers_next_tsn = htonl(recv_next);
11217 /* now fix the chunk length */
11218 ch->chunk_length = htons(len + old_len);
11219 chk->book_size = len + old_len;
11220 chk->send_size = SCTP_SIZE32(chk->book_size);
11221 chk->book_size_scale = 0;
11222 SCTP_BUF_LEN(chk->data) = chk->send_size;
11227 sctp_add_a_stream(struct sctp_tmit_chunk *chk,
11232 struct sctp_chunkhdr *ch;
11233 struct sctp_stream_reset_add_strm *addstr;
11235 ch = mtod(chk->data, struct sctp_chunkhdr *);
11236 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11238 /* get to new offset for the param. */
11239 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11240 /* now how long will this param be? */
11241 len = sizeof(struct sctp_stream_reset_add_strm);
11244 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_STREAMS);
11245 addstr->ph.param_length = htons(len);
11246 addstr->request_seq = htonl(seq);
11247 addstr->number_of_streams = htons(adding);
11248 addstr->reserved = 0;
11250 /* now fix the chunk length */
11251 ch->chunk_length = htons(len + old_len);
11252 chk->send_size = len + old_len;
11253 chk->book_size = SCTP_SIZE32(chk->send_size);
11254 chk->book_size_scale = 0;
11255 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11260 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11261 int number_entries, uint16_t * list,
11262 uint8_t send_out_req,
11264 uint8_t send_in_req,
11265 uint8_t send_tsn_req,
11266 uint8_t add_stream,
11271 struct sctp_association *asoc;
11272 struct sctp_tmit_chunk *chk;
11273 struct sctp_chunkhdr *ch;
11276 asoc = &stcb->asoc;
11277 if (asoc->stream_reset_outstanding) {
11279 * Already one pending, must get ACK back to clear the flag.
11281 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11284 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11285 (add_stream == 0)) {
11286 /* nothing to do */
11287 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11290 if (send_tsn_req && (send_out_req || send_in_req)) {
11291 /* error, can't do that */
11292 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11295 sctp_alloc_a_chunk(stcb, chk);
11297 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11300 chk->copy_by_ref = 0;
11301 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11302 chk->rec.chunk_id.can_take_data = 0;
11303 chk->asoc = &stcb->asoc;
11304 chk->book_size = sizeof(struct sctp_chunkhdr);
11305 chk->send_size = SCTP_SIZE32(chk->book_size);
11306 chk->book_size_scale = 0;
11308 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11309 if (chk->data == NULL) {
11310 sctp_free_a_chunk(stcb, chk);
11311 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11314 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11316 /* setup chunk parameters */
11317 chk->sent = SCTP_DATAGRAM_UNSENT;
11318 chk->snd_count = 0;
11319 chk->whoTo = asoc->primary_destination;
11320 atomic_add_int(&chk->whoTo->ref_count, 1);
11322 ch = mtod(chk->data, struct sctp_chunkhdr *);
11323 ch->chunk_type = SCTP_STREAM_RESET;
11324 ch->chunk_flags = 0;
11325 ch->chunk_length = htons(chk->book_size);
11326 SCTP_BUF_LEN(chk->data) = chk->send_size;
11328 seq = stcb->asoc.str_reset_seq_out;
11329 if (send_out_req) {
11330 sctp_add_stream_reset_out(chk, number_entries, list,
11331 seq, resp_seq, (stcb->asoc.sending_seq - 1));
11332 asoc->stream_reset_out_is_outstanding = 1;
11334 asoc->stream_reset_outstanding++;
11337 sctp_add_a_stream(chk, seq, adding);
11339 asoc->stream_reset_outstanding++;
11342 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11343 asoc->stream_reset_outstanding++;
11345 if (send_tsn_req) {
11346 sctp_add_stream_reset_tsn(chk, seq);
11347 asoc->stream_reset_outstanding++;
11349 asoc->str_reset = chk;
11351 /* insert the chunk for sending */
11352 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11355 asoc->ctrl_queue_cnt++;
11356 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
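/*
 * Illustrative sketch (not compiled): each sctp_add_stream_reset_*()
 * helper above appends one TLV parameter to the shared STREAM-RESET
 * chunk and grows chunk_length in place, which is what lets several
 * requests ride in a single chunk.  Simplified stand-in logic, names
 * hypothetical:
 */
#if 0
old_len = SCTP_SIZE32(ntohs(ch->chunk_length));	/* end of present params */
param = (caddr_t)ch + old_len;			/* next free slot */
/* ...fill in the parameter's type, length and body... */
ch->chunk_length = htons(param_len + old_len);	/* chunk now covers it */
#endif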
11361 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
11362 struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
11365 * Formulate the abort message, and send it back down.
11367 struct mbuf *o_pak;
11369 struct sctp_abort_msg *abm;
11370 struct ip *iph, *iph_out;
11371 struct udphdr *udp;
11374 struct ip6_hdr *ip6, *ip6_out;
11377 int iphlen_out, len;
11379 /* don't respond to ABORT with ABORT */
11380 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11382 sctp_m_freem(err_cause);
11385 iph = mtod(m, struct ip *);
11386 switch (iph->ip_v) {
11388 len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
11391 case IPV6_VERSION >> 4:
11392 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
11397 sctp_m_freem(err_cause);
11402 len += sizeof(struct udphdr);
11404 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
11405 if (mout == NULL) {
11407 sctp_m_freem(err_cause);
11411 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11412 SCTP_BUF_LEN(mout) = len;
11413 SCTP_BUF_NEXT(mout) = err_cause;
11414 if (m->m_flags & M_FLOWID) {
11415 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
11416 mout->m_flags |= M_FLOWID;
11422 switch (iph->ip_v) {
11424 iph_out = mtod(mout, struct ip *);
11426 /* Fill in the IP header for the ABORT */
11427 iph_out->ip_v = IPVERSION;
11428 iph_out->ip_hl = (sizeof(struct ip) / 4);
11429 iph_out->ip_tos = (u_char)0;
11430 iph_out->ip_id = 0;
11431 iph_out->ip_off = 0;
11432 iph_out->ip_ttl = MAXTTL;
11434 iph_out->ip_p = IPPROTO_UDP;
11436 iph_out->ip_p = IPPROTO_SCTP;
11438 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
11439 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
11440 /* let IP layer calculate this */
11441 iph_out->ip_sum = 0;
11443 iphlen_out = sizeof(*iph_out);
11444 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
11447 case IPV6_VERSION >> 4:
11448 ip6 = (struct ip6_hdr *)iph;
11449 ip6_out = mtod(mout, struct ip6_hdr *);
11451 /* Fill in the IP6 header for the ABORT */
11452 ip6_out->ip6_flow = ip6->ip6_flow;
11453 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11455 ip6_out->ip6_nxt = IPPROTO_UDP;
11457 ip6_out->ip6_nxt = IPPROTO_SCTP;
11459 ip6_out->ip6_src = ip6->ip6_dst;
11460 ip6_out->ip6_dst = ip6->ip6_src;
11462 iphlen_out = sizeof(*ip6_out);
11463 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
11467 /* Currently not supported */
11468 sctp_m_freem(mout);
11472 udp = (struct udphdr *)abm;
11474 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11475 udp->uh_dport = port;
11476 /* set udp->uh_ulen later */
11478 iphlen_out += sizeof(struct udphdr);
11479 abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
11481 abm->sh.src_port = sh->dest_port;
11482 abm->sh.dest_port = sh->src_port;
11483 abm->sh.checksum = 0;
11485 abm->sh.v_tag = sh->v_tag;
11486 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
11488 abm->sh.v_tag = htonl(vtag);
11489 abm->msg.ch.chunk_flags = 0;
11491 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11494 struct mbuf *m_tmp = err_cause;
11497 /* get length of the err_cause chain */
11498 while (m_tmp != NULL) {
11499 err_len += SCTP_BUF_LEN(m_tmp);
11500 m_tmp = SCTP_BUF_NEXT(m_tmp);
11502 len = SCTP_BUF_LEN(mout) + err_len;
11504 /* need pad at end of chunk */
11505 uint32_t cpthis = 0;
11508 padlen = 4 - (len % 4);
11509 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
11512 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
11514 len = SCTP_BUF_LEN(mout);
11515 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
11518 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11520 sctp_m_freem(mout);
11523 if (iph_out != NULL) {
11527 /* zap the stack pointer to the route */
11528 bzero(&ro, sizeof ro);
11530 udp->uh_ulen = htons(len - sizeof(struct ip));
11531 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11533 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
11534 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
11535 /* set IPv4 length */
11536 iph_out->ip_len = len;
11538 #ifdef SCTP_PACKET_LOGGING
11539 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11540 sctp_packet_log(mout, len);
11542 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11544 #if defined(SCTP_WITH_NO_CSUM)
11545 SCTP_STAT_INCR(sctps_sendnocrc);
11547 abm->sh.checksum = sctp_calculate_cksum(mout, iphlen_out);
11548 SCTP_STAT_INCR(sctps_sendswcrc);
11550 SCTP_ENABLE_UDP_CSUM(o_pak);
11552 #if defined(SCTP_WITH_NO_CSUM)
11553 SCTP_STAT_INCR(sctps_sendnocrc);
11555 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11556 mout->m_pkthdr.csum_data = 0;
11557 SCTP_STAT_INCR(sctps_sendhwcrc);
11560 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11562 /* Free the route if we got one back */
11567 if (ip6_out != NULL) {
11568 struct route_in6 ro;
11570 struct ifnet *ifp = NULL;
11572 /* zap the stack pointer to the route */
11573 bzero(&ro, sizeof(ro));
11575 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
11577 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
11578 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
11579 ip6_out->ip6_plen = len - sizeof(*ip6_out);
11580 #ifdef SCTP_PACKET_LOGGING
11581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11582 sctp_packet_log(mout, len);
11584 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11586 #if defined(SCTP_WITH_NO_CSUM)
11587 SCTP_STAT_INCR(sctps_sendnocrc);
11589 abm->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11590 SCTP_STAT_INCR(sctps_sendswcrc);
11592 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11593 udp->uh_sum = 0xffff;
11596 #if defined(SCTP_WITH_NO_CSUM)
11597 SCTP_STAT_INCR(sctps_sendnocrc);
11599 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11600 mout->m_pkthdr.csum_data = 0;
11601 SCTP_STAT_INCR(sctps_sendhwcrc);
11604 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
11606 /* Free the route if we got one back */
11611 SCTP_STAT_INCR(sctps_sendpackets);
11612 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11613 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
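/*
 * Illustrative sketch (not compiled): with the UDP encapsulation above,
 * a transmitted UDP checksum of zero means "no checksum", which IPv6
 * does not allow, so a computed sum of 0 is folded to 0xffff:
 */
#if 0
udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP,
    sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr));
if (udp->uh_sum == 0)
	udp->uh_sum = 0xffff;	/* 0 on the wire would mean "none" */
#endif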
11617 sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
11618 uint32_t vrf_id, uint16_t port)
11620 struct mbuf *o_pak;
11621 struct sctphdr *sh, *sh_out;
11622 struct sctp_chunkhdr *ch;
11623 struct ip *iph, *iph_out;
11624 struct udphdr *udp = NULL;
11628 struct ip6_hdr *ip6, *ip6_out;
11631 int iphlen_out, len;
11633 iph = mtod(m, struct ip *);
11634 sh = (struct sctphdr *)((caddr_t)iph + iphlen);
11635 switch (iph->ip_v) {
11637 len = (sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
11640 case IPV6_VERSION >> 4:
11641 len = (sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
11651 len += sizeof(struct udphdr);
11653 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
11654 if (mout == NULL) {
11660 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11661 SCTP_BUF_LEN(mout) = len;
11662 SCTP_BUF_NEXT(mout) = scm;
11663 if (m->m_flags & M_FLOWID) {
11664 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
11665 mout->m_flags |= M_FLOWID;
11671 switch (iph->ip_v) {
11673 iph_out = mtod(mout, struct ip *);
11675 /* Fill in the IP header for the operational error */
11676 iph_out->ip_v = IPVERSION;
11677 iph_out->ip_hl = (sizeof(struct ip) / 4);
11678 iph_out->ip_tos = (u_char)0;
11679 iph_out->ip_id = 0;
11680 iph_out->ip_off = 0;
11681 iph_out->ip_ttl = MAXTTL;
11683 iph_out->ip_p = IPPROTO_UDP;
11685 iph_out->ip_p = IPPROTO_SCTP;
11687 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
11688 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
11689 /* let IP layer calculate this */
11690 iph_out->ip_sum = 0;
11692 iphlen_out = sizeof(struct ip);
11693 sh_out = (struct sctphdr *)((caddr_t)iph_out + iphlen_out);
11696 case IPV6_VERSION >> 4:
11697 ip6 = (struct ip6_hdr *)iph;
11698 ip6_out = mtod(mout, struct ip6_hdr *);
11700 /* Fill in the IP6 header for the operational error */
11701 ip6_out->ip6_flow = ip6->ip6_flow;
11702 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11704 ip6_out->ip6_nxt = IPPROTO_UDP;
11706 ip6_out->ip6_nxt = IPPROTO_SCTP;
11708 ip6_out->ip6_src = ip6->ip6_dst;
11709 ip6_out->ip6_dst = ip6->ip6_src;
11711 iphlen_out = sizeof(struct ip6_hdr);
11712 sh_out = (struct sctphdr *)((caddr_t)ip6_out + iphlen_out);
11716 /* Currently not supported */
11717 sctp_m_freem(mout);
11721 udp = (struct udphdr *)sh_out;
11723 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11724 udp->uh_dport = port;
11725 /* set udp->uh_ulen later */
11727 iphlen_out += sizeof(struct udphdr);
11728 sh_out = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
11730 sh_out->src_port = sh->dest_port;
11731 sh_out->dest_port = sh->src_port;
11732 sh_out->v_tag = vtag;
11733 sh_out->checksum = 0;
11735 ch = (struct sctp_chunkhdr *)((caddr_t)sh_out + sizeof(struct sctphdr));
11736 ch->chunk_type = SCTP_OPERATION_ERROR;
11737 ch->chunk_flags = 0;
11740 struct mbuf *m_tmp = scm;
11743 /* get length of the error cause chain (scm) */
11744 while (m_tmp != NULL) {
11745 cause_len += SCTP_BUF_LEN(m_tmp);
11746 m_tmp = SCTP_BUF_NEXT(m_tmp);
11748 len = SCTP_BUF_LEN(mout) + cause_len;
11749 if (cause_len % 4) {
11750 /* need pad at end of chunk */
11751 uint32_t cpthis = 0;
11754 padlen = 4 - (len % 4);
11755 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
11758 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
11760 len = SCTP_BUF_LEN(mout);
11761 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr));
11764 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11766 sctp_m_freem(mout);
11769 if (iph_out != NULL) {
11773 /* zap the stack pointer to the route */
11774 bzero(&ro, sizeof ro);
11776 udp->uh_ulen = htons(len - sizeof(struct ip));
11777 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11779 /* set IPv4 length */
11780 iph_out->ip_len = len;
11782 #ifdef SCTP_PACKET_LOGGING
11783 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11784 sctp_packet_log(mout, len);
11786 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11788 #if defined(SCTP_WITH_NO_CSUM)
11789 SCTP_STAT_INCR(sctps_sendnocrc);
11791 sh_out->checksum = sctp_calculate_cksum(mout, iphlen_out);
11792 SCTP_STAT_INCR(sctps_sendswcrc);
11794 SCTP_ENABLE_UDP_CSUM(o_pak);
11796 #if defined(SCTP_WITH_NO_CSUM)
11797 SCTP_STAT_INCR(sctps_sendnocrc);
11799 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11800 mout->m_pkthdr.csum_data = 0;
11801 SCTP_STAT_INCR(sctps_sendhwcrc);
11804 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11806 /* Free the route if we got one back */
11811 if (ip6_out != NULL) {
11812 struct route_in6 ro;
11814 struct ifnet *ifp = NULL;
11816 /* zap the stack pointer to the route */
11817 bzero(&ro, sizeof(ro));
11819 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
11821 ip6_out->ip6_plen = len - sizeof(*ip6_out);
11822 #ifdef SCTP_PACKET_LOGGING
11823 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11824 sctp_packet_log(mout, len);
11826 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11828 #if defined(SCTP_WITH_NO_CSUM)
11829 SCTP_STAT_INCR(sctps_sendnocrc);
11831 sh_out->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11832 SCTP_STAT_INCR(sctps_sendswcrc);
11834 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11835 udp->uh_sum = 0xffff;
11838 #if defined(SCTP_WITH_NO_CSUM)
11839 SCTP_STAT_INCR(sctps_sendnocrc);
11841 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11842 mout->m_pkthdr.csum_data = 0;
11843 SCTP_STAT_INCR(sctps_sendhwcrc);
11846 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
11848 /* Free the route if we got one back */
11853 SCTP_STAT_INCR(sctps_sendpackets);
11854 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11855 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
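/*
 * Illustrative sketch (not compiled): the cause-length walk above sums
 * SCTP_BUF_LEN() over the whole mbuf chain.  As a hypothetical helper:
 */
#if 0
static int
chain_len(struct mbuf *m)
{
	int len = 0;

	for (; m != NULL; m = SCTP_BUF_NEXT(m))
		len += SCTP_BUF_LEN(m);
	return (len);
}
#endif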
11858 static struct mbuf *
11859 sctp_copy_resume(struct sctp_stream_queue_pending *sp,
11861 struct sctp_sndrcvinfo *srcv,
11863 int user_marks_eor,
11866 struct mbuf **new_tail)
11870 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
11871 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
11873 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11876 *sndout = m_length(m, NULL);
11877 *new_tail = m_last(m);
11883 sctp_copy_one(struct sctp_stream_queue_pending *sp,
11890 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
11892 if (sp->data == NULL) {
11893 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11896 sp->tail_mbuf = m_last(sp->data);
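/*
 * Illustrative sketch (not compiled): m_uiotombuf(9) copies user data
 * into a fresh mbuf chain in one call, and m_last() yields the tail so
 * later appends stay O(1).  Simplified usage; names hypothetical:
 */
#if 0
m = m_uiotombuf(uio, M_WAITOK, want_len, 0, M_PKTHDR);
if (m == NULL)
	return (ENOMEM);	/* uio may already be partially drained */
tail = m_last(m);
#endif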
11902 static struct sctp_stream_queue_pending *
11903 sctp_copy_it_in(struct sctp_tcb *stcb,
11904 struct sctp_association *asoc,
11905 struct sctp_sndrcvinfo *srcv,
11907 struct sctp_nets *net,
11909 int user_marks_eor,
11914 * This routine must be very careful in its work. Protocol
11915 * processing is up and running so care must be taken to spl...()
11916 * when you need to do something that may affect the stcb/asoc. The
11917 * sb is locked however. When data is copied the protocol processing
11918 * should be enabled since this is a slower operation...
11920 struct sctp_stream_queue_pending *sp = NULL;
11924 /* Now can we send this? */
11925 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
11926 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
11927 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
11928 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
11929 /* got data while shutting down */
11930 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
11931 *error = ECONNRESET;
11934 sctp_alloc_a_strmoq(stcb, sp);
11936 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11941 sp->sender_all_done = 0;
11942 sp->sinfo_flags = srcv->sinfo_flags;
11943 sp->timetolive = srcv->sinfo_timetolive;
11944 sp->ppid = srcv->sinfo_ppid;
11945 sp->context = srcv->sinfo_context;
11947 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
11949 sp->stream = srcv->sinfo_stream;
11950 sp->length = min(uio->uio_resid, max_send_len);
11951 if ((sp->length == (uint32_t) uio->uio_resid) &&
11952 ((user_marks_eor == 0) ||
11953 (srcv->sinfo_flags & SCTP_EOF) ||
11954 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
11955 sp->msg_is_complete = 1;
11957 sp->msg_is_complete = 0;
11959 sp->sender_all_done = 0;
11960 sp->some_taken = 0;
11961 sp->put_last_out = 0;
11962 resv_in_first = sizeof(struct sctp_data_chunk);
11963 sp->data = sp->tail_mbuf = NULL;
11964 if (sp->length == 0) {
11968 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
11969 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
11970 sctp_auth_key_acquire(stcb, stcb->asoc.authinfo.active_keyid);
11971 sp->holds_key_ref = 1;
11973 *error = sctp_copy_one(sp, uio, resv_in_first);
11976 sctp_free_a_strmoq(stcb, sp);
11979 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
11981 atomic_add_int(&sp->net->ref_count, 1);
11985 sctp_set_prsctp_policy(sp);
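/*
 * Illustrative sketch (not compiled) of the completion rule used above:
 * the message is complete when this copy takes all of uio AND either
 * explicit EOR is off or the caller flagged EOF/EOR.  to_copy here is a
 * hypothetical name for the amount this call will consume:
 */
#if 0
sp->msg_is_complete =
    (to_copy == (uint32_t)uio->uio_resid) &&
    ((user_marks_eor == 0) ||
     (srcv->sinfo_flags & SCTP_EOF) ||
     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)));
#endif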
11993 sctp_sosend(struct socket *so,
11994 struct sockaddr *addr,
11997 struct mbuf *control,
12002 int error, use_rcvinfo = 0;
12003 struct sctp_sndrcvinfo srcv;
12004 struct sockaddr *addr_to_use;
12006 #if defined(INET) && defined(INET6)
12007 struct sockaddr_in sin;
12012 /* process cmsg snd/rcv info (maybe an assoc-id) */
12013 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
12019 addr_to_use = addr;
12020 #if defined(INET) && defined(INET6)
12021 if ((addr) && (addr->sa_family == AF_INET6)) {
12022 struct sockaddr_in6 *sin6;
12024 sin6 = (struct sockaddr_in6 *)addr;
12025 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12026 in6_sin6_2_sin(&sin, sin6);
12027 addr_to_use = (struct sockaddr *)&sin;
12031 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12034 use_rcvinfo ? &srcv : NULL
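/*
 * Illustrative sketch (not compiled): the address normalization above
 * converts a V4-mapped IPv6 destination (::ffff:a.b.c.d) back into a
 * plain sockaddr_in before handing it to sctp_lower_sosend():
 */
#if 0
if ((addr->sa_family == AF_INET6) &&
    IN6_IS_ADDR_V4MAPPED(&((struct sockaddr_in6 *)addr)->sin6_addr)) {
	in6_sin6_2_sin(&sin, (struct sockaddr_in6 *)addr);
	addr_to_use = (struct sockaddr *)&sin;
}
#endif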
12042 sctp_lower_sosend(struct socket *so,
12043 struct sockaddr *addr,
12045 struct mbuf *i_pak,
12046 struct mbuf *control,
12048 struct sctp_sndrcvinfo *srcv
12053 unsigned int sndlen = 0, max_len;
12055 struct mbuf *top = NULL;
12056 int queue_only = 0, queue_only_for_init = 0;
12057 int free_cnt_applied = 0;
12059 int now_filled = 0;
12060 unsigned int inqueue_bytes = 0;
12061 struct sctp_block_entry be;
12062 struct sctp_inpcb *inp;
12063 struct sctp_tcb *stcb = NULL;
12064 struct timeval now;
12065 struct sctp_nets *net;
12066 struct sctp_association *asoc;
12067 struct sctp_inpcb *t_inp;
12068 int user_marks_eor;
12069 int create_lock_applied = 0;
12070 int nagle_applies = 0;
12071 int some_on_control = 0;
12072 int got_all_of_the_send = 0;
12073 int hold_tcblock = 0;
12074 int non_blocking = 0;
12075 uint32_t local_add_more, local_soresv = 0;
12077 uint16_t sinfo_flags;
12078 sctp_assoc_t sinfo_assoc_id;
12085 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12087 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12090 SCTP_RELEASE_PKT(i_pak);
12094 if ((uio == NULL) && (i_pak == NULL)) {
12095 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12098 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12099 atomic_add_int(&inp->total_sends, 1);
12101 if (uio->uio_resid < 0) {
12102 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12105 sndlen = uio->uio_resid;
12107 top = SCTP_HEADER_TO_CHAIN(i_pak);
12108 sndlen = SCTP_HEADER_LEN(i_pak);
12110 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12113 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12114 (inp->sctp_socket->so_qlimit)) {
12115 /* The listener can NOT send */
12116 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12121 * Pre-screen the address; if one is given, the sin-len
12122 * must be set correctly!
12125 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12127 switch (raddr->sa.sa_family) {
12130 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12131 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12135 port = raddr->sin.sin_port;
12140 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12141 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12145 port = raddr->sin6.sin6_port;
12149 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12150 error = EAFNOSUPPORT;
12157 sinfo_flags = srcv->sinfo_flags;
12158 sinfo_assoc_id = srcv->sinfo_assoc_id;
12159 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12160 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12161 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12165 if (srcv->sinfo_flags)
12166 SCTP_STAT_INCR(sctps_sends_with_flags);
12168 sinfo_flags = inp->def_send.sinfo_flags;
12169 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12171 if (sinfo_flags & SCTP_SENDALL) {
12172 /* it's a sendall */
12173 error = sctp_sendall(inp, uio, top, srcv);
12177 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12178 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12182 /* now we must find the assoc */
12183 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12184 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12185 SCTP_INP_RLOCK(inp);
12186 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12187 if (stcb == NULL) {
12188 SCTP_INP_RUNLOCK(inp);
12189 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12193 SCTP_TCB_LOCK(stcb);
12195 SCTP_INP_RUNLOCK(inp);
12196 } else if (sinfo_assoc_id) {
12197 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
12200 * Since we did not use findep we must
12201 * increment it, and if we don't find a tcb, decrement it.
12204 SCTP_INP_WLOCK(inp);
12205 SCTP_INP_INCR_REF(inp);
12206 SCTP_INP_WUNLOCK(inp);
12207 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12208 if (stcb == NULL) {
12209 SCTP_INP_WLOCK(inp);
12210 SCTP_INP_DECR_REF(inp);
12211 SCTP_INP_WUNLOCK(inp);
12216 if ((stcb == NULL) && (addr)) {
12217 /* Possible implicit send? */
12218 SCTP_ASOC_CREATE_LOCK(inp);
12219 create_lock_applied = 1;
12220 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12221 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12222 /* Should I really unlock? */
12223 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12228 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12229 (addr->sa_family == AF_INET6)) {
12230 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12234 SCTP_INP_WLOCK(inp);
12235 SCTP_INP_INCR_REF(inp);
12236 SCTP_INP_WUNLOCK(inp);
12237 /* With the lock applied look again */
12238 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12239 if (stcb == NULL) {
12240 SCTP_INP_WLOCK(inp);
12241 SCTP_INP_DECR_REF(inp);
12242 SCTP_INP_WUNLOCK(inp);
12246 if (t_inp != inp) {
12247 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12252 if (stcb == NULL) {
12253 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
12254 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12255 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12259 if (addr == NULL) {
12260 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12265 * UDP style: we must go ahead and start the INIT process.
12270 if ((sinfo_flags & SCTP_ABORT) ||
12271 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12273 * User asks to abort a non-existent assoc,
12274 * or EOF a non-existent assoc with no data
12276 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12280 /* get an asoc/stcb struct */
12281 vrf_id = inp->def_vrf_id;
12283 if (create_lock_applied == 0) {
12284 panic("Error, should hold create lock and I don't?");
12287 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12290 if (stcb == NULL) {
12291 /* Error is set up for us in the call */
12294 if (create_lock_applied) {
12295 SCTP_ASOC_CREATE_UNLOCK(inp);
12296 create_lock_applied = 0;
12298 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12301 * Turn on the queue-only flag to prevent data from being sent yet.
12305 asoc = &stcb->asoc;
12306 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12307 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12309 /* initialize authentication params for the assoc */
12310 sctp_initialize_auth_params(inp, stcb);
12314 * see if an init structure exists in the cmsg headers
12317 struct sctp_initmsg initm;
12320 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
12323 * we have an INIT override of the defaults.
12326 if (initm.sinit_max_attempts)
12327 asoc->max_init_times = initm.sinit_max_attempts;
12328 if (initm.sinit_num_ostreams)
12329 asoc->pre_open_streams = initm.sinit_num_ostreams;
12330 if (initm.sinit_max_instreams)
12331 asoc->max_inbound_streams = initm.sinit_max_instreams;
12332 if (initm.sinit_max_init_timeo)
12333 asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
12334 if (asoc->streamoutcnt < asoc->pre_open_streams) {
12335 struct sctp_stream_out *tmp_str;
12338 /* Default is NOT correct */
12339 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, defout:%d pre_open:%d\n",
12340 asoc->streamoutcnt, asoc->pre_open_streams);
12342 * What happens if this
12343 * fails? We panic ...
12346 if (hold_tcblock) {
12348 SCTP_TCB_UNLOCK(stcb);
12350 SCTP_MALLOC(tmp_str,
12351 struct sctp_stream_out *,
12352 (asoc->pre_open_streams *
12353 sizeof(struct sctp_stream_out)),
12356 SCTP_TCB_LOCK(stcb);
12358 if (tmp_str != NULL) {
12359 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
12360 asoc->strmout = tmp_str;
12361 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;
12363 asoc->pre_open_streams = asoc->streamoutcnt;
12365 for (i = 0; i < asoc->streamoutcnt; i++) {
12367 * inbound side must be set
12368 * to 0xffff, also NOTE when
12369 * we get the INIT-ACK back
12370 * (for INIT sender) we MUST
12372 * reduce the count (streamoutcnt), but first
12373 * check if we sent to any
12374 * of the upper streams that
12375 * were dropped (if some
12376 * were). Those that were
12377 * dropped must be notified
12378 * to the upper layer as failed to send.
12381 asoc->strmout[i].next_sequence_sent = 0x0;
12382 TAILQ_INIT(&asoc->strmout[i].outqueue);
12383 asoc->strmout[i].stream_no = i;
12384 asoc->strmout[i].last_msg_incomplete = 0;
12385 asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
12391 /* out with the INIT */
12392 queue_only_for_init = 1;
12394 * we may want to dig in after this call and adjust the MTU
12395 * value. It defaulted to 1500 (constant) but the ro
12396 * structure may now have an update and thus we may need to
12397 * change it BEFORE we append the message.
12401 asoc = &stcb->asoc;
12403 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12404 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12406 net = sctp_findnet(stcb, addr);
12409 if ((net == NULL) ||
12410 ((port != 0) && (port != stcb->rport))) {
12411 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12416 net = stcb->asoc.primary_destination;
12418 atomic_add_int(&stcb->total_sends, 1);
12419 /* Keep the stcb from being freed under our feet */
12420 atomic_add_int(&asoc->refcnt, 1);
12421 free_cnt_applied = 1;
12423 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12424 if (sndlen > asoc->smallest_mtu) {
12425 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12430 if ((SCTP_SO_IS_NBIO(so)
12431 || (flags & MSG_NBIO)
12435 /* would we block? */
12436 if (non_blocking) {
12437 if (hold_tcblock == 0) {
12438 SCTP_TCB_LOCK(stcb);
12441 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12442 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12443 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12444 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12445 if (sndlen > SCTP_SB_LIMIT_SND(so))
12448 error = EWOULDBLOCK;
12451 stcb->asoc.sb_send_resv += sndlen;
12452 SCTP_TCB_UNLOCK(stcb);
12455 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12457 local_soresv = sndlen;
12458 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12459 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12460 error = ECONNRESET;
12463 if (create_lock_applied) {
12464 SCTP_ASOC_CREATE_UNLOCK(inp);
12465 create_lock_applied = 0;
12467 if (asoc->stream_reset_outstanding) {
12469 * Can't queue any data while stream reset is underway.
12471 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
12475 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12476 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12479 /* we are now done with all control */
12481 sctp_m_freem(control);
12484 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12485 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12486 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12487 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12488 if (srcv->sinfo_flags & SCTP_ABORT) {
12491 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12492 error = ECONNRESET;
12496 /* Ok, we will attempt a msgsnd :> */
12498 p->td_ru.ru_msgsnd++;
12500 /* Are we aborting? */
12501 if (srcv->sinfo_flags & SCTP_ABORT) {
12503 int tot_demand, tot_out = 0, max_out;
12505 SCTP_STAT_INCR(sctps_sends_with_abort);
12506 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12507 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12508 /* It has to be up before we abort */
12509 /* how big is the user initiated abort? */
12510 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12514 if (hold_tcblock) {
12515 SCTP_TCB_UNLOCK(stcb);
12519 struct mbuf *cntm = NULL;
12521 mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);
12525 tot_out += SCTP_BUF_LEN(cntm);
12526 cntm = SCTP_BUF_NEXT(cntm);
12529 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12531 /* Must fit in a MTU */
12533 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12534 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12536 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12540 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
12543 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12547 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12548 max_out -= sizeof(struct sctp_abort_msg);
12549 if (tot_out > max_out) {
12553 struct sctp_paramhdr *ph;
12555 /* now move forward the data pointer */
12556 ph = mtod(mm, struct sctp_paramhdr *);
12557 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12558 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
12560 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12562 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12565 * Here, if we can't get the user's data we
12566 * still abort; we just don't get to
12567 * send the user's note :-0
12574 SCTP_BUF_NEXT(mm) = top;
12578 if (hold_tcblock == 0) {
12579 SCTP_TCB_LOCK(stcb);
12582 atomic_add_int(&stcb->asoc.refcnt, -1);
12583 free_cnt_applied = 0;
12584 /* release this lock, otherwise we hang on ourselves */
12585 sctp_abort_an_association(stcb->sctp_ep, stcb,
12586 SCTP_RESPONSE_TO_USER_REQ,
12587 mm, SCTP_SO_LOCKED);
12588 /* now relock the stcb so everything is sane */
12592 * In this case top is already chained to mm; avoid a double
12593 * free, since we free it below if top != NULL and the driver
12594 * would free it after sending the packet out.
12601 /* Calculate the maximum we can send */
12602 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12603 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12604 if (non_blocking) {
12605 /* we already checked for non-blocking above. */
12608 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12613 if (hold_tcblock) {
12614 SCTP_TCB_UNLOCK(stcb);
12617 /* Is the stream no. valid? */
12618 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12619 /* Invalid stream number */
12620 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12624 if (asoc->strmout == NULL) {
12625 /* huh? software error */
12626 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12630 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
12631 if ((user_marks_eor == 0) &&
12632 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
12633 /* It will NEVER fit */
12634 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12638 if ((uio == NULL) && user_marks_eor) {
12640 * We do not support eeor mode for
12641 * sending with mbuf chains (like sendfile).
12643 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12647 if (user_marks_eor) {
12648 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
12651 * For non-eeor the whole message must fit in
12652 * the socket send buffer.
12654 local_add_more = sndlen;
12657 if (non_blocking) {
12658 goto skip_preblock;
12660 if (((max_len <= local_add_more) &&
12661 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
12663 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12664 /* No room right now! */
12665 SOCKBUF_LOCK(&so->so_snd);
12666 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12667 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
12668 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12669 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
12670 (unsigned int)SCTP_SB_LIMIT_SND(so),
12673 stcb->asoc.stream_queue_cnt,
12674 stcb->asoc.chunks_on_out_queue,
12675 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
12676 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12677 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, so, asoc, sndlen);
12680 stcb->block_entry = &be;
12681 error = sbwait(&so->so_snd);
12682 stcb->block_entry = NULL;
12683 if (error || so->so_error || be.error) {
12686 error = so->so_error;
12691 SOCKBUF_UNLOCK(&so->so_snd);
12694 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12695 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
12696 so, asoc, stcb->asoc.total_output_queue_size);
12698 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12701 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12703 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12704 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12708 SOCKBUF_UNLOCK(&so->so_snd);
12711 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12715 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
12716 * case. NOTE: uio will be NULL when top (an mbuf chain) is passed.
12719 if (srcv->sinfo_flags & SCTP_EOF) {
12720 got_all_of_the_send = 1;
12723 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
	if (top == NULL) {
		struct sctp_stream_queue_pending *sp;
		struct sctp_stream_out *strm;
		uint32_t sndout;

		SCTP_TCB_SEND_LOCK(stcb);
		if ((asoc->stream_locked) &&
		    (asoc->stream_locked_on != srcv->sinfo_stream)) {
			SCTP_TCB_SEND_UNLOCK(stcb);
			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
			error = EINVAL;
			goto out;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
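		/*
		 * Note: the stream_locked check above enforces that while a
		 * partially copied (explicit-EOR) message is still open on
		 * one stream, a send directed at any other stream fails with
		 * EINVAL rather than interleaving bytes into a foreign
		 * message.
		 */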
		strm = &stcb->asoc.strmout[srcv->sinfo_stream];
		if (strm->last_msg_incomplete == 0) {
	do_a_copy_in:
			sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
			if ((sp == NULL) || (error)) {
				goto out;
			}
			SCTP_TCB_SEND_LOCK(stcb);
			if (sp->msg_is_complete) {
				strm->last_msg_incomplete = 0;
				asoc->stream_locked = 0;
			} else {
				/*
				 * Just got locked to this guy in case of an
				 * interrupted send.
				 */
				strm->last_msg_incomplete = 1;
				asoc->stream_locked = 1;
				asoc->stream_locked_on = srcv->sinfo_stream;
				sp->sender_all_done = 0;
			}
			sctp_snd_sb_alloc(stcb, sp->length);
			atomic_add_int(&asoc->stream_queue_cnt, 1);
			if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
				sp->strseq = strm->next_sequence_sent;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
					sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
					    (uintptr_t) stcb, sp->length,
					    (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
				}
				strm->next_sequence_sent++;
			} else {
				SCTP_STAT_INCR(sctps_sends_with_unord);
			}
			TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
			SCTP_TCB_SEND_UNLOCK(stcb);
		} else {
			SCTP_TCB_SEND_LOCK(stcb);
			sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
			SCTP_TCB_SEND_UNLOCK(stcb);
			if (sp == NULL) {
				/* ???? Huh ??? last msg is gone */
#ifdef INVARIANTS
				panic("Warning: Last msg marked incomplete, yet nothing left?");
#else
				SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
				strm->last_msg_incomplete = 0;
#endif
				goto do_a_copy_in;
			}
		}
		while (uio->uio_resid > 0) {
			/* How much room do we have? */
			struct mbuf *new_tail, *mm;

			if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
				max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
			else
				max_len = 0;
			if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
			    (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
			    (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
				sndout = 0;
				new_tail = NULL;
				if (hold_tcblock) {
					SCTP_TCB_UNLOCK(stcb);
					hold_tcblock = 0;
				}
				mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
				if ((mm == NULL) || error) {
					if (mm) {
						sctp_m_freem(mm);
					}
					goto out;
				}
				/* Update the mbuf and count */
				SCTP_TCB_SEND_LOCK(stcb);
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					/*
					 * we need to get out. Peer probably
					 * aborted.
					 */
					sctp_m_freem(mm);
					if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
						SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
						error = ECONNRESET;
					}
					SCTP_TCB_SEND_UNLOCK(stcb);
					goto out;
				}
				if (sp->tail_mbuf) {
					/* tack it to the end */
					SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
					sp->tail_mbuf = new_tail;
				} else {
					/* A stolen mbuf */
					sp->data = mm;
					sp->tail_mbuf = new_tail;
				}
				sctp_snd_sb_alloc(stcb, sndout);
				atomic_add_int(&sp->length, sndout);
				/* Did we reach EOR? */
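				/*
				 * The message is complete once the uio has
				 * drained and either (a) explicit EOR is off,
				 * so each send call is one message, (b) the
				 * caller set SCTP_EOF, or (c) explicit EOR is
				 * on and this send carries SCTP_EOR.
				 */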
				if ((uio->uio_resid == 0) &&
				    ((user_marks_eor == 0) ||
				    (srcv->sinfo_flags & SCTP_EOF) ||
				    (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
					sp->msg_is_complete = 1;
				} else {
					sp->msg_is_complete = 0;
				}
				SCTP_TCB_SEND_UNLOCK(stcb);
			if (uio->uio_resid == 0) {
				/* got it all? */
				continue;
			}
			/* PR-SCTP? */
			if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
				/*
				 * This is ugly but we must assure locking
				 * order.
				 */
				if (hold_tcblock == 0) {
					SCTP_TCB_LOCK(stcb);
					hold_tcblock = 1;
				}
				sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
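				/*
				 * The prune above may mark abandonable
				 * PR-SCTP data on the sent queue so its
				 * space can be reclaimed; recompute how
				 * much room that bought us.
				 */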
				inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
				if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
					max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
				else
					max_len = 0;
				if (max_len > 0) {
					continue;
				}
				SCTP_TCB_UNLOCK(stcb);
				hold_tcblock = 0;
			}
			/* wait for space now */
			if (non_blocking) {
				/* Non-blocking I/O is in place; get out */
				goto skip_out_eof;
			}
			/* What about the INIT, send it maybe */
			if (queue_only_for_init) {
				if (hold_tcblock == 0) {
					SCTP_TCB_LOCK(stcb);
					hold_tcblock = 1;
				}
				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
					/* a collision took us forward? */
					queue_only = 0;
					queue_only_for_init = 0;
				} else {
					sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
					SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
					queue_only = 1;
					queue_only_for_init = 0;
				}
			}
			if ((net->flight_size > net->cwnd) &&
			    (asoc->sctp_cmt_on_off == 0)) {
				SCTP_STAT_INCR(sctps_send_cwnd_avoid);
				queue_only = 1;
			} else if (asoc->ifp_had_enobuf) {
				SCTP_STAT_INCR(sctps_ifnomemqueued);
				if (net->flight_size > (2 * net->mtu)) {
					queue_only = 1;
				}
				asoc->ifp_had_enobuf = 0;
			}
			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
			    (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
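			/*
			 * un_sent approximates the bytes queued but not yet
			 * in flight: unsent payload plus one DATA chunk
			 * header per queued message.  The Nagle test below
			 * compares it against a full MTU worth of data.
			 */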
			if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
			    (stcb->asoc.total_flight > 0) &&
			    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
			    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
				/*
				 * Ok, Nagle is set on and we have data outstanding.
				 * Don't send anything and let SACKs drive out the
				 * data unless we have a "full" segment to send.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
					sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
				}
				SCTP_STAT_INCR(sctps_naglequeued);
				nagle_applies = 1;
			} else {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
					if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
						sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
				}
				SCTP_STAT_INCR(sctps_naglesent);
				nagle_applies = 0;
			}
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
				    nagle_applies, un_sent);
				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
				    stcb->asoc.total_flight,
				    stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
			}
			if (queue_only_for_init)
				queue_only_for_init = 0;
			if ((queue_only == 0) && (nagle_applies == 0)) {
				/*
				 * need to start chunk output
				 * before blocking.. note that if
				 * a lock is already applied, then
				 * the input via the net is happening
				 * and I don't need to start output :-D
				 */
				if (hold_tcblock == 0) {
					if (SCTP_TCB_TRYLOCK(stcb)) {
						hold_tcblock = 1;
						sctp_chunk_output(inp,
						    stcb,
						    SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
					}
				} else {
					sctp_chunk_output(inp,
					    stcb,
					    SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
				}
				if (hold_tcblock == 1) {
					SCTP_TCB_UNLOCK(stcb);
					hold_tcblock = 0;
				}
			}
			SOCKBUF_LOCK(&so->so_snd);
			/*
			 * This is a bit strange, but I think it will
			 * work. The total_output_queue_size is locked and
			 * protected by the TCB_LOCK, which we just released.
			 * There is a race that can occur between releasing it
			 * above, and me getting the socket lock, where sacks
			 * come in but we have not put the SB_WAIT on the
			 * so_snd buffer to get the wakeup. After the LOCK
			 * is applied the sack_processing will also need to
			 * LOCK the so->so_snd to do the actual sowwakeup(). So
			 * once we have the socket buffer lock if we recheck the
			 * size we KNOW we will get to sleep safely with the
			 * wakeup flag in place.
			 */
			if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
			    min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
					    so, asoc, uio->uio_resid);
				}
				be.error = 0;
				stcb->block_entry = &be;
				error = sbwait(&so->so_snd);
				stcb->block_entry = NULL;
				if (error || so->so_error || be.error) {
					if (error == 0) {
						if (so->so_error)
							error = so->so_error;
						if (be.error)
							error = be.error;
					}
					SOCKBUF_UNLOCK(&so->so_snd);
					goto out_unlocked;
				}
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
					sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
					    so, asoc, stcb->asoc.total_output_queue_size);
				}
			}
			SOCKBUF_UNLOCK(&so->so_snd);
			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				goto out_unlocked;
			}
		}
		SCTP_TCB_SEND_LOCK(stcb);
		if (sp) {
			if (sp->msg_is_complete == 0) {
				strm->last_msg_incomplete = 1;
				asoc->stream_locked = 1;
				asoc->stream_locked_on = srcv->sinfo_stream;
			} else {
				sp->sender_all_done = 1;
				strm->last_msg_incomplete = 0;
				asoc->stream_locked = 0;
			}
		} else {
			SCTP_PRINTF("Huh no sp TSNH?\n");
			strm->last_msg_incomplete = 0;
			asoc->stream_locked = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
		if (uio->uio_resid == 0) {
			got_all_of_the_send = 1;
		}
	} else {
		/* We send in a 0, since we do NOT have any locks */
		error = sctp_msg_append(stcb, net, top, srcv, 0);
		top = NULL;
		if (srcv->sinfo_flags & SCTP_EOF) {
			/*
			 * This should only happen for Panda for the mbuf
			 * send case, which does NOT yet support EEOR mode.
			 * Thus, we can just set this flag to do the proper
			 * EOF handling.
			 */
			got_all_of_the_send = 1;
		}
	}
	if (error) {
		goto out;
	}
dataless_eof:
	/* EOF thing ? */
	if ((srcv->sinfo_flags & SCTP_EOF) &&
	    (got_all_of_the_send == 1) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
		int cnt;

		SCTP_STAT_INCR(sctps_sends_with_eof);
		error = 0;
		if (hold_tcblock == 0) {
			SCTP_TCB_LOCK(stcb);
			hold_tcblock = 1;
		}
		cnt = sctp_is_there_unsent_data(stcb);
		if (TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->sent_queue) &&
		    (cnt == 0)) {
			if (asoc->locked_on_sending) {
				goto abort_anyway;
			}
			/* there is nothing queued to send, so I'm done... */
			if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
				/* only send SHUTDOWN the first time through */
				sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
				if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
				    asoc->primary_destination);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
				    asoc->primary_destination);
			}
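			/*
			 * SHUTDOWN is now in flight: the shutdown timer
			 * retransmits it as needed and the guard timer
			 * bounds the whole shutdown sequence.
			 */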
		} else {
			/*
			 * we still got (or just got) data to send, so set
			 * SHUTDOWN_PENDING.
			 */
			/*
			 * XXX sockets draft says that SCTP_EOF should be
			 * sent with no data. currently, we will allow user
			 * data to be sent first and move to
			 * SHUTDOWN-PENDING.
			 */
			if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
				if (hold_tcblock == 0) {
					SCTP_TCB_LOCK(stcb);
					hold_tcblock = 1;
				}
				if (asoc->locked_on_sending) {
					/* Locked to send out the data */
					struct sctp_stream_queue_pending *sp;

					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
					if (sp) {
						if ((sp->length == 0) && (sp->msg_is_complete == 0))
							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					}
				}
				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
				if (TAILQ_EMPTY(&asoc->send_queue) &&
				    TAILQ_EMPTY(&asoc->sent_queue) &&
				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
			abort_anyway:
					if (free_cnt_applied) {
						atomic_add_int(&stcb->asoc.refcnt, -1);
						free_cnt_applied = 0;
					}
					sctp_abort_an_association(stcb->sctp_ep, stcb,
					    SCTP_RESPONSE_TO_USER_REQ,
					    NULL, SCTP_SO_LOCKED);
					/*
					 * now relock the stcb so everything
					 * is sane
					 */
					hold_tcblock = 0;
					stcb = NULL;
					goto out;
				}
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
				    asoc->primary_destination);
				sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
			}
		}
	}
skip_out_eof:
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		some_on_control = 1;
	}
	if (queue_only_for_init) {
		if (hold_tcblock == 0) {
			SCTP_TCB_LOCK(stcb);
			hold_tcblock = 1;
		}
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
			/* a collision took us forward? */
			queue_only = 0;
			queue_only_for_init = 0;
		} else {
			sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
			SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
			queue_only = 1;
			queue_only_for_init = 0;
		}
	}
	if ((net->flight_size > net->cwnd) &&
	    (stcb->asoc.sctp_cmt_on_off == 0)) {
		SCTP_STAT_INCR(sctps_send_cwnd_avoid);
		queue_only = 1;
	} else if (asoc->ifp_had_enobuf) {
		SCTP_STAT_INCR(sctps_ifnomemqueued);
		if (net->flight_size > (2 * net->mtu)) {
			queue_only = 1;
		}
		asoc->ifp_had_enobuf = 0;
	}
	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
	    (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
	    (stcb->asoc.total_flight > 0) &&
	    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
		/*
		 * Ok, Nagle is set on and we have data outstanding.
		 * Don't send anything and let SACKs drive out the
		 * data unless we have a "full" segment to send.
		 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
			sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
		}
		SCTP_STAT_INCR(sctps_naglequeued);
		nagle_applies = 1;
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
			if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
				sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
		}
		SCTP_STAT_INCR(sctps_naglesent);
		nagle_applies = 0;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
		    nagle_applies, un_sent);
		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
		    stcb->asoc.total_flight,
		    stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
	}
	if (queue_only_for_init)
		queue_only_for_init = 0;
	if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
		/* we can attempt to send too. */
		if (hold_tcblock == 0) {
			/*
			 * If there is activity recv'ing sacks no need to
			 * send.
			 */
			if (SCTP_TCB_TRYLOCK(stcb)) {
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
				hold_tcblock = 1;
			}
		} else {
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
		}
	} else if ((queue_only == 0) &&
	    (stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight == 0)) {
		/* We get to have a probe outstanding */
		if (hold_tcblock == 0) {
			hold_tcblock = 1;
			SCTP_TCB_LOCK(stcb);
		}
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
	} else if (some_on_control) {
		int num_out, reason, frag_point;

		/* Here we do control only */
		if (hold_tcblock == 0) {
			hold_tcblock = 1;
			SCTP_TCB_LOCK(stcb);
		}
		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
		    &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
	    queue_only, stcb->asoc.peers_rwnd, un_sent,
	    stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
	    stcb->asoc.total_output_queue_size, error);
out:
out_unlocked:

	if (local_soresv && stcb) {
		atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
	}
	if (create_lock_applied) {
		SCTP_ASOC_CREATE_UNLOCK(inp);
		create_lock_applied = 0;
	}
	if ((stcb) && hold_tcblock) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if (stcb && free_cnt_applied) {
		atomic_add_int(&stcb->asoc.refcnt, -1);
	}
#ifdef INVARIANTS
	if (stcb) {
		if (mtx_owned(&stcb->tcb_mtx)) {
			panic("Leaving with tcb mtx owned?");
		}
		if (mtx_owned(&stcb->tcb_send_mtx)) {
			panic("Leaving with tcb send mtx owned?");
		}
	}
#endif
#ifdef INVARIANTS
	if (inp) {
		sctp_validate_no_locks(inp);
	} else {
		printf("Warning - inp is NULL so can't validate locks\n");
	}
#endif
	if (top) {
		sctp_m_freem(top);
	}
	if (control) {
		sctp_m_freem(control);
	}
	return (error);
}
/*
 * generate an AUTHentication chunk, if required
 */
static struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
	struct mbuf *m_auth;
	struct sctp_auth_chunk *auth;
	int chunk_len;

	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
	    (stcb == NULL))
		return (m);

	/* sysctl disabled auth? */
	if (SCTP_BASE_SYSCTL(sctp_auth_disable))
		return (m);

	/* peer doesn't do auth... */
	if (!stcb->asoc.peer_supports_auth) {
		return (m);
	}
	/* does the requested chunk require auth? */
	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
		return (m);
	}
	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_auth == NULL) {
		/* no mbufs */
		return (m);
	}
	/* reserve some space if this will be the first mbuf */
	if (m == NULL)
		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
	/* fill in the AUTH chunk details */
	auth = mtod(m_auth, struct sctp_auth_chunk *);
	bzero(auth, sizeof(*auth));
	auth->ch.chunk_type = SCTP_AUTHENTICATION;
	auth->ch.chunk_flags = 0;
	chunk_len = sizeof(*auth) +
	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
	auth->ch.chunk_length = htons(chunk_len);
	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
	/* key id and hmac digest will be computed and filled in upon send */

	/* save the offset where the auth was inserted into the chain */
	*offset = 0;
	if (m != NULL) {
		struct mbuf *cn;

		cn = m;
		while (cn) {
			*offset += SCTP_BUF_LEN(cn);
			cn = SCTP_BUF_NEXT(cn);
		}
	}
	/* update length and return pointer to the auth chunk */
	SCTP_BUF_LEN(m_auth) = chunk_len;
	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
	if (auth_ret != NULL)
		*auth_ret = auth;

	return (m);
}
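
/*
 * Usage sketch (illustrative only, not part of this file's call graph):
 * a caller building an outbound packet prepends an AUTH chunk before any
 * chunk type the peer requires to be authenticated.  "outchain",
 * "endofchain" and "auth_offset" are hypothetical locals.
 *
 *	struct sctp_auth_chunk *auth = NULL;
 *	uint32_t auth_offset = 0;
 *
 *	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
 *		outchain = sctp_add_auth_chunk(outchain, &endofchain,
 *		    &auth, &auth_offset, stcb, SCTP_DATA);
 *	}
 *
 * The key id and HMAC digest are filled in later, at actual send time.
 */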
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
{
	struct nd_prefix *pfx = NULL;
	struct nd_pfxrouter *pfxrtr = NULL;
	struct sockaddr_in6 gw6;

	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
		return (0);
	/* get prefix entry of address */
	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
			continue;
		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
		    &src6->sin6_addr, &pfx->ndpr_mask))
			break;
	}
	/* no prefix entry in the prefix list */
	if (pfx == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
		return (0);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
	/* search installed gateway from prefix entry */
	for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
	    pfxrtr->pfr_next) {
		memset(&gw6, 0, sizeof(struct sockaddr_in6));
		gw6.sin6_family = AF_INET6;
		gw6.sin6_len = sizeof(struct sockaddr_in6);
		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
		    sizeof(struct in6_addr));
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
		if (sctp_cmpaddr((struct sockaddr *)&gw6,
		    ro->ro_rt->rt_gateway)) {
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
			return (1);
		}
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
	return (0);
}
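
/*
 * The IPv4 counterpart below applies the same idea as the IPv6 routine
 * above: mask both the candidate source address and the route's gateway
 * with the interface netmask and accept the source only if both land in
 * the same subnet, i.e. the source is "on the way" to the nexthop.
 */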
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
{
	struct sockaddr_in *sin, *mask;
	struct ifaddr *ifa;
	struct in_addr srcnetaddr, gwnetaddr;

	if (ro == NULL || ro->ro_rt == NULL ||
	    sifa->address.sa.sa_family != AF_INET) {
		return (0);
	}
	ifa = (struct ifaddr *)sifa->ifa;
	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
	sin = (struct sockaddr_in *)&sifa->address.sin;
	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);

	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {