2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctputil.h>
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_indata.h>
52 #include <netinet/sctp_bsd_addr.h>
53 #include <netinet/sctp_input.h>
54 #include <netinet/sctp_crc32.h>
55 #include <netinet/udp.h>
56 #include <machine/in_cksum.h>
#define SCTP_MAX_GAPS_INARRAY 4
struct sack_track {
    uint8_t right_edge;     /* mergeable on the right edge */
    uint8_t left_edge;      /* mergeable on the left edge */
    uint8_t num_entries;
    uint8_t spare;
    struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};
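/*
 * Lookup table used when building SACK gap ack blocks.  Each of the 256
 * entries describes one possible value of a byte of the receive mapping
 * array: whether its blocks can merge with the neighbouring byte on the
 * right or left edge, how many gap ack blocks the byte contributes
 * (num_entries), and the start/end offsets of those blocks (gaps[]).
 * For example, byte 0x29 (bits 0, 3 and 5 set) has the entry
 * {1, 0, 3, 0, ...}: three gap ack blocks, mergeable on the right edge only.
 */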
69 struct sack_track sack_array[256] = {
70 {0, 0, 0, 0, /* 0x00 */
77 {1, 0, 1, 0, /* 0x01 */
84 {0, 0, 1, 0, /* 0x02 */
91 {1, 0, 1, 0, /* 0x03 */
98 {0, 0, 1, 0, /* 0x04 */
105 {1, 0, 2, 0, /* 0x05 */
112 {0, 0, 1, 0, /* 0x06 */
119 {1, 0, 1, 0, /* 0x07 */
126 {0, 0, 1, 0, /* 0x08 */
133 {1, 0, 2, 0, /* 0x09 */
140 {0, 0, 2, 0, /* 0x0a */
147 {1, 0, 2, 0, /* 0x0b */
154 {0, 0, 1, 0, /* 0x0c */
161 {1, 0, 2, 0, /* 0x0d */
168 {0, 0, 1, 0, /* 0x0e */
175 {1, 0, 1, 0, /* 0x0f */
182 {0, 0, 1, 0, /* 0x10 */
189 {1, 0, 2, 0, /* 0x11 */
196 {0, 0, 2, 0, /* 0x12 */
203 {1, 0, 2, 0, /* 0x13 */
210 {0, 0, 2, 0, /* 0x14 */
217 {1, 0, 3, 0, /* 0x15 */
224 {0, 0, 2, 0, /* 0x16 */
231 {1, 0, 2, 0, /* 0x17 */
238 {0, 0, 1, 0, /* 0x18 */
245 {1, 0, 2, 0, /* 0x19 */
252 {0, 0, 2, 0, /* 0x1a */
259 {1, 0, 2, 0, /* 0x1b */
266 {0, 0, 1, 0, /* 0x1c */
273 {1, 0, 2, 0, /* 0x1d */
280 {0, 0, 1, 0, /* 0x1e */
287 {1, 0, 1, 0, /* 0x1f */
294 {0, 0, 1, 0, /* 0x20 */
301 {1, 0, 2, 0, /* 0x21 */
308 {0, 0, 2, 0, /* 0x22 */
315 {1, 0, 2, 0, /* 0x23 */
322 {0, 0, 2, 0, /* 0x24 */
329 {1, 0, 3, 0, /* 0x25 */
336 {0, 0, 2, 0, /* 0x26 */
343 {1, 0, 2, 0, /* 0x27 */
350 {0, 0, 2, 0, /* 0x28 */
357 {1, 0, 3, 0, /* 0x29 */
364 {0, 0, 3, 0, /* 0x2a */
371 {1, 0, 3, 0, /* 0x2b */
378 {0, 0, 2, 0, /* 0x2c */
385 {1, 0, 3, 0, /* 0x2d */
392 {0, 0, 2, 0, /* 0x2e */
399 {1, 0, 2, 0, /* 0x2f */
406 {0, 0, 1, 0, /* 0x30 */
413 {1, 0, 2, 0, /* 0x31 */
420 {0, 0, 2, 0, /* 0x32 */
427 {1, 0, 2, 0, /* 0x33 */
434 {0, 0, 2, 0, /* 0x34 */
441 {1, 0, 3, 0, /* 0x35 */
448 {0, 0, 2, 0, /* 0x36 */
455 {1, 0, 2, 0, /* 0x37 */
462 {0, 0, 1, 0, /* 0x38 */
469 {1, 0, 2, 0, /* 0x39 */
476 {0, 0, 2, 0, /* 0x3a */
483 {1, 0, 2, 0, /* 0x3b */
490 {0, 0, 1, 0, /* 0x3c */
497 {1, 0, 2, 0, /* 0x3d */
504 {0, 0, 1, 0, /* 0x3e */
511 {1, 0, 1, 0, /* 0x3f */
518 {0, 0, 1, 0, /* 0x40 */
525 {1, 0, 2, 0, /* 0x41 */
532 {0, 0, 2, 0, /* 0x42 */
539 {1, 0, 2, 0, /* 0x43 */
546 {0, 0, 2, 0, /* 0x44 */
553 {1, 0, 3, 0, /* 0x45 */
560 {0, 0, 2, 0, /* 0x46 */
567 {1, 0, 2, 0, /* 0x47 */
574 {0, 0, 2, 0, /* 0x48 */
581 {1, 0, 3, 0, /* 0x49 */
588 {0, 0, 3, 0, /* 0x4a */
595 {1, 0, 3, 0, /* 0x4b */
602 {0, 0, 2, 0, /* 0x4c */
609 {1, 0, 3, 0, /* 0x4d */
616 {0, 0, 2, 0, /* 0x4e */
623 {1, 0, 2, 0, /* 0x4f */
630 {0, 0, 2, 0, /* 0x50 */
637 {1, 0, 3, 0, /* 0x51 */
644 {0, 0, 3, 0, /* 0x52 */
651 {1, 0, 3, 0, /* 0x53 */
658 {0, 0, 3, 0, /* 0x54 */
665 {1, 0, 4, 0, /* 0x55 */
672 {0, 0, 3, 0, /* 0x56 */
679 {1, 0, 3, 0, /* 0x57 */
686 {0, 0, 2, 0, /* 0x58 */
693 {1, 0, 3, 0, /* 0x59 */
700 {0, 0, 3, 0, /* 0x5a */
707 {1, 0, 3, 0, /* 0x5b */
714 {0, 0, 2, 0, /* 0x5c */
721 {1, 0, 3, 0, /* 0x5d */
728 {0, 0, 2, 0, /* 0x5e */
735 {1, 0, 2, 0, /* 0x5f */
742 {0, 0, 1, 0, /* 0x60 */
749 {1, 0, 2, 0, /* 0x61 */
756 {0, 0, 2, 0, /* 0x62 */
763 {1, 0, 2, 0, /* 0x63 */
770 {0, 0, 2, 0, /* 0x64 */
777 {1, 0, 3, 0, /* 0x65 */
784 {0, 0, 2, 0, /* 0x66 */
791 {1, 0, 2, 0, /* 0x67 */
798 {0, 0, 2, 0, /* 0x68 */
805 {1, 0, 3, 0, /* 0x69 */
812 {0, 0, 3, 0, /* 0x6a */
819 {1, 0, 3, 0, /* 0x6b */
826 {0, 0, 2, 0, /* 0x6c */
833 {1, 0, 3, 0, /* 0x6d */
840 {0, 0, 2, 0, /* 0x6e */
847 {1, 0, 2, 0, /* 0x6f */
854 {0, 0, 1, 0, /* 0x70 */
861 {1, 0, 2, 0, /* 0x71 */
868 {0, 0, 2, 0, /* 0x72 */
875 {1, 0, 2, 0, /* 0x73 */
882 {0, 0, 2, 0, /* 0x74 */
889 {1, 0, 3, 0, /* 0x75 */
896 {0, 0, 2, 0, /* 0x76 */
903 {1, 0, 2, 0, /* 0x77 */
910 {0, 0, 1, 0, /* 0x78 */
917 {1, 0, 2, 0, /* 0x79 */
924 {0, 0, 2, 0, /* 0x7a */
931 {1, 0, 2, 0, /* 0x7b */
938 {0, 0, 1, 0, /* 0x7c */
945 {1, 0, 2, 0, /* 0x7d */
952 {0, 0, 1, 0, /* 0x7e */
959 {1, 0, 1, 0, /* 0x7f */
966 {0, 1, 1, 0, /* 0x80 */
973 {1, 1, 2, 0, /* 0x81 */
980 {0, 1, 2, 0, /* 0x82 */
987 {1, 1, 2, 0, /* 0x83 */
994 {0, 1, 2, 0, /* 0x84 */
1001 {1, 1, 3, 0, /* 0x85 */
1008 {0, 1, 2, 0, /* 0x86 */
1015 {1, 1, 2, 0, /* 0x87 */
1022 {0, 1, 2, 0, /* 0x88 */
1029 {1, 1, 3, 0, /* 0x89 */
1036 {0, 1, 3, 0, /* 0x8a */
1043 {1, 1, 3, 0, /* 0x8b */
1050 {0, 1, 2, 0, /* 0x8c */
1057 {1, 1, 3, 0, /* 0x8d */
1064 {0, 1, 2, 0, /* 0x8e */
1071 {1, 1, 2, 0, /* 0x8f */
1078 {0, 1, 2, 0, /* 0x90 */
1085 {1, 1, 3, 0, /* 0x91 */
1092 {0, 1, 3, 0, /* 0x92 */
1099 {1, 1, 3, 0, /* 0x93 */
1106 {0, 1, 3, 0, /* 0x94 */
1113 {1, 1, 4, 0, /* 0x95 */
1120 {0, 1, 3, 0, /* 0x96 */
1127 {1, 1, 3, 0, /* 0x97 */
1134 {0, 1, 2, 0, /* 0x98 */
1141 {1, 1, 3, 0, /* 0x99 */
1148 {0, 1, 3, 0, /* 0x9a */
1155 {1, 1, 3, 0, /* 0x9b */
1162 {0, 1, 2, 0, /* 0x9c */
1169 {1, 1, 3, 0, /* 0x9d */
1176 {0, 1, 2, 0, /* 0x9e */
1183 {1, 1, 2, 0, /* 0x9f */
1190 {0, 1, 2, 0, /* 0xa0 */
1197 {1, 1, 3, 0, /* 0xa1 */
1204 {0, 1, 3, 0, /* 0xa2 */
1211 {1, 1, 3, 0, /* 0xa3 */
1218 {0, 1, 3, 0, /* 0xa4 */
1225 {1, 1, 4, 0, /* 0xa5 */
1232 {0, 1, 3, 0, /* 0xa6 */
1239 {1, 1, 3, 0, /* 0xa7 */
1246 {0, 1, 3, 0, /* 0xa8 */
1253 {1, 1, 4, 0, /* 0xa9 */
1260 {0, 1, 4, 0, /* 0xaa */
1267 {1, 1, 4, 0, /* 0xab */
1274 {0, 1, 3, 0, /* 0xac */
1281 {1, 1, 4, 0, /* 0xad */
1288 {0, 1, 3, 0, /* 0xae */
1295 {1, 1, 3, 0, /* 0xaf */
1302 {0, 1, 2, 0, /* 0xb0 */
1309 {1, 1, 3, 0, /* 0xb1 */
1316 {0, 1, 3, 0, /* 0xb2 */
1323 {1, 1, 3, 0, /* 0xb3 */
1330 {0, 1, 3, 0, /* 0xb4 */
1337 {1, 1, 4, 0, /* 0xb5 */
1344 {0, 1, 3, 0, /* 0xb6 */
1351 {1, 1, 3, 0, /* 0xb7 */
1358 {0, 1, 2, 0, /* 0xb8 */
1365 {1, 1, 3, 0, /* 0xb9 */
1372 {0, 1, 3, 0, /* 0xba */
1379 {1, 1, 3, 0, /* 0xbb */
1386 {0, 1, 2, 0, /* 0xbc */
1393 {1, 1, 3, 0, /* 0xbd */
1400 {0, 1, 2, 0, /* 0xbe */
1407 {1, 1, 2, 0, /* 0xbf */
1414 {0, 1, 1, 0, /* 0xc0 */
1421 {1, 1, 2, 0, /* 0xc1 */
1428 {0, 1, 2, 0, /* 0xc2 */
1435 {1, 1, 2, 0, /* 0xc3 */
1442 {0, 1, 2, 0, /* 0xc4 */
1449 {1, 1, 3, 0, /* 0xc5 */
1456 {0, 1, 2, 0, /* 0xc6 */
1463 {1, 1, 2, 0, /* 0xc7 */
1470 {0, 1, 2, 0, /* 0xc8 */
1477 {1, 1, 3, 0, /* 0xc9 */
1484 {0, 1, 3, 0, /* 0xca */
1491 {1, 1, 3, 0, /* 0xcb */
1498 {0, 1, 2, 0, /* 0xcc */
1505 {1, 1, 3, 0, /* 0xcd */
1512 {0, 1, 2, 0, /* 0xce */
1519 {1, 1, 2, 0, /* 0xcf */
1526 {0, 1, 2, 0, /* 0xd0 */
1533 {1, 1, 3, 0, /* 0xd1 */
1540 {0, 1, 3, 0, /* 0xd2 */
1547 {1, 1, 3, 0, /* 0xd3 */
1554 {0, 1, 3, 0, /* 0xd4 */
1561 {1, 1, 4, 0, /* 0xd5 */
1568 {0, 1, 3, 0, /* 0xd6 */
1575 {1, 1, 3, 0, /* 0xd7 */
1582 {0, 1, 2, 0, /* 0xd8 */
1589 {1, 1, 3, 0, /* 0xd9 */
1596 {0, 1, 3, 0, /* 0xda */
1603 {1, 1, 3, 0, /* 0xdb */
1610 {0, 1, 2, 0, /* 0xdc */
1617 {1, 1, 3, 0, /* 0xdd */
1624 {0, 1, 2, 0, /* 0xde */
1631 {1, 1, 2, 0, /* 0xdf */
1638 {0, 1, 1, 0, /* 0xe0 */
1645 {1, 1, 2, 0, /* 0xe1 */
1652 {0, 1, 2, 0, /* 0xe2 */
1659 {1, 1, 2, 0, /* 0xe3 */
1666 {0, 1, 2, 0, /* 0xe4 */
1673 {1, 1, 3, 0, /* 0xe5 */
1680 {0, 1, 2, 0, /* 0xe6 */
1687 {1, 1, 2, 0, /* 0xe7 */
1694 {0, 1, 2, 0, /* 0xe8 */
1701 {1, 1, 3, 0, /* 0xe9 */
1708 {0, 1, 3, 0, /* 0xea */
1715 {1, 1, 3, 0, /* 0xeb */
1722 {0, 1, 2, 0, /* 0xec */
1729 {1, 1, 3, 0, /* 0xed */
1736 {0, 1, 2, 0, /* 0xee */
1743 {1, 1, 2, 0, /* 0xef */
1750 {0, 1, 1, 0, /* 0xf0 */
1757 {1, 1, 2, 0, /* 0xf1 */
1764 {0, 1, 2, 0, /* 0xf2 */
1771 {1, 1, 2, 0, /* 0xf3 */
1778 {0, 1, 2, 0, /* 0xf4 */
1785 {1, 1, 3, 0, /* 0xf5 */
1792 {0, 1, 2, 0, /* 0xf6 */
1799 {1, 1, 2, 0, /* 0xf7 */
1806 {0, 1, 1, 0, /* 0xf8 */
1813 {1, 1, 2, 0, /* 0xf9 */
1820 {0, 1, 2, 0, /* 0xfa */
1827 {1, 1, 2, 0, /* 0xfb */
1834 {0, 1, 1, 0, /* 0xfc */
1841 {1, 1, 2, 0, /* 0xfd */
1848 {0, 1, 1, 0, /* 0xfe */
1855 {1, 1, 1, 0, /* 0xff */
static int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
    int ipv4_addr_legal,
    int ipv6_addr_legal,
    int loopback_scope,
    int ipv4_local_scope,
    int local_scope,
    int site_scope,
    int do_update)
{
1875 if ((loopback_scope == 0) &&
1876 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
/* skip loopback if not in scope */
1882 switch (ifa->address.sa.sa_family) {
1885 if (ipv4_addr_legal) {
1886 struct sockaddr_in *sin;
1888 sin = (struct sockaddr_in *)&ifa->address.sin;
1889 if (sin->sin_addr.s_addr == 0) {
/* not in scope, unspecified */
1893 if ((ipv4_local_scope == 0) &&
1894 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1895 /* private address not in scope */
1905 if (ipv6_addr_legal) {
1906 struct sockaddr_in6 *sin6;
/*
 * Must update the flags, bummer, which means any
 * IFA locks must now be applied HERE <->
 */
1913 sctp_gather_internal_ifa_flags(ifa);
1915 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1918 /* ok to use deprecated addresses? */
1919 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1920 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
/* skip unspecified addresses */
1924 if ( /* (local_scope == 0) && */
1925 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1928 if ((site_scope == 0) &&
1929 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
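/*
 * Append an address parameter (IPv4 or IPv6) for the given ifa to the mbuf
 * chain being used to build an INIT or INIT-ACK, allocating a new mbuf at
 * the end of the chain if the current one has no trailing space left.
 */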
1943 static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
{
    struct sctp_paramhdr *parmh;
    struct mbuf *mret;
    int len;
1950 switch (ifa->address.sa.sa_family) {
1953 len = sizeof(struct sctp_ipv4addr_param);
1958 len = sizeof(struct sctp_ipv6addr_param);
1964 if (M_TRAILINGSPACE(m) >= len) {
/* easy case: we just drop it on the end */
1966 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1969 /* Need more space */
1971 while (SCTP_BUF_NEXT(mret) != NULL) {
1972 mret = SCTP_BUF_NEXT(mret);
1974 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
1975 if (SCTP_BUF_NEXT(mret) == NULL) {
1976 /* We are hosed, can't add more addresses */
1979 mret = SCTP_BUF_NEXT(mret);
1980 parmh = mtod(mret, struct sctp_paramhdr *);
1982 /* now add the parameter */
1983 switch (ifa->address.sa.sa_family) {
1987 struct sctp_ipv4addr_param *ipv4p;
1988 struct sockaddr_in *sin;
1990 sin = (struct sockaddr_in *)&ifa->address.sin;
1991 ipv4p = (struct sctp_ipv4addr_param *)parmh;
1992 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
1993 parmh->param_length = htons(len);
1994 ipv4p->addr = sin->sin_addr.s_addr;
1995 SCTP_BUF_LEN(mret) += len;
2002 struct sctp_ipv6addr_param *ipv6p;
2003 struct sockaddr_in6 *sin6;
2005 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
2006 ipv6p = (struct sctp_ipv6addr_param *)parmh;
2007 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
2008 parmh->param_length = htons(len);
2009 memcpy(ipv6p->addr, &sin6->sin6_addr,
2010 sizeof(ipv6p->addr));
2011 /* clear embedded scope in the address */
2012 in6_clearscope((struct in6_addr *)ipv6p->addr);
2013 SCTP_BUF_LEN(mret) += len;
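/*
 * Add the local address parameters to an INIT or INIT-ACK chunk (m_at).
 * For a bound-all endpoint the addresses come from the VRF's interface
 * lists, otherwise from the endpoint's bound address list; in both cases
 * addresses that are out of scope or being deleted are skipped and the
 * total is capped at SCTP_ADDRESS_LIMIT.
 */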
static struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to)
{
2029 struct sctp_vrf *vrf = NULL;
2030 int cnt, limit_out = 0, total_count;
2033 vrf_id = inp->def_vrf_id;
2034 SCTP_IPI_ADDR_RLOCK();
2035 vrf = sctp_find_vrf(vrf_id);
2037 SCTP_IPI_ADDR_RUNLOCK();
2040 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2041 struct sctp_ifa *sctp_ifap;
2042 struct sctp_ifn *sctp_ifnp;
2045 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2047 cnt = SCTP_ADDRESS_LIMIT;
2050 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2051 if ((scope->loopback_scope == 0) &&
2052 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
/* Skip loopback devices if loopback_scope not set */
2059 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2060 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2063 if (sctp_is_address_in_scope(sctp_ifap,
2064 scope->ipv4_addr_legal,
2065 scope->ipv6_addr_legal,
2066 scope->loopback_scope,
2067 scope->ipv4_local_scope,
2069 scope->site_scope, 1) == 0) {
2073 if (cnt > SCTP_ADDRESS_LIMIT) {
2077 if (cnt > SCTP_ADDRESS_LIMIT) {
2084 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2086 if ((scope->loopback_scope == 0) &&
2087 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
/* Skip loopback devices if loopback_scope not set */
2094 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2095 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2098 if (sctp_is_address_in_scope(sctp_ifap,
2099 scope->ipv4_addr_legal,
2100 scope->ipv6_addr_legal,
2101 scope->loopback_scope,
2102 scope->ipv4_local_scope,
2104 scope->site_scope, 0) == 0) {
2107 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
2118 if (total_count > SCTP_ADDRESS_LIMIT) {
2119 /* No more addresses */
2127 struct sctp_laddr *laddr;
2130 /* First, how many ? */
2131 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2132 if (laddr->ifa == NULL) {
2135 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
/* Address being deleted by the system, don't use. */
2141 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
/* Address being deleted on this ep, don't use. */
2148 if (sctp_is_address_in_scope(laddr->ifa,
2149 scope->ipv4_addr_legal,
2150 scope->ipv6_addr_legal,
2151 scope->loopback_scope,
2152 scope->ipv4_local_scope,
2154 scope->site_scope, 1) == 0) {
2159 if (cnt > SCTP_ADDRESS_LIMIT) {
/*
 * To get through a NAT we only list addresses if we have
 * more than one. That way if you just bind a single address
 * we let the source of the init dictate our address.
 */
2168 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2170 if (laddr->ifa == NULL) {
2173 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2176 if (sctp_is_address_in_scope(laddr->ifa,
2177 scope->ipv4_addr_legal,
2178 scope->ipv6_addr_legal,
2179 scope->loopback_scope,
2180 scope->ipv4_local_scope,
2182 scope->site_scope, 0) == 0) {
2185 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
2187 if (cnt >= SCTP_ADDRESS_LIMIT) {
2193 SCTP_IPI_ADDR_RUNLOCK();
static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
2203 uint8_t dest_is_global = 0;
/* dest_is_priv is true if destination is a private address */
/* dest_is_loop is true if destination is a loopback address */
/*
 * Here we determine if it's a preferred address. A preferred address
 * means it is the same scope or higher scope than the destination.
 * L = loopback, P = private, G = global
 * -----------------------------------------
 *    src    |  dest  |  result
 * -----------------------------------------
 *     L     |   L    |  yes
 * -----------------------------------------
 *     P     |   L    |  yes-v4 no-v6
 * -----------------------------------------
 *     G     |   L    |  yes-v4 no-v6
 * -----------------------------------------
 *     L     |   P    |  no
 * -----------------------------------------
 *     P     |   P    |  yes
 * -----------------------------------------
 *     G     |   P    |  no
 * -----------------------------------------
 *     L     |   G    |  no
 * -----------------------------------------
 *     P     |   G    |  no
 * -----------------------------------------
 *     G     |   G    |  yes
 * -----------------------------------------
 */
2235 if (ifa->address.sa.sa_family != fam) {
2236 /* forget mis-matched family */
2239 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2242 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2243 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2244 /* Ok the address may be ok */
2246 if (fam == AF_INET6) {
/* ok to use deprecated addresses? no, let's not! */
2248 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2249 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2252 if (ifa->src_is_priv && !ifa->src_is_loop) {
2254 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2258 if (ifa->src_is_glob) {
2260 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
/*
 * Now that we know what is what, implement our table. This could in
 * theory be done slicker (it used to be), but this is
 * straightforward and easier to validate :-)
 */
2271 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2272 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2273 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2274 dest_is_loop, dest_is_priv, dest_is_global);
2276 if ((ifa->src_is_loop) && (dest_is_priv)) {
2277 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2280 if ((ifa->src_is_glob) && (dest_is_priv)) {
2281 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2284 if ((ifa->src_is_loop) && (dest_is_global)) {
2285 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2288 if ((ifa->src_is_priv) && (dest_is_global)) {
2289 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2292 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
/* it's a preferred address */
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
2303 uint8_t dest_is_global = 0;
/*
 * Here we determine if it's an acceptable address. An acceptable
 * address means it is the same scope or higher scope but we can
 * allow for NAT, which means it's ok to have a global dest and a
 * private src.
 *
 * L = loopback, P = private, G = global
 * -----------------------------------------
 *    src    |  dest  |  result
 * -----------------------------------------
 *     L     |   L    |  yes
 * -----------------------------------------
 *     P     |   L    |  yes-v4 no-v6
 * -----------------------------------------
 *     G     |   L    |  yes
 * -----------------------------------------
 *     L     |   P    |  no
 * -----------------------------------------
 *     P     |   P    |  yes
 * -----------------------------------------
 *     G     |   P    |  yes - May not work
 * -----------------------------------------
 *     L     |   G    |  no
 * -----------------------------------------
 *     P     |   G    |  yes - May not work
 * -----------------------------------------
 *     G     |   G    |  yes
 * -----------------------------------------
 */
2335 if (ifa->address.sa.sa_family != fam) {
2336 /* forget non matching family */
2337 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2338 ifa->address.sa.sa_family, fam);
2341 /* Ok the address may be ok */
2342 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2343 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2344 dest_is_loop, dest_is_priv);
2345 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2349 if (fam == AF_INET6) {
2350 /* ok to use deprecated addresses? */
2351 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2354 if (ifa->src_is_priv) {
2355 /* Special case, linklocal to loop */
/*
 * Now that we know what is what, implement our table. This could in
 * theory be done slicker (it used to be), but this is
 * straightforward and easier to validate :-)
 */
2366 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2369 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2372 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2375 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2378 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
/* it's an acceptable address */
2384 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2386 struct sctp_laddr *laddr;
2389 /* There are no restrictions, no TCB :-) */
2392 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2393 if (laddr->ifa == NULL) {
2394 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2398 if (laddr->ifa == ifa) {
2399 /* Yes it is on the list */
2408 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2410 struct sctp_laddr *laddr;
2414 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2415 if (laddr->ifa == NULL) {
2416 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2420 if ((laddr->ifa == ifa) && laddr->action == 0)
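/*
 * Source address selection for a bound-specific endpoint with no
 * association: first look for a usable address on the interface we will
 * emit on, then rotate through the endpoint's bound addresses, trying
 * preferred addresses before merely acceptable ones.
 */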
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
    sctp_route_t *ro,
    uint32_t vrf_id,
    int non_asoc_addr_ok,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    sa_family_t fam)
{
2438 struct sctp_laddr *laddr, *starting_point;
2441 struct sctp_ifn *sctp_ifn;
2442 struct sctp_ifa *sctp_ifa, *sifa;
2443 struct sctp_vrf *vrf;
2446 vrf = sctp_find_vrf(vrf_id);
2450 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2451 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2452 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
/*
 * first question, is the ifn we will emit on in our list, if so, we
 * want such an address. Note that we first looked for a preferred
 * address.
 */
/* is a preferred one on the interface we route out? */
2460 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2461 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2462 (non_asoc_addr_ok == 0))
2464 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2469 if (sctp_is_addr_in_ep(inp, sifa)) {
2470 atomic_add_int(&sifa->refcount, 1);
/*
 * Ok, now we need to find one on the list of the addresses. We
 * can't get one on the emitting interface, so let's first find a
 * preferred one. If not that, an acceptable one; otherwise... we
 * return NULL.
 */
2481 starting_point = inp->next_addr_touse;
2483 if (inp->next_addr_touse == NULL) {
2484 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2487 for (laddr = inp->next_addr_touse; laddr;
2488 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2489 if (laddr->ifa == NULL) {
2490 /* address has been removed */
2493 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2494 /* address is being deleted */
2497 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2501 atomic_add_int(&sifa->refcount, 1);
2504 if (resettotop == 0) {
2505 inp->next_addr_touse = NULL;
2508 inp->next_addr_touse = starting_point;
2511 if (inp->next_addr_touse == NULL) {
2512 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2515 /* ok, what about an acceptable address in the inp */
2516 for (laddr = inp->next_addr_touse; laddr;
2517 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2518 if (laddr->ifa == NULL) {
2519 /* address has been removed */
2522 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2523 /* address is being deleted */
2526 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2530 atomic_add_int(&sifa->refcount, 1);
2533 if (resettotop == 0) {
2534 inp->next_addr_touse = NULL;
2535 goto once_again_too;
/* no address bound can be a source for the destination; we are in trouble */
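/*
 * Source address selection for a bound-specific endpoint with an
 * association: prefer addresses on the emitting interface that are bound
 * to the endpoint and not on the association's restricted list, then
 * rotate through the bound addresses starting at asoc.last_used_address.
 */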
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t *ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
2557 struct sctp_laddr *laddr, *starting_point;
2559 struct sctp_ifn *sctp_ifn;
2560 struct sctp_ifa *sctp_ifa, *sifa;
2561 uint8_t start_at_beginning = 0;
2562 struct sctp_vrf *vrf;
/*
 * first question, is the ifn we will emit on in our list, if so, we
 * want that one.
 */
2569 vrf = sctp_find_vrf(vrf_id);
2573 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2574 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2575 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
/*
 * first question, is the ifn we will emit on in our list? If so,
 * we want that one. First we look for a preferred. Second, we go
 * for an acceptable.
 */
2583 /* first try for a preferred address on the ep */
2584 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2585 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2587 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2588 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2591 if (((non_asoc_addr_ok == 0) &&
2592 (sctp_is_addr_restricted(stcb, sifa))) ||
2593 (non_asoc_addr_ok &&
2594 (sctp_is_addr_restricted(stcb, sifa)) &&
2595 (!sctp_is_addr_pending(stcb, sifa)))) {
2596 /* on the no-no list */
2599 atomic_add_int(&sifa->refcount, 1);
2603 /* next try for an acceptable address on the ep */
2604 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2605 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2607 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2608 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2611 if (((non_asoc_addr_ok == 0) &&
2612 (sctp_is_addr_restricted(stcb, sifa))) ||
2613 (non_asoc_addr_ok &&
2614 (sctp_is_addr_restricted(stcb, sifa)) &&
2615 (!sctp_is_addr_pending(stcb, sifa)))) {
2616 /* on the no-no list */
2619 atomic_add_int(&sifa->refcount, 1);
/*
 * if we can't find one like that then we must look at all addresses
 * bound, to pick one that is first preferable and secondly acceptable.
 */
2629 starting_point = stcb->asoc.last_used_address;
2631 if (stcb->asoc.last_used_address == NULL) {
2632 start_at_beginning = 1;
2633 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2635 /* search beginning with the last used address */
2636 for (laddr = stcb->asoc.last_used_address; laddr;
2637 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2638 if (laddr->ifa == NULL) {
2639 /* address has been removed */
2642 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2643 /* address is being deleted */
2646 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2649 if (((non_asoc_addr_ok == 0) &&
2650 (sctp_is_addr_restricted(stcb, sifa))) ||
2651 (non_asoc_addr_ok &&
2652 (sctp_is_addr_restricted(stcb, sifa)) &&
2653 (!sctp_is_addr_pending(stcb, sifa)))) {
2654 /* on the no-no list */
2657 stcb->asoc.last_used_address = laddr;
2658 atomic_add_int(&sifa->refcount, 1);
2661 if (start_at_beginning == 0) {
2662 stcb->asoc.last_used_address = NULL;
2663 goto sctp_from_the_top;
2665 /* now try for any higher scope than the destination */
2666 stcb->asoc.last_used_address = starting_point;
2667 start_at_beginning = 0;
2669 if (stcb->asoc.last_used_address == NULL) {
2670 start_at_beginning = 1;
2671 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2673 /* search beginning with the last used address */
2674 for (laddr = stcb->asoc.last_used_address; laddr;
2675 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2676 if (laddr->ifa == NULL) {
2677 /* address has been removed */
2680 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2681 /* address is being deleted */
2684 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2688 if (((non_asoc_addr_ok == 0) &&
2689 (sctp_is_addr_restricted(stcb, sifa))) ||
2690 (non_asoc_addr_ok &&
2691 (sctp_is_addr_restricted(stcb, sifa)) &&
2692 (!sctp_is_addr_pending(stcb, sifa)))) {
2693 /* on the no-no list */
2696 stcb->asoc.last_used_address = laddr;
2697 atomic_add_int(&sifa->refcount, 1);
2700 if (start_at_beginning == 0) {
2701 stcb->asoc.last_used_address = NULL;
2702 goto sctp_from_the_top2;
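/*
 * Walk the interface's address list and return the addr_wanted-th address
 * that is preferred and eligible for this destination (correct family and
 * scope, not restricted, and for IPv6 link-local sources in the same scope
 * zone as a link-local destination).
 */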
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    int addr_wanted,
    sa_family_t fam,
    sctp_route_t *ro)
{
2718 struct sctp_ifa *ifa, *sifa;
2719 int num_eligible_addr = 0;
2722 struct sockaddr_in6 sin6, lsa6;
2724 if (fam == AF_INET6) {
2725 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2726 (void)sa6_recoverscope(&sin6);
2729 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2730 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2731 (non_asoc_addr_ok == 0))
2733 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2738 if (fam == AF_INET6 &&
2740 sifa->src_is_loop && sifa->src_is_priv) {
/*
 * don't allow fe80::1 to be a src on loop ::1, we
 * don't list it to the peer so we will get an
 * abort.
 */
2748 if (fam == AF_INET6 &&
2749 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2750 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
/*
 * link-local <-> link-local must belong to the same
 * scope.
 */
2756 (void)sa6_recoverscope(&lsa6);
2757 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
/*
 * Check if the IPv6 address matches the next hop. In the
 * mobile case, an old IPv6 address may not have been deleted
 * from the interface. Then the interface has both previous and
 * new addresses. We should use the one corresponding to the
 * next hop. (by micchie)
 */
2771 if (stcb && fam == AF_INET6 &&
2772 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2773 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2780 /* Avoid topologically incorrect IPv4 address */
2781 if (stcb && fam == AF_INET &&
2782 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2783 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2789 if (sctp_is_address_in_scope(ifa,
2790 stcb->asoc.ipv4_addr_legal,
2791 stcb->asoc.ipv6_addr_legal,
2792 stcb->asoc.loopback_scope,
2793 stcb->asoc.ipv4_local_scope,
2794 stcb->asoc.local_scope,
2795 stcb->asoc.site_scope, 0) == 0) {
2798 if (((non_asoc_addr_ok == 0) &&
2799 (sctp_is_addr_restricted(stcb, sifa))) ||
2800 (non_asoc_addr_ok &&
2801 (sctp_is_addr_restricted(stcb, sifa)) &&
2802 (!sctp_is_addr_pending(stcb, sifa)))) {
/* It is restricted for some reason... probably not yet added. */
2810 if (num_eligible_addr >= addr_wanted) {
2813 num_eligible_addr++;
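/*
 * Count how many addresses on the given interface are preferred and
 * eligible source addresses for this destination; used to size the
 * round-robin rotation in sctp_choose_boundall().
 */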
static int
sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
2827 struct sctp_ifa *ifa, *sifa;
2828 int num_eligible_addr = 0;
2830 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2831 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2832 (non_asoc_addr_ok == 0)) {
2835 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2841 if (sctp_is_address_in_scope(ifa,
2842 stcb->asoc.ipv4_addr_legal,
2843 stcb->asoc.ipv6_addr_legal,
2844 stcb->asoc.loopback_scope,
2845 stcb->asoc.ipv4_local_scope,
2846 stcb->asoc.local_scope,
2847 stcb->asoc.site_scope, 0) == 0) {
2850 if (((non_asoc_addr_ok == 0) &&
2851 (sctp_is_addr_restricted(stcb, sifa))) ||
2852 (non_asoc_addr_ok &&
2853 (sctp_is_addr_restricted(stcb, sifa)) &&
2854 (!sctp_is_addr_pending(stcb, sifa)))) {
/* It is restricted for some reason... probably not yet added. */
2862 num_eligible_addr++;
2864 return (num_eligible_addr);
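/*
 * Source address selection for a bound-all endpoint: plan A tries a
 * preferred address on the interface the route goes out, plan B tries a
 * preferred address on any other interface, plan C falls back to an
 * acceptable address on the emitting interface, and plan D scans all
 * interfaces for any acceptable address.
 */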
static struct sctp_ifa *
sctp_choose_boundall(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t *ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
2878 int cur_addr_num = 0, num_preferred = 0;
2880 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2881 struct sctp_ifa *sctp_ifa, *sifa;
2883 struct sctp_vrf *vrf;
/*-
 * For boundall we can use any address in the association.
 * If non_asoc_addr_ok is set we can use any address (at least in
 * theory). So we look for preferred addresses first. If we find one,
 * we use it. Otherwise we next try to get an address on the
 * interface, which we should be able to do (unless non_asoc_addr_ok
 * is false and we are routed out that way). In these cases where we
 * can't use the address of the interface we go through all the
 * ifn's looking for an address we can use and fill that in. Punting
 * means we send back address 0, which will probably cause problems
 * actually since then IP will fill in the address of the route ifn,
 * which means we probably already rejected it... i.e. here comes an
 * abort :-<.
 */
2903 vrf = sctp_find_vrf(vrf_id);
2907 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2908 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2909 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2910 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2911 if (sctp_ifn == NULL) {
2912 /* ?? We don't have this guy ?? */
2913 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2914 goto bound_all_plan_b;
2916 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
2917 ifn_index, sctp_ifn->ifn_name);
2920 cur_addr_num = net->indx_of_eligible_next_to_use;
2922 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
2927 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
2928 num_preferred, sctp_ifn->ifn_name);
2929 if (num_preferred == 0) {
/*
 * no eligible addresses, we must use some other interface
 * address if we can find one.
 */
2934 goto bound_all_plan_b;
/*
 * Ok we have num_eligible_addr set with how many we can use, this
 * may vary from call to call due to addresses being deprecated
 * etc.
 */
2941 if (cur_addr_num >= num_preferred) {
/*
 * select the nth address from the list (where cur_addr_num is the
 * nth) and 0 is the first one, 1 is the second one etc...
 */
2948 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
2950 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2951 dest_is_priv, cur_addr_num, fam, ro);
2953 /* if sctp_ifa is NULL something changed??, fall to plan b. */
2955 atomic_add_int(&sctp_ifa->refcount, 1);
/* save off the index of the next one we will want */
2958 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
/*
 * plan_b: Look at all interfaces and find a preferred address. If
 * no preferred is found, fall through to plan_c.
 */
2967 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
2968 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
2969 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
2970 sctp_ifn->ifn_name);
2971 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
2972 /* wrong base scope */
2973 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
2976 if ((sctp_ifn == looked_at) && looked_at) {
2977 /* already looked at this guy */
2978 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
2981 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
2982 dest_is_loop, dest_is_priv, fam);
2983 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2984 "Found ifn:%p %d preferred source addresses\n",
2985 ifn, num_preferred);
2986 if (num_preferred == 0) {
2987 /* None on this interface. */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
2991 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2992 "num preferred:%d on interface:%p cur_addr_num:%d\n",
2993 num_preferred, sctp_ifn, cur_addr_num);
/*
 * Ok we have num_eligible_addr set with how many we can
 * use, this may vary from call to call due to addresses
 * being deprecated etc..
 */
3000 if (cur_addr_num >= num_preferred) {
3003 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
3004 dest_is_priv, cur_addr_num, fam, ro);
3008 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3009 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3011 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3012 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3013 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3014 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3016 atomic_add_int(&sifa->refcount, 1);
3020 again_with_private_addresses_allowed:
3022 /* plan_c: do we have an acceptable address on the emit interface */
3024 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3025 if (emit_ifn == NULL) {
3026 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3029 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3030 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", sctp_ifa);
3031 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3032 (non_asoc_addr_ok == 0)) {
3033 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3036 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3039 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3043 if (sctp_is_address_in_scope(sifa,
3044 stcb->asoc.ipv4_addr_legal,
3045 stcb->asoc.ipv6_addr_legal,
3046 stcb->asoc.loopback_scope,
3047 stcb->asoc.ipv4_local_scope,
3048 stcb->asoc.local_scope,
3049 stcb->asoc.site_scope, 0) == 0) {
3050 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3054 if (((non_asoc_addr_ok == 0) &&
3055 (sctp_is_addr_restricted(stcb, sifa))) ||
3056 (non_asoc_addr_ok &&
3057 (sctp_is_addr_restricted(stcb, sifa)) &&
3058 (!sctp_is_addr_pending(stcb, sifa)))) {
/* It is restricted for some reason... probably not yet added. */
SCTPDBG(SCTP_DEBUG_OUTPUT2, "It's restricted\n");
3068 printf("Stcb is null - no print\n");
3070 atomic_add_int(&sifa->refcount, 1);
/*
 * plan_d: We are in trouble. No preferred address on the emit
 * interface. And not even a preferred address on all interfaces. Go
 * out and see if we can find an acceptable address somewhere
 * amongst all interfaces.
 */
3080 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", looked_at);
3081 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3082 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3083 /* wrong base scope */
3086 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3087 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3088 (non_asoc_addr_ok == 0))
3090 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3096 if (sctp_is_address_in_scope(sifa,
3097 stcb->asoc.ipv4_addr_legal,
3098 stcb->asoc.ipv6_addr_legal,
3099 stcb->asoc.loopback_scope,
3100 stcb->asoc.ipv4_local_scope,
3101 stcb->asoc.local_scope,
3102 stcb->asoc.site_scope, 0) == 0) {
3106 if (((non_asoc_addr_ok == 0) &&
3107 (sctp_is_addr_restricted(stcb, sifa))) ||
3108 (non_asoc_addr_ok &&
3109 (sctp_is_addr_restricted(stcb, sifa)) &&
3110 (!sctp_is_addr_pending(stcb, sifa)))) {
/* It is restricted for some reason... probably not yet added. */
3123 if ((retried == 0) && (stcb->asoc.ipv4_local_scope == 0)) {
3124 stcb->asoc.ipv4_local_scope = 1;
3126 goto again_with_private_addresses_allowed;
3127 } else if (retried == 1) {
3128 stcb->asoc.ipv4_local_scope = 0;
3135 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3136 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3137 /* wrong base scope */
3140 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3141 struct sctp_ifa *tmp_sifa;
3143 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3144 (non_asoc_addr_ok == 0))
3146 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3149 if (tmp_sifa == NULL) {
3152 if (tmp_sifa == sifa) {
3156 if (sctp_is_address_in_scope(tmp_sifa,
3157 stcb->asoc.ipv4_addr_legal,
3158 stcb->asoc.ipv6_addr_legal,
3159 stcb->asoc.loopback_scope,
3160 stcb->asoc.ipv4_local_scope,
3161 stcb->asoc.local_scope,
3162 stcb->asoc.site_scope, 0) == 0) {
3165 if (((non_asoc_addr_ok == 0) &&
3166 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3167 (non_asoc_addr_ok &&
3168 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3169 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3179 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3180 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3181 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3186 atomic_add_int(&sifa->refcount, 1);
3194 /* tcb may be NULL */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t *ro,
    struct sctp_nets *net,
    int non_asoc_addr_ok, uint32_t vrf_id)
{
3202 struct sctp_ifa *answer;
3203 uint8_t dest_is_priv, dest_is_loop;
3207 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3211 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
/*-
 * Rules: - Find the route if needed, cache if I can. - Look at
 * interface address in route, Is it in the bound list. If so we
 * have the best source. - If not we must rotate amongst the
 * addresses.
 *
 * Do we need to pay attention to scope. We can have a private address
 * or a global address we are sourcing or sending to. So if we draw
 * it out
 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
 * For V4
 * ------------------------------------------
 *      source     *      dest    *  result
 * -----------------------------------------
 * <a>  Private    *     Global   *  NAT
 * -----------------------------------------
 * <b>  Private    *     Private  *  No problem
 * -----------------------------------------
 * <c>  Global     *     Private  *  Huh, How will this work?
 * -----------------------------------------
 * <d>  Global     *     Global   *  No Problem
 *------------------------------------------
 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
 * For V6
 *------------------------------------------
 *      source     *      dest    *  result
 * -----------------------------------------
 * <a>  Linklocal  *     Global   *
 * -----------------------------------------
 * <b>  Linklocal  *     Linklocal*  No problem
 * -----------------------------------------
 * <c>  Global     *     Linklocal*  Huh, How will this work?
 * -----------------------------------------
 * <d>  Global     *     Global   *  No Problem
 *------------------------------------------
 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
 *
 * And then we add to that what happens if there are multiple addresses
 * assigned to an interface. Remember the ifa on a ifn is a linked
 * list of addresses. So one interface can have more than one IP
 * address. What happens if we have both a private and a global
 * address? Do we then use context of destination to sort out which
 * one is best? And what about NAT's sending P->G may get you a NAT
 * translation, or should you select the G thats on the interface in
 * preference.
 *
 * Decisions:
 *
 * - count the number of addresses on the interface.
 * - if it is one, no problem except case <c>.
 *   For <a> we will assume a NAT out there.
 * - if there are more than one, then we need to worry about scope P
 *   or G. We should prefer G -> G and P -> P if possible.
 *   Then as a secondary fall back to mixed types G->P being a last
 *   ditch one.
 * - The above all works for bound all, but bound specific we need to
 *   use the same concept but instead only consider the bound
 *   addresses. If the bound set is NOT assigned to the interface then
 *   we must use rotation amongst the bound addresses..
 */
3277 if (ro->ro_rt == NULL) {
/* Need a route to cache. */
3281 SCTP_RTALLOC(ro, vrf_id);
3283 if (ro->ro_rt == NULL) {
3286 fam = ro->ro_dst.sa_family;
3287 dest_is_priv = dest_is_loop = 0;
3288 /* Setup our scopes for the destination */
3292 /* Scope based on outbound address */
3293 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3296 /* mark it as local */
3297 net->addr_is_local = 1;
3299 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3306 /* Scope based on outbound address */
3307 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3308 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
/*
 * If the address is a loopback address, which
 * consists of "::1" OR "fe80::1%lo0", we are
 * loopback scope. But we don't use dest_is_priv
 * (link local addresses).
 */
3317 /* mark it as local */
3318 net->addr_is_local = 1;
3320 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3326 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3327 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3328 SCTP_IPI_ADDR_RLOCK();
3329 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3333 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3334 dest_is_priv, dest_is_loop,
3335 non_asoc_addr_ok, fam);
3336 SCTP_IPI_ADDR_RUNLOCK();
3343 answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro,
3344 vrf_id, dest_is_priv,
3346 non_asoc_addr_ok, fam);
3348 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3353 SCTP_IPI_ADDR_RUNLOCK();
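/*
 * Scan the control mbuf chain for a cmsg of type c_type at the IPPROTO_SCTP
 * level and copy its payload into data (up to cpsize bytes); SCTP_SNDRCV
 * requests are also satisfied from SCTP_SNDINFO, SCTP_PRINFO and
 * SCTP_AUTHINFO cmsgs.
 */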
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
{
    struct cmsghdr cmh;
    int tlen, at, found;
3362 struct sctp_sndinfo sndinfo;
3363 struct sctp_prinfo prinfo;
3364 struct sctp_authinfo authinfo;
3366 tlen = SCTP_BUF_LEN(control);
/*
 * Independent of how many mbufs, find the c_type inside the control
 * structure and copy out the data.
 */
3374 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3375 /* There is not enough room for one more. */
3378 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3379 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
/* We don't have a complete CMSG header. */
3383 if (((int)cmh.cmsg_len + at) > tlen) {
3384 /* We don't have the complete CMSG. */
3387 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3388 ((c_type == cmh.cmsg_type) ||
3389 ((c_type == SCTP_SNDRCV) &&
3390 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3391 (cmh.cmsg_type == SCTP_PRINFO) ||
3392 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3393 if (c_type == cmh.cmsg_type) {
3394 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
3397 /* It is exactly what we want. Copy it out. */
3398 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), cpsize, (caddr_t)data);
3401 struct sctp_sndrcvinfo *sndrcvinfo;
3403 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3405 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3408 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3410 switch (cmh.cmsg_type) {
3412 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_sndinfo)) {
3415 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3416 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3417 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3418 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3419 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3420 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3423 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_prinfo)) {
3426 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3427 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3428 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3431 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_authinfo)) {
3434 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3435 sndrcvinfo->sinfo_keynumber_valid = 1;
3436 sndrcvinfo->sinfo_keynumber = authinfo.auth_keyid;
3444 at += CMSG_ALIGN(cmh.cmsg_len);
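/*
 * Apply the cmsgs supplied with a send that implicitly sets up an
 * association: SCTP_INIT adjusts the INIT parameters (stream counts,
 * retransmission limits) and SCTP_DSTADDRV4/V6 add remote addresses to
 * the new association.
 */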
static int
sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
{
    struct cmsghdr cmh;
    int at, tlen, i;
3454 struct sctp_initmsg initmsg;
3457 struct sockaddr_in sin;
3461 struct sockaddr_in6 sin6;
3465 tlen = SCTP_BUF_LEN(control);
3468 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3469 /* There is not enough room for one more. */
3473 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3474 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
/* We don't have a complete CMSG header. */
3479 if (((int)cmh.cmsg_len + at) > tlen) {
3480 /* We don't have the complete CMSG. */
3484 if (cmh.cmsg_level == IPPROTO_SCTP) {
3485 switch (cmh.cmsg_type) {
3487 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_initmsg)) {
3491 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3492 if (initmsg.sinit_max_attempts)
3493 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3494 if (initmsg.sinit_num_ostreams)
3495 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3496 if (initmsg.sinit_max_instreams)
3497 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3498 if (initmsg.sinit_max_init_timeo)
3499 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3500 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3501 struct sctp_stream_out *tmp_str;
3504 /* Default is NOT correct */
3505 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3506 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3507 SCTP_TCB_UNLOCK(stcb);
3508 SCTP_MALLOC(tmp_str,
3509 struct sctp_stream_out *,
3510 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3512 SCTP_TCB_LOCK(stcb);
3513 if (tmp_str != NULL) {
3514 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3515 stcb->asoc.strmout = tmp_str;
3516 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3518 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3520 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3521 stcb->asoc.strmout[i].next_sequence_sent = 0;
3522 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3523 stcb->asoc.strmout[i].stream_no = i;
3524 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3525 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
3530 case SCTP_DSTADDRV4:
3531 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in_addr)) {
3535 memset(&sin, 0, sizeof(struct sockaddr_in));
3536 sin.sin_family = AF_INET;
3537 sin.sin_len = sizeof(struct sockaddr_in);
3538 sin.sin_port = stcb->rport;
3539 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3540 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3541 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3542 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3546 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3553 case SCTP_DSTADDRV6:
3554 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in6_addr)) {
3558 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3559 sin6.sin6_family = AF_INET6;
3560 sin6.sin6_len = sizeof(struct sockaddr_in6);
3561 sin6.sin6_port = stcb->rport;
3562 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3563 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3564 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3569 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3570 in6_sin6_2_sin(&sin, &sin6);
3571 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3572 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3573 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3577 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3583 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3593 at += CMSG_ALIGN(cmh.cmsg_len);
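/*
 * Look up an existing association from the destination addresses carried
 * in SCTP_DSTADDRV4/V6 cmsgs on the control chain; returns the first
 * match, or NULL if none of the supplied addresses belongs to a known
 * association.
 */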
3598 static struct sctp_tcb *
sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
    uint16_t port,
    struct mbuf *control,
    struct sctp_nets **net_p,
    int *error)
{
    struct cmsghdr cmh;
    int tlen, at;
3607 struct sctp_tcb *stcb;
3608 struct sockaddr *addr;
3611 struct sockaddr_in sin;
3615 struct sockaddr_in6 sin6;
3619 tlen = SCTP_BUF_LEN(control);
3622 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3623 /* There is not enough room for one more. */
3627 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3628 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
/* We don't have a complete CMSG header. */
3633 if (((int)cmh.cmsg_len + at) > tlen) {
3634 /* We don't have the complete CMSG. */
3638 if (cmh.cmsg_level == IPPROTO_SCTP) {
3639 switch (cmh.cmsg_type) {
3641 case SCTP_DSTADDRV4:
3642 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in_addr)) {
3646 memset(&sin, 0, sizeof(struct sockaddr_in));
3647 sin.sin_family = AF_INET;
3648 sin.sin_len = sizeof(struct sockaddr_in);
3649 sin.sin_port = port;
3650 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3651 addr = (struct sockaddr *)&sin;
3655 case SCTP_DSTADDRV6:
3656 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in6_addr)) {
3660 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3661 sin6.sin6_family = AF_INET6;
3662 sin6.sin6_len = sizeof(struct sockaddr_in6);
3663 sin6.sin6_port = port;
3664 m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3666 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3667 in6_sin6_2_sin(&sin, &sin6);
3668 addr = (struct sockaddr *)&sin;
3671 addr = (struct sockaddr *)&sin6;
3679 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3685 at += CMSG_ALIGN(cmh.cmsg_len);
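/*
 * Build the STATE-COOKIE parameter for an INIT-ACK: the cookie carries the
 * sctp_state_cookie header, copies of the received INIT and the INIT-ACK
 * being sent, and trailing space for the HMAC signature which the caller
 * fills in.
 */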
3690 static struct mbuf *
3691 sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
3692 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
3694 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3695 struct sctp_state_cookie *stc;
3696 struct sctp_paramhdr *ph;
3702 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3703 sizeof(struct sctp_paramhdr)), 0,
3704 M_DONTWAIT, 1, MT_DATA);
3708 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
3709 if (copy_init == NULL) {
3713 #ifdef SCTP_MBUF_LOGGING
3714 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3719 if (SCTP_BUF_IS_EXTENDED(mat)) {
3720 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3722 mat = SCTP_BUF_NEXT(mat);
3726 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3728 if (copy_initack == NULL) {
3730 sctp_m_freem(copy_init);
3733 #ifdef SCTP_MBUF_LOGGING
3734 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3739 if (SCTP_BUF_IS_EXTENDED(mat)) {
3740 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3742 mat = SCTP_BUF_NEXT(mat);
/* easy case: we just drop it on the end */
3747 ph = mtod(mret, struct sctp_paramhdr *);
3748 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3749 sizeof(struct sctp_paramhdr);
3750 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3751 sizeof(struct sctp_paramhdr));
3752 ph->param_type = htons(SCTP_STATE_COOKIE);
3753 ph->param_length = 0; /* fill in at the end */
3754 /* Fill in the stc cookie data */
3755 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3757 /* tack the INIT and then the INIT-ACK onto the chain */
3760 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3761 cookie_sz += SCTP_BUF_LEN(m_at);
3762 if (SCTP_BUF_NEXT(m_at) == NULL) {
3763 SCTP_BUF_NEXT(m_at) = copy_init;
3768 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3769 cookie_sz += SCTP_BUF_LEN(m_at);
3770 if (SCTP_BUF_NEXT(m_at) == NULL) {
3771 SCTP_BUF_NEXT(m_at) = copy_initack;
3776 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3777 cookie_sz += SCTP_BUF_LEN(m_at);
3778 if (SCTP_BUF_NEXT(m_at) == NULL) {
3782 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
3784 /* no space, so free the entire chain */
3788 SCTP_BUF_LEN(sig) = 0;
3789 SCTP_BUF_NEXT(m_at) = sig;
3791 foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
3792 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3794 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3795 cookie_sz += SCTP_SIGNATURE_SIZE;
3796 ph->param_length = htons(cookie_sz);
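/*
 * Return the ECT bits to set in the IP TOS field: ECT(0) when ECN is
 * allowed on the association, 0 otherwise.
 */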
static uint8_t
sctp_get_ect(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
3805 if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
3806 return (SCTP_ECT0_BIT);
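/*
 * Handle a send that failed because no route/source address was found:
 * notify the ULP, mark the destination unreachable and, if it was the
 * primary path, try to switch the primary to an alternate net.
 */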
static void
sctp_handle_no_route(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int so_locked)
{
3817 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3820 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3821 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3822 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3823 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3824 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
3825 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3827 SCTP_FAILED_THRESHOLD,
3830 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3831 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
/*
 * JRS 5/14/07 - If a destination is
 * unreachable, the PF bit is turned off.
 * This allows an unambiguous use of the PF
 * bit for destinations that are reachable
 * but potentially failed. If the
 * destination is set to the unreachable
 * state, also set the destination to the PF
 * state.
 */
/*
 * Add debug message here if destination is
 * not in PF state.
 */
3846 /* Stop any running T3 timers here? */
3847 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
3848 (stcb->asoc.sctp_cmt_pf > 0)) {
3849 net->dest_state &= ~SCTP_ADDR_PF;
3850 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
3856 if (net == stcb->asoc.primary_destination) {
3857 /* need a new primary */
3858 struct sctp_nets *alt;
3860 alt = sctp_find_alternate_net(stcb, net, 0);
3862 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, alt) == 0) {
3863 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
3864 if (net->ro._s_addr) {
3865 sctp_free_ifa(net->ro._s_addr);
3866 net->ro._s_addr = NULL;
3868 net->src_addr_selected = 0;
3877 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3878 struct sctp_tcb *stcb, /* may be NULL */
3879 struct sctp_nets *net,
3880 struct sockaddr *to,
3882 uint32_t auth_offset,
3883 struct sctp_auth_chunk *auth,
3884 uint16_t auth_keyid,
3885 int nofragment_flag,
3887 struct sctp_tmit_chunk *chk,
3894 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3897 union sctp_sockstore *over_addr,
3900 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3903 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3904 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3905 * - fill in the HMAC digest of any AUTH chunk in the packet.
3906 * - calculate and fill in the SCTP checksum.
3907 * - prepend an IP address header.
3908 * - if boundall use INADDR_ANY.
3909 * - if boundspecific do source address selection.
3910 * - set fragmentation option for IPv4.
3911 * - On return from IP output, check/adjust mtu size of output
3912 * interface and smallest_mtu size as well.
3914 /* Will need ifdefs around this */
3917 struct sctphdr *sctphdr;
3921 sctp_route_t *ro = NULL;
3922 struct udphdr *udp = NULL;
3924 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3925 struct socket *so = NULL;
3929 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3930 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
3935 vrf_id = stcb->asoc.vrf_id;
3937 vrf_id = inp->def_vrf_id;
3940 /* fill in the HMAC digest for any AUTH chunk in the packet */
3941 if ((auth != NULL) && (stcb != NULL)) {
3942 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
3944 switch (to->sa_family) {
3948 struct ip *ip = NULL;
3949 sctp_route_t iproute;
3953 len = sizeof(struct ip) + sizeof(struct sctphdr);
3955 len += sizeof(struct udphdr);
3957 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
3960 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3963 SCTP_ALIGN_TO_END(newm, len);
3964 SCTP_BUF_LEN(newm) = len;
3965 SCTP_BUF_NEXT(newm) = m;
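/*
 * Stamp the packet with a flow id (either the one recorded for this
 * destination or the one inherited from the incoming INIT mbuf) so that
 * multi-queue drivers keep all packets of the association on one queue.
 */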
3969 if (net->flowidset == 0) {
3970 panic("Flow ID not set");
3973 m->m_pkthdr.flowid = net->flowid;
3974 m->m_flags |= M_FLOWID;
3976 if ((init != NULL) && (init->m_flags & M_FLOWID)) {
3977 m->m_pkthdr.flowid = init->m_pkthdr.flowid;
3978 m->m_flags |= M_FLOWID;
3981 packet_length = sctp_calculate_len(m);
3982 ip = mtod(m, struct ip *);
3983 ip->ip_v = IPVERSION;
3984 ip->ip_hl = (sizeof(struct ip) >> 2);
3986 tos_value = net->tos_flowlabel & 0x000000ff;
3988 tos_value = inp->ip_inp.inp.inp_ip_tos;
3990 if ((nofragment_flag) && (port == 0)) {
3995 /* FreeBSD has a function for ip_id's */
3996 ip->ip_id = ip_newid();
3998 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
3999 ip->ip_len = packet_length;
4000 ip->ip_tos = tos_value & 0xfc;
4002 ip->ip_tos |= sctp_get_ect(stcb, chk);
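/*
 * The IP protocol depends on whether a UDP encapsulation port was given:
 * IPPROTO_UDP when tunneling SCTP over UDP (port != 0), IPPROTO_SCTP
 * otherwise.
 */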
4005 ip->ip_p = IPPROTO_UDP;
4007 ip->ip_p = IPPROTO_SCTP;
4012 memset(&iproute, 0, sizeof(iproute));
4013 memcpy(&ro->ro_dst, to, to->sa_len);
4015 ro = (sctp_route_t *) & net->ro;
4017 /* Now the address selection part */
4018 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4020 /* call the routine to select the src address */
4021 if (net && out_of_asoc_ok == 0) {
4022 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4023 sctp_free_ifa(net->ro._s_addr);
4024 net->ro._s_addr = NULL;
4025 net->src_addr_selected = 0;
4031 if (net->src_addr_selected == 0) {
4032 /* Cache the source address */
4033 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4036 net->src_addr_selected = 1;
4038 if (net->ro._s_addr == NULL) {
4039 /* No route to host */
4040 net->src_addr_selected = 0;
4041 sctp_handle_no_route(stcb, net, so_locked);
4042 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4044 return (EHOSTUNREACH);
4046 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4048 if (over_addr == NULL) {
4049 struct sctp_ifa *_lsrc;
4051 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4055 if (_lsrc == NULL) {
4056 sctp_handle_no_route(stcb, net, so_locked);
4057 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4059 return (EHOSTUNREACH);
4061 ip->ip_src = _lsrc->address.sin.sin_addr;
4062 sctp_free_ifa(_lsrc);
4064 ip->ip_src = over_addr->sin.sin_addr;
4065 SCTP_RTALLOC(ro, vrf_id);
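/*
 * When tunneling over UDP (port != 0), build the encapsulating UDP header:
 * source port from the udp_tunneling_port sysctl, destination port from the
 * peer, length excluding the IP header, and the checksum seeded with the
 * pseudo-header so it can be completed later.
 */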
4069 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4070 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4071 udp->uh_dport = port;
4072 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
4073 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4074 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4076 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4079 sctphdr->src_port = src_port;
4080 sctphdr->dest_port = dest_port;
4081 sctphdr->v_tag = v_tag;
4082 sctphdr->checksum = 0;
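/*
 * The SCTP common header is filled in the same way for the plain and the
 * UDP-encapsulated case; the checksum is zeroed here and the CRC32c is
 * computed (or offloaded) just before the packet is handed to IP.
 */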
4085 * If source address selection fails and we find no
4086 * route then the ip_output should fail as well with
4087 * a NO_ROUTE_TO_HOST type error. We probably should
4088 * catch that somewhere and abort the association
4089 * right away (assuming this is an INIT being sent).
4091 if (ro->ro_rt == NULL) {
4093 * src addr selection failed to find a route
4094 * (or valid source addr), so we can't get
4095 * there from here (yet)!
4097 sctp_handle_no_route(stcb, net, so_locked);
4098 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4100 return (EHOSTUNREACH);
4102 if (ro != &iproute) {
4103 memcpy(&iproute, ro, sizeof(*ro));
4105 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4106 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4107 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4108 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
4109 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4112 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4113 /* failed to prepend data, give up */
4114 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4118 #ifdef SCTP_PACKET_LOGGING
4119 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4120 sctp_packet_log(m, packet_length);
4122 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4124 #if defined(SCTP_WITH_NO_CSUM)
4125 SCTP_STAT_INCR(sctps_sendnocrc);
4127 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4129 (stcb->asoc.loopback_scope))) {
4130 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4131 SCTP_STAT_INCR(sctps_sendswcrc);
4133 SCTP_STAT_INCR(sctps_sendnocrc);
4136 SCTP_ENABLE_UDP_CSUM(o_pak);
4138 #if defined(SCTP_WITH_NO_CSUM)
4139 SCTP_STAT_INCR(sctps_sendnocrc);
4141 m->m_pkthdr.csum_flags = CSUM_SCTP;
4142 m->m_pkthdr.csum_data = 0;
4143 SCTP_STAT_INCR(sctps_sendhwcrc);
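/*
 * With CSUM_SCTP set in the mbuf, the CRC32c is left to the outgoing
 * interface (or the stack's checksum fix-up path) instead of being computed
 * in software above.
 */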
4146 /* send it out. table id is taken from stcb */
4147 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4148 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4149 so = SCTP_INP_SO(inp);
4150 SCTP_SOCKET_UNLOCK(so, 0);
4153 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4154 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4155 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4156 atomic_add_int(&stcb->asoc.refcnt, 1);
4157 SCTP_TCB_UNLOCK(stcb);
4158 SCTP_SOCKET_LOCK(so, 0);
4159 SCTP_TCB_LOCK(stcb);
4160 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4163 SCTP_STAT_INCR(sctps_sendpackets);
4164 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4166 SCTP_STAT_INCR(sctps_senderrors);
4168 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4170 /* free temporary routes */
4177 * PMTU check versus smallest asoc MTU goes here.
4180 if ((ro->ro_rt != NULL) &&
4181 (net->ro._s_addr)) {
4184 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4186 mtu -= sizeof(struct udphdr);
4188 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4189 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4192 } else if (ro->ro_rt == NULL) {
4193 /* route was freed */
4194 if (net->ro._s_addr &&
4195 net->src_addr_selected) {
4196 sctp_free_ifa(net->ro._s_addr);
4197 net->ro._s_addr = NULL;
4199 net->src_addr_selected = 0;
4209 struct ip6_hdr *ip6h;
4210 struct route_in6 ip6route;
4213 uint16_t flowBottom;
4214 u_char tosBottom, tosTop;
4215 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4217 struct sockaddr_in6 lsa6_storage;
4219 u_short prev_port = 0;
4223 flowlabel = net->tos_flowlabel;
4225 flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
4228 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4230 len += sizeof(struct udphdr);
4232 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
4235 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4238 SCTP_ALIGN_TO_END(newm, len);
4239 SCTP_BUF_LEN(newm) = len;
4240 SCTP_BUF_NEXT(newm) = m;
4244 if (net->flowidset == 0) {
4245 panic("Flow ID not set");
4248 m->m_pkthdr.flowid = net->flowid;
4249 m->m_flags |= M_FLOWID;
4251 if ((init != NULL) && (init->m_flags & M_FLOWID)) {
4252 m->m_pkthdr.flowid = init->m_pkthdr.flowid;
4253 m->m_flags |= M_FLOWID;
4256 packet_length = sctp_calculate_len(m);
4258 ip6h = mtod(m, struct ip6_hdr *);
4260 * We assume here that inp_flow is in host byte
4261 * order within the TCB!
4263 flowBottom = flowlabel & 0x0000ffff;
4264 flowTop = ((flowlabel & 0x000f0000) >> 16);
4265 tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
4266 /* protect *sin6 from overwrite */
4267 sin6 = (struct sockaddr_in6 *)to;
4271 /* KAME hack: embed scopeid */
4272 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4273 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4277 memset(&ip6route, 0, sizeof(ip6route));
4278 ro = (sctp_route_t *) & ip6route;
4279 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4281 ro = (sctp_route_t *) & net->ro;
4283 tosBottom = (((struct in6pcb *)inp)->in6p_flowinfo & 0x0c);
4285 tosBottom |= sctp_get_ect(stcb, chk);
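/*
 * Assemble ip6_flow: the version and upper traffic-class nibble go in the
 * top byte, the lower traffic-class nibble (carrying the ECN codepoint)
 * together with the top 4 flow-label bits in the next byte, and the
 * remaining 16 flow-label bits in the low half.
 */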
4288 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
4290 ip6h->ip6_nxt = IPPROTO_UDP;
4292 ip6h->ip6_nxt = IPPROTO_SCTP;
4294 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4295 ip6h->ip6_dst = sin6->sin6_addr;
4298 * Add SRC address selection here: we can only reuse
4299 * to a limited degree the kame src-addr-sel, since
4300 * we can try their selection but it may not be bound.
4303 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4304 lsa6_tmp.sin6_family = AF_INET6;
4305 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4307 if (net && out_of_asoc_ok == 0) {
4308 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4309 sctp_free_ifa(net->ro._s_addr);
4310 net->ro._s_addr = NULL;
4311 net->src_addr_selected = 0;
4317 if (net->src_addr_selected == 0) {
4318 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4319 /* KAME hack: embed scopeid */
4320 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4321 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4324 /* Cache the source address */
4325 net->ro._s_addr = sctp_source_address_selection(inp,
4331 (void)sa6_recoverscope(sin6);
4332 net->src_addr_selected = 1;
4334 if (net->ro._s_addr == NULL) {
4335 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4336 net->src_addr_selected = 0;
4337 sctp_handle_no_route(stcb, net, so_locked);
4338 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4340 return (EHOSTUNREACH);
4342 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4344 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4345 /* KAME hack: embed scopeid */
4346 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4347 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4350 if (over_addr == NULL) {
4351 struct sctp_ifa *_lsrc;
4353 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4357 if (_lsrc == NULL) {
4358 sctp_handle_no_route(stcb, net, so_locked);
4359 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4361 return (EHOSTUNREACH);
4363 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4364 sctp_free_ifa(_lsrc);
4366 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4367 SCTP_RTALLOC(ro, vrf_id);
4369 (void)sa6_recoverscope(sin6);
4371 lsa6->sin6_port = inp->sctp_lport;
4373 if (ro->ro_rt == NULL) {
4375 * src addr selection failed to find a route
4376 * (or valid source addr), so we can't get there from here (yet).
4379 sctp_handle_no_route(stcb, net, so_locked);
4380 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4382 return (EHOSTUNREACH);
4385 * XXX: sa6 may not have a valid sin6_scope_id in
4386 * the non-SCOPEDROUTING case.
4388 bzero(&lsa6_storage, sizeof(lsa6_storage));
4389 lsa6_storage.sin6_family = AF_INET6;
4390 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4391 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4392 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4393 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4398 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4399 lsa6_storage.sin6_port = inp->sctp_lport;
4400 lsa6 = &lsa6_storage;
4401 ip6h->ip6_src = lsa6->sin6_addr;
4404 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4405 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4406 udp->uh_dport = port;
4407 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4409 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4411 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4414 sctphdr->src_port = src_port;
4415 sctphdr->dest_port = dest_port;
4416 sctphdr->v_tag = v_tag;
4417 sctphdr->checksum = 0;
4420 * We set the hop limit now since there is a good
4421 * chance that our ro pointer is now filled
4423 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4424 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4427 /* Copy to be sure something bad is not happening */
4428 sin6->sin6_addr = ip6h->ip6_dst;
4429 lsa6->sin6_addr = ip6h->ip6_src;
4432 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4433 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4434 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4435 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4436 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4438 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4440 * preserve the port and scope for link local sends.
4443 prev_scope = sin6->sin6_scope_id;
4444 prev_port = sin6->sin6_port;
4446 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4447 /* failed to prepend data, give up */
4449 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4452 #ifdef SCTP_PACKET_LOGGING
4453 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4454 sctp_packet_log(m, packet_length);
4456 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4458 #if defined(SCTP_WITH_NO_CSUM)
4459 SCTP_STAT_INCR(sctps_sendnocrc);
4461 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4463 (stcb->asoc.loopback_scope))) {
4464 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4465 SCTP_STAT_INCR(sctps_sendswcrc);
4467 SCTP_STAT_INCR(sctps_sendnocrc);
4470 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4471 udp->uh_sum = 0xffff;
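/*
 * A UDP checksum of zero means "no checksum", which is not allowed over
 * IPv6, so a computed value of zero is transmitted as all ones instead.
 */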
4474 #if defined(SCTP_WITH_NO_CSUM)
4475 SCTP_STAT_INCR(sctps_sendnocrc);
4477 m->m_pkthdr.csum_flags = CSUM_SCTP;
4478 m->m_pkthdr.csum_data = 0;
4479 SCTP_STAT_INCR(sctps_sendhwcrc);
4482 /* send it out. table id is taken from stcb */
4483 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4484 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4485 so = SCTP_INP_SO(inp);
4486 SCTP_SOCKET_UNLOCK(so, 0);
4489 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4490 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4491 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4492 atomic_add_int(&stcb->asoc.refcnt, 1);
4493 SCTP_TCB_UNLOCK(stcb);
4494 SCTP_SOCKET_LOCK(so, 0);
4495 SCTP_TCB_LOCK(stcb);
4496 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4500 /* for link local this must be done */
4501 sin6->sin6_scope_id = prev_scope;
4502 sin6->sin6_port = prev_port;
4504 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4505 SCTP_STAT_INCR(sctps_sendpackets);
4506 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4508 SCTP_STAT_INCR(sctps_senderrors);
4511 /* Now if we had a temp route free it */
4517 * PMTU check versus smallest asoc MTU goes here.
4520 if (ro->ro_rt == NULL) {
4521 /* Route was freed */
4522 if (net->ro._s_addr &&
4523 net->src_addr_selected) {
4524 sctp_free_ifa(net->ro._s_addr);
4525 net->ro._s_addr = NULL;
4527 net->src_addr_selected = 0;
4529 if ((ro->ro_rt != NULL) &&
4530 (net->ro._s_addr)) {
4533 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4535 (stcb->asoc.smallest_mtu > mtu)) {
4536 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4539 net->mtu -= sizeof(struct udphdr);
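/*
 * Also honour the link MTU reported by IPv6 neighbour discovery for the
 * outgoing interface: clamp the association's smallest MTU to it.
 */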
4543 if (ND_IFINFO(ifp)->linkmtu &&
4544 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4545 sctp_mtu_size_reset(inp,
4547 ND_IFINFO(ifp)->linkmtu);
4555 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4556 ((struct sockaddr *)to)->sa_family);
4558 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4565 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4566 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4571 struct mbuf *m, *m_at, *mp_last;
4572 struct sctp_nets *net;
4573 struct sctp_init_chunk *init;
4574 struct sctp_supported_addr_param *sup_addr;
4575 struct sctp_adaptation_layer_indication *ali;
4576 struct sctp_ecn_supported_param *ecn;
4577 struct sctp_prsctp_supported_param *prsctp;
4578 struct sctp_supported_chunk_types_param *pr_supported;
4579 int cnt_inits_to = 0;
4584 /* INIT's always go to the primary (and usually ONLY address) */
4586 net = stcb->asoc.primary_destination;
4588 net = TAILQ_FIRST(&stcb->asoc.nets);
4593 /* we confirm any address we send an INIT to */
4594 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4595 (void)sctp_set_primary_addr(stcb, NULL, net);
4597 /* we confirm any address we send an INIT to */
4598 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4600 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4602 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
4604 * special hook, if we are sending to link local it will not
4605 * show up in our private address count.
4607 struct sockaddr_in6 *sin6l;
4609 sin6l = &net->ro._l_addr.sin6;
4610 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
4614 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4615 /* This case should not happen */
4616 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4619 /* start the INIT timer */
4620 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4622 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
4624 /* No memory, INIT timer will re-attempt. */
4625 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4628 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
4630 * assume peer supports asconf in order to be able to queue local
4631 * address changes while an INIT is in flight and before the assoc is established.
4634 stcb->asoc.peer_supports_asconf = 1;
4635 /* Now lets put the SCTP header in place */
4636 init = mtod(m, struct sctp_init_chunk *);
4637 /* now the chunk header */
4638 init->ch.chunk_type = SCTP_INITIATION;
4639 init->ch.chunk_flags = 0;
4640 /* fill in later from mbuf we build */
4641 init->ch.chunk_length = 0;
4642 /* place in my tag */
4643 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4644 /* set up some of the credits. */
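/*
 * a_rwnd advertises the socket receive buffer limit, floored at
 * SCTP_MINIMAL_RWND.
 */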
4645 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4646 SCTP_MINIMAL_RWND));
4648 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4649 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4650 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4651 /* now the address restriction */
4652 /* XXX Should we take the address family of the socket into account? */
4653 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init +
4655 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4658 /* we support 2 types: IPv4/IPv6 */
4659 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint16_t));
4660 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4661 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
4663 /* we support 1 type: IPv6 */
4664 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint8_t));
4665 sup_addr->addr_type[0] = htons(SCTP_IPV6_ADDRESS);
4666 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4669 /* we support 1 type: IPv4 */
4670 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint8_t));
4671 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4672 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4674 SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);
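/*
 * Regardless of how many address types are advertised, the mbuf advances by
 * two address-type slots; when only one type is listed the second slot is
 * the parameter's padding.
 */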
4675 /* adaptation layer indication parameter */
4676 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
4677 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4678 ali->ph.param_length = htons(sizeof(*ali));
4679 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4680 SCTP_BUF_LEN(m) += sizeof(*ali);
4681 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
4683 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4684 /* Add NAT friendly parameter */
4685 struct sctp_paramhdr *ph;
4687 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4688 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4689 ph->param_length = htons(sizeof(struct sctp_paramhdr));
4690 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
4691 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph));
4693 /* now any cookie time extensions */
4694 if (stcb->asoc.cookie_preserve_req) {
4695 struct sctp_cookie_perserve_param *cookie_preserve;
4697 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
4698 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4699 cookie_preserve->ph.param_length = htons(
4700 sizeof(*cookie_preserve));
4701 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4702 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
4703 ecn = (struct sctp_ecn_supported_param *)(
4704 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
4705 stcb->asoc.cookie_preserve_req = 0;
4708 if (stcb->asoc.ecn_allowed == 1) {
4709 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
4710 ecn->ph.param_length = htons(sizeof(*ecn));
4711 SCTP_BUF_LEN(m) += sizeof(*ecn);
4712 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
4715 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
4717 /* And now tell the peer we do pr-sctp */
4718 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
4719 prsctp->ph.param_length = htons(sizeof(*prsctp));
4720 SCTP_BUF_LEN(m) += sizeof(*prsctp);
4722 /* And now tell the peer we do all the extensions */
4723 pr_supported = (struct sctp_supported_chunk_types_param *)
4724 ((caddr_t)prsctp + sizeof(*prsctp));
4725 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4727 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4728 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4729 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4730 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4731 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4732 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4733 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4735 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
4736 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4738 p_len = sizeof(*pr_supported) + num_ext;
4739 pr_supported->ph.param_length = htons(p_len);
4740 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
4741 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
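/*
 * As with every variable-length parameter built here: param_length carries
 * the unpadded size, while the mbuf advances by the size rounded up to a
 * 4-byte boundary with the pad bytes zeroed.
 */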
4744 /* add authentication parameters */
4745 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4746 struct sctp_auth_random *randp;
4747 struct sctp_auth_hmac_algo *hmacs;
4748 struct sctp_auth_chunk_list *chunks;
4750 /* attach RANDOM parameter, if available */
4751 if (stcb->asoc.authinfo.random != NULL) {
4752 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4753 p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
4754 /* random key already contains the header */
4755 bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
4756 /* zero out any padding required */
4757 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
4758 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4760 /* add HMAC_ALGO parameter */
4761 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4762 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
4763 (uint8_t *) hmacs->hmac_ids);
4765 p_len += sizeof(*hmacs);
4766 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4767 hmacs->ph.param_length = htons(p_len);
4768 /* zero out any padding required */
4769 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
4770 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4772 /* add CHUNKS parameter */
4773 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4774 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
4775 chunks->chunk_types);
4777 p_len += sizeof(*chunks);
4778 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4779 chunks->ph.param_length = htons(p_len);
4780 /* zero out any padding required */
4781 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
4782 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4786 /* now the addresses */
4788 struct sctp_scoping scp;
4791 * To optimize this we could put the scoping stuff into a
4792 * structure and remove the individual uint8's from the
4793 * assoc structure. Then we could just pass in the address
4794 * within the stcb, but for now this is a quick hack to get
4795 * the address stuff teased apart.
4797 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
4798 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
4799 scp.loopback_scope = stcb->asoc.loopback_scope;
4800 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
4801 scp.local_scope = stcb->asoc.local_scope;
4802 scp.site_scope = stcb->asoc.site_scope;
4804 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to);
4807 /* calculate the size and update pkt header and chunk header */
4809 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4810 if (SCTP_BUF_NEXT(m_at) == NULL)
4812 p_len += SCTP_BUF_LEN(m_at);
4814 init->ch.chunk_length = htons(p_len);
4816 * We pass 0 here so we do NOT set IP_DF if it's IPv4; we ignore the return
4817 * here since the timer will drive a retransmission.
4820 /* I don't expect this to execute but we will be safe here */
4822 if ((padval) && (mp_last)) {
4824 * The compiler worries that mp_last may not be set even
4825 * though I think it is impossible :-> however we add
4826 * mp_last here just in case.
4828 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
4830 /* Houston we have a problem, no space */
4836 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4837 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4838 (struct sockaddr *)&net->ro._l_addr,
4839 m, 0, NULL, 0, 0, 0, NULL, 0,
4840 inp->sctp_lport, stcb->rport, htonl(0),
4841 net->port, so_locked, NULL, NULL);
4842 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4843 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4844 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4848 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4849 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4852 * Given an mbuf containing an INIT or INIT-ACK with param_offset
4853 * pointing at the beginning of the parameters, i.e. (iphlen +
4854 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4855 * end of the mbuf, verifying that all parameters are known.
4857 * For unknown parameters build and return a mbuf with
4858 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4859 * processing this chunk stop, and set *abort_processing to 1.
4861 * By having param_offset be pre-set to where parameters begin it is
4862 * hoped that this routine may be reused in the future by new features.
4865 struct sctp_paramhdr *phdr, params;
4867 struct mbuf *mat, *op_err;
4868 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4869 int at, limit, pad_needed;
4870 uint16_t ptype, plen, padded_size;
4873 *abort_processing = 0;
4876 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4879 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4880 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
4881 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4882 ptype = ntohs(phdr->param_type);
4883 plen = ntohs(phdr->param_length);
4884 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4885 /* malformed parameter */
4886 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4889 limit -= SCTP_SIZE32(plen);
4891 * All parameters for all chunks that we know/understand are
4892 * listed here. We process them in other places and take the
4893 * appropriate stop actions per the upper bits. However, this
4894 * is the generic routine processors can call to get back
4895 * an operr to either incorporate (init-ack) or send.
4897 padded_size = SCTP_SIZE32(plen);
4899 /* Params with variable size */
4900 case SCTP_HEARTBEAT_INFO:
4901 case SCTP_STATE_COOKIE:
4902 case SCTP_UNRECOG_PARAM:
4903 case SCTP_ERROR_CAUSE_IND:
4907 /* Params with variable size within a range */
4908 case SCTP_CHUNK_LIST:
4909 case SCTP_SUPPORTED_CHUNK_EXT:
4910 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4911 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4916 case SCTP_SUPPORTED_ADDRTYPE:
4917 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4918 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4924 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4925 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4930 case SCTP_SET_PRIM_ADDR:
4931 case SCTP_DEL_IP_ADDRESS:
4932 case SCTP_ADD_IP_ADDRESS:
4933 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4934 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4935 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4940 /* Params with a fixed size */
4941 case SCTP_IPV4_ADDRESS:
4942 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4943 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4948 case SCTP_IPV6_ADDRESS:
4949 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4950 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
4955 case SCTP_COOKIE_PRESERVE:
4956 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
4957 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
4962 case SCTP_HAS_NAT_SUPPORT:
4965 case SCTP_PRSCTP_SUPPORTED:
4967 if (padded_size != sizeof(struct sctp_paramhdr)) {
4968 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
4973 case SCTP_ECN_CAPABLE:
4974 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
4975 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
4980 case SCTP_ULP_ADAPTATION:
4981 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
4982 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
4987 case SCTP_SUCCESS_REPORT:
4988 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
4989 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
4994 case SCTP_HOSTNAME_ADDRESS:
4996 /* We can NOT handle HOST NAME addresses!! */
4999 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5000 *abort_processing = 1;
5001 if (op_err == NULL) {
5002 /* Ok need to try to get a mbuf */
5004 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5006 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5009 l_len += sizeof(struct sctp_paramhdr);
5010 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5012 SCTP_BUF_LEN(op_err) = 0;
5014 * pre-reserve space for ip
5015 * and sctp header and chunk header
5019 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5021 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5023 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5024 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5028 /* If we have space */
5029 struct sctp_paramhdr s;
5032 uint32_t cpthis = 0;
5034 pad_needed = 4 - (err_at % 4);
5035 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5036 err_at += pad_needed;
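/*
 * Build an Unresolvable Address error cause: the cause header followed by
 * a copy of the offending Host Name Address parameter.
 */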
5038 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5039 s.param_length = htons(sizeof(s) + plen);
5040 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5041 err_at += sizeof(s);
5042 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5044 sctp_m_freem(op_err);
5046 * we are out of memory but
5047 * we still need to have a
5048 * look at what to do (the
5049 * system is in trouble though).
5054 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5062 * we do not recognize the parameter; figure out what to do.
5065 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5066 if ((ptype & 0x4000) == 0x4000) {
5067 /* Report bit is set?? */
5068 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5069 if (op_err == NULL) {
5072 /* Ok need to try to get an mbuf */
5074 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5076 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5079 l_len += sizeof(struct sctp_paramhdr);
5080 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5082 SCTP_BUF_LEN(op_err) = 0;
5084 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5086 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5088 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5089 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5093 /* If we have space */
5094 struct sctp_paramhdr s;
5097 uint32_t cpthis = 0;
5099 pad_needed = 4 - (err_at % 4);
5100 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5101 err_at += pad_needed;
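/*
 * Build an Unrecognized Parameter error cause: the cause header followed
 * by a copy of the offending parameter, truncated to fit tempbuf.
 */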
5103 s.param_type = htons(SCTP_UNRECOG_PARAM);
5104 s.param_length = htons(sizeof(s) + plen);
5105 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5106 err_at += sizeof(s);
5107 if (plen > sizeof(tempbuf)) {
5108 plen = sizeof(tempbuf);
5110 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5112 sctp_m_freem(op_err);
5114 * we are out of memory but
5115 * we still need to have a
5116 * look at what to do (the
5117 * system is in trouble though).
5121 goto more_processing;
5123 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5128 if ((ptype & 0x8000) == 0x0000) {
5129 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5132 /* skip this chunk and continue processing */
5133 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5134 at += SCTP_SIZE32(plen);
5139 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5143 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5144 *abort_processing = 1;
5145 if ((op_err == NULL) && phdr) {
5149 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5151 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5153 l_len += (2 * sizeof(struct sctp_paramhdr));
5154 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5156 SCTP_BUF_LEN(op_err) = 0;
5158 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5160 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5162 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5163 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5166 if ((op_err) && phdr) {
5167 struct sctp_paramhdr s;
5170 uint32_t cpthis = 0;
5172 pad_needed = 4 - (err_at % 4);
5173 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5174 err_at += pad_needed;
5176 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5177 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5178 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5179 err_at += sizeof(s);
5180 /* Only copy back the p-hdr that caused the issue */
5181 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5187 sctp_are_there_new_addresses(struct sctp_association *asoc,
5188 struct mbuf *in_initpkt, int iphlen, int offset)
5191 * Given an INIT packet, look through the packet to verify that there
5192 * are NO new addresses. As we go through the parameters, add reports
5193 * of any un-understood parameters that require an error. Also, we
5194 * must return (1) to drop the packet if we see an un-understood
5195 * parameter that tells us to drop the chunk.
5197 struct sockaddr *sa_touse;
5198 struct sockaddr *sa;
5199 struct sctp_paramhdr *phdr, params;
5200 uint16_t ptype, plen;
5202 struct sctp_nets *net;
5206 struct sockaddr_in sin4, *sa4;
5210 struct sockaddr_in6 sin6, *sa6;
5211 struct ip6_hdr *ip6h;
5216 memset(&sin4, 0, sizeof(sin4));
5217 sin4.sin_family = AF_INET;
5218 sin4.sin_len = sizeof(sin4);
5221 memset(&sin6, 0, sizeof(sin6));
5222 sin6.sin6_family = AF_INET6;
5223 sin6.sin6_len = sizeof(sin6);
5226 /* First what about the src address of the pkt ? */
5227 iph = mtod(in_initpkt, struct ip *);
5228 switch (iph->ip_v) {
5231 /* source addr is IPv4 */
5232 sin4.sin_addr = iph->ip_src;
5233 sa_touse = (struct sockaddr *)&sin4;
5237 case IPV6_VERSION >> 4:
5238 /* source addr is IPv6 */
5239 ip6h = mtod(in_initpkt, struct ip6_hdr *);
5240 sin6.sin6_addr = ip6h->ip6_src;
5241 sa_touse = (struct sockaddr *)&sin6;
5249 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5250 sa = (struct sockaddr *)&net->ro._l_addr;
5251 if (sa->sa_family == sa_touse->sa_family) {
5253 if (sa->sa_family == AF_INET) {
5254 sa4 = (struct sockaddr_in *)sa;
5255 if (sa4->sin_addr.s_addr == sin4.sin_addr.s_addr) {
5262 if (sa->sa_family == AF_INET6) {
5263 sa6 = (struct sockaddr_in6 *)sa;
5264 if (SCTP6_ARE_ADDR_EQUAL(sa6, &sin6)) {
5273 /* New address added! no need to look further. */
5276 /* Ok so far, let's munge through the rest of the packet */
5277 offset += sizeof(struct sctp_init_chunk);
5278 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5281 ptype = ntohs(phdr->param_type);
5282 plen = ntohs(phdr->param_length);
5285 case SCTP_IPV4_ADDRESS:
5287 struct sctp_ipv4addr_param *p4, p4_buf;
5289 phdr = sctp_get_next_param(in_initpkt, offset,
5290 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5291 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5295 p4 = (struct sctp_ipv4addr_param *)phdr;
5296 sin4.sin_addr.s_addr = p4->addr;
5297 sa_touse = (struct sockaddr *)&sin4;
5301 case SCTP_IPV6_ADDRESS:
5303 struct sctp_ipv6addr_param *p6, p6_buf;
5305 phdr = sctp_get_next_param(in_initpkt, offset,
5306 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5307 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5311 p6 = (struct sctp_ipv6addr_param *)phdr;
5312 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5314 sa_touse = (struct sockaddr *)&sin6;
5321 /* ok, sa_touse points to one to check */
5323 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5324 sa = (struct sockaddr *)&net->ro._l_addr;
5325 if (sa->sa_family != sa_touse->sa_family) {
5329 if (sa->sa_family == AF_INET) {
5330 sa4 = (struct sockaddr_in *)sa;
5331 if (sa4->sin_addr.s_addr ==
5332 sin4.sin_addr.s_addr) {
5339 if (sa->sa_family == AF_INET6) {
5340 sa6 = (struct sockaddr_in6 *)sa;
5341 if (SCTP6_ARE_ADDR_EQUAL(
5350 /* New addr added! no need to look further */
5354 offset += SCTP_SIZE32(plen);
5355 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5361 * Given an MBUF chain that was sent into us containing an INIT, build an
5362 * INIT-ACK with COOKIE and send it back. We assume that in_initpkt has done
5363 * a pullup to include the IPv6/IPv4 header, SCTP header and initial part of
5364 * the INIT message (i.e. the struct sctp_init_msg).
5367 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5368 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
5369 struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5371 struct sctp_association *asoc;
5372 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
5373 struct sctp_init_ack_chunk *initack;
5374 struct sctp_adaptation_layer_indication *ali;
5375 struct sctp_ecn_supported_param *ecn;
5376 struct sctp_prsctp_supported_param *prsctp;
5377 struct sctp_supported_chunk_types_param *pr_supported;
5378 union sctp_sockstore store, store1, *over_addr;
5381 struct sockaddr_in *sin, *to_sin;
5385 struct sockaddr_in6 *sin6, *to_sin6;
5391 struct ip6_hdr *ip6;
5394 struct sockaddr *to;
5395 struct sctp_state_cookie stc;
5396 struct sctp_nets *net = NULL;
5397 uint8_t *signature = NULL;
5398 int cnt_inits_to = 0;
5399 uint16_t his_limit, i_want;
5400 int abort_flag, padval;
5403 int nat_friendly = 0;
5411 if ((asoc != NULL) &&
5412 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5413 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
5414 /* new addresses, out of here in non-cookie-wait states */
5416 * Send an ABORT; we don't add the new address error clause,
5417 * though we even set the T bit and copy in the 0 tag. This
5418 * looks no different than if no listener was present.
5420 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port);
5424 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5425 (offset + sizeof(struct sctp_init_chunk)),
5426 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5429 sctp_send_abort(init_pkt, iphlen, sh,
5430 init_chk->init.initiate_tag, op_err, vrf_id, port);
5433 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
5435 /* No memory, INIT timer will re-attempt. */
5437 sctp_m_freem(op_err);
5440 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5442 /* the time I built cookie */
5443 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5445 /* populate any tie tags */
5447 /* unlock before tag selections */
5448 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5449 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5450 stc.cookie_life = asoc->cookie_life;
5451 net = asoc->primary_destination;
5453 stc.tie_tag_my_vtag = 0;
5454 stc.tie_tag_peer_vtag = 0;
5455 /* life I will award this cookie */
5456 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5459 /* copy in the ports for later check */
5460 stc.myport = sh->dest_port;
5461 stc.peerport = sh->src_port;
5464 * If we wanted to honor cookie life extensions, we would add to
5465 * stc.cookie_life. For now we should NOT honor any extension
5467 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5468 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5469 struct inpcb *in_inp;
5471 /* It's a V6 socket */
5472 in_inp = (struct inpcb *)inp;
5473 stc.ipv6_addr_legal = 1;
5474 /* Now look at the binding flag to see if V4 will be legal */
5475 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
5476 stc.ipv4_addr_legal = 1;
5478 /* V4 addresses are NOT legal on the association */
5479 stc.ipv4_addr_legal = 0;
5482 /* It's a V4 socket, no V6 */
5483 stc.ipv4_addr_legal = 1;
5484 stc.ipv6_addr_legal = 0;
5487 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5492 /* now for scope setup */
5493 memset((caddr_t)&store, 0, sizeof(store));
5494 memset((caddr_t)&store1, 0, sizeof(store1));
5497 to_sin = &store1.sin;
5501 to_sin6 = &store1.sin6;
5503 iph = mtod(init_pkt, struct ip *);
5504 /* establish the to_addr's */
5505 switch (iph->ip_v) {
5508 to_sin->sin_port = sh->dest_port;
5509 to_sin->sin_family = AF_INET;
5510 to_sin->sin_len = sizeof(struct sockaddr_in);
5511 to_sin->sin_addr = iph->ip_dst;
5515 case IPV6_VERSION >> 4:
5516 ip6 = mtod(init_pkt, struct ip6_hdr *);
5517 to_sin6->sin6_addr = ip6->ip6_dst;
5518 to_sin6->sin6_scope_id = 0;
5519 to_sin6->sin6_port = sh->dest_port;
5520 to_sin6->sin6_family = AF_INET6;
5521 to_sin6->sin6_len = sizeof(struct sockaddr_in6);
5530 to = (struct sockaddr *)&store;
5531 switch (iph->ip_v) {
5535 sin->sin_family = AF_INET;
5536 sin->sin_len = sizeof(struct sockaddr_in);
5537 sin->sin_port = sh->src_port;
5538 sin->sin_addr = iph->ip_src;
5539 /* lookup address */
5540 stc.address[0] = sin->sin_addr.s_addr;
5544 stc.addr_type = SCTP_IPV4_ADDRESS;
5545 /* local from address */
5546 stc.laddress[0] = to_sin->sin_addr.s_addr;
5547 stc.laddress[1] = 0;
5548 stc.laddress[2] = 0;
5549 stc.laddress[3] = 0;
5550 stc.laddr_type = SCTP_IPV4_ADDRESS;
5551 /* scope_id is only for v6 */
5553 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5554 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
5559 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5560 /* Must use the address in this case */
5561 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
5562 stc.loopback_scope = 1;
5565 stc.local_scope = 0;
5571 case IPV6_VERSION >> 4:
5573 ip6 = mtod(init_pkt, struct ip6_hdr *);
5574 sin6->sin6_family = AF_INET6;
5575 sin6->sin6_len = sizeof(struct sockaddr_in6);
5576 sin6->sin6_port = sh->src_port;
5577 sin6->sin6_addr = ip6->ip6_src;
5578 /* lookup address */
5579 memcpy(&stc.address, &sin6->sin6_addr,
5580 sizeof(struct in6_addr));
5581 sin6->sin6_scope_id = 0;
5582 stc.addr_type = SCTP_IPV6_ADDRESS;
5584 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
5586 * FIX ME: does this have scope from the rcvif?
5589 (void)sa6_recoverscope(sin6);
5590 stc.scope_id = sin6->sin6_scope_id;
5591 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5592 stc.loopback_scope = 1;
5593 stc.local_scope = 0;
5596 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
5598 * If the new destination is a
5599 * LINK_LOCAL, we must have both
5600 * site and local scope in common. Don't
5601 * set local scope though since we
5602 * must depend on the source to be
5603 * added implicitly. We cannot
5604 * assure just because we share one
5605 * link that all links are common.
5607 stc.local_scope = 0;
5611 * we start counting for the private
5612 * address stuff at 1, since the
5613 * link local we source from won't
5614 * show up in our scoped count.
5618 * pull out the scope_id from the incoming packet.
5622 * FIX ME: does this have scope from the rcvif?
5625 (void)sa6_recoverscope(sin6);
5626 stc.scope_id = sin6->sin6_scope_id;
5627 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5628 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
5630 * If the new destination is
5631 * SITE_LOCAL then we must have site scope in common.
5636 memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr));
5637 stc.laddr_type = SCTP_IPV6_ADDRESS;
5647 /* set the scope per the existing tcb */
5650 struct sctp_nets *lnet;
5654 stc.loopback_scope = asoc->loopback_scope;
5655 stc.ipv4_scope = asoc->ipv4_local_scope;
5656 stc.site_scope = asoc->site_scope;
5657 stc.local_scope = asoc->local_scope;
5659 /* Why do we not consider IPv4 LL addresses? */
5660 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5661 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5662 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5664 * if we have a LL address, start counting at 1.
5672 /* use the net pointer */
5673 to = (struct sockaddr *)&net->ro._l_addr;
5674 switch (to->sa_family) {
5677 sin = (struct sockaddr_in *)to;
5678 stc.address[0] = sin->sin_addr.s_addr;
5682 stc.addr_type = SCTP_IPV4_ADDRESS;
5683 if (net->src_addr_selected == 0) {
5685 * strange case here, the INIT should have
5686 * done the selection.
5688 net->ro._s_addr = sctp_source_address_selection(inp,
5689 stcb, (sctp_route_t *) & net->ro,
5691 if (net->ro._s_addr == NULL)
5694 net->src_addr_selected = 1;
5697 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5698 stc.laddress[1] = 0;
5699 stc.laddress[2] = 0;
5700 stc.laddress[3] = 0;
5701 stc.laddr_type = SCTP_IPV4_ADDRESS;
5706 sin6 = (struct sockaddr_in6 *)to;
5707 memcpy(&stc.address, &sin6->sin6_addr,
5708 sizeof(struct in6_addr));
5709 stc.addr_type = SCTP_IPV6_ADDRESS;
5710 if (net->src_addr_selected == 0) {
5712 * strange case here, the INIT should have
5713 * done the selection.
5715 net->ro._s_addr = sctp_source_address_selection(inp,
5716 stcb, (sctp_route_t *) & net->ro,
5718 if (net->ro._s_addr == NULL)
5721 net->src_addr_selected = 1;
5723 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5724 sizeof(struct in6_addr));
5725 stc.laddr_type = SCTP_IPV6_ADDRESS;
5730 /* Now lets put the SCTP header in place */
5731 initack = mtod(m, struct sctp_init_ack_chunk *);
5732 /* Save it off for quick ref */
5733 stc.peers_vtag = init_chk->init.initiate_tag;
5735 memcpy(stc.identification, SCTP_VERSION_STRING,
5736 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5737 /* now the chunk header */
5738 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5739 initack->ch.chunk_flags = 0;
5740 /* fill in later from mbuf we build */
5741 initack->ch.chunk_length = 0;
5742 /* place in my tag */
5743 if ((asoc != NULL) &&
5744 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5745 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5746 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5747 /* re-use the v-tags and init-seq here */
5748 initack->init.initiate_tag = htonl(asoc->my_vtag);
5749 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5751 uint32_t vtag, itsn;
5753 if (hold_inp_lock) {
5754 SCTP_INP_INCR_REF(inp);
5755 SCTP_INP_RUNLOCK(inp);
5758 atomic_add_int(&asoc->refcnt, 1);
5759 SCTP_TCB_UNLOCK(stcb);
5761 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5762 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5764 * Got a duplicate vtag on some guy behind a
5765 * NAT; make sure we don't use it.
5769 initack->init.initiate_tag = htonl(vtag);
5770 /* get a TSN to use too */
5771 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5772 initack->init.initial_tsn = htonl(itsn);
5773 SCTP_TCB_LOCK(stcb);
5774 atomic_add_int(&asoc->refcnt, -1);
5776 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5777 initack->init.initiate_tag = htonl(vtag);
5778 /* get a TSN to use too */
5779 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5781 if (hold_inp_lock) {
5782 SCTP_INP_RLOCK(inp);
5783 SCTP_INP_DECR_REF(inp);
5786 /* save away my tag too */
5787 stc.my_vtag = initack->init.initiate_tag;
5789 /* set up some of the credits. */
5790 so = inp->sctp_socket;
5792 /* memory problem */
5796 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5798 /* set what I want */
5799 his_limit = ntohs(init_chk->init.num_inbound_streams);
5800 /* choose what I want */
5802 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5803 i_want = asoc->streamoutcnt;
5805 i_want = inp->sctp_ep.pre_open_stream_count;
5808 i_want = inp->sctp_ep.pre_open_stream_count;
5810 if (his_limit < i_want) {
5811 /* I Want more :< */
5812 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5814 /* I can have what I want :> */
5815 initack->init.num_outbound_streams = htons(i_want);
5817 /* tell him his limit. */
5818 initack->init.num_inbound_streams =
5819 htons(inp->sctp_ep.max_open_streams_intome);
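/*
 * Outbound streams end up as min(what we want, what the peer will accept);
 * inbound streams always advertise our own limit.
 */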
5821 /* adaptation layer indication parameter */
5822 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5823 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5824 ali->ph.param_length = htons(sizeof(*ali));
5825 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5826 SCTP_BUF_LEN(m) += sizeof(*ali);
5827 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5830 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
5831 (inp->sctp_ecn_enable == 1)) {
5832 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5833 ecn->ph.param_length = htons(sizeof(*ecn));
5834 SCTP_BUF_LEN(m) += sizeof(*ecn);
5836 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5839 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5841 /* And now tell the peer we do pr-sctp */
5842 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5843 prsctp->ph.param_length = htons(sizeof(*prsctp));
5844 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5846 /* Add NAT friendly parameter */
5847 struct sctp_paramhdr *ph;
5849 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5850 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5851 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5852 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5854 /* And now tell the peer we do all the extensions */
5855 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5856 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5858 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5859 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5860 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5861 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5862 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5863 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5864 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5865 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5866 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5867 p_len = sizeof(*pr_supported) + num_ext;
5868 pr_supported->ph.param_length = htons(p_len);
5869 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5870 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5872 /* add authentication parameters */
5873 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5874 struct sctp_auth_random *randp;
5875 struct sctp_auth_hmac_algo *hmacs;
5876 struct sctp_auth_chunk_list *chunks;
5877 uint16_t random_len;
5879 /* generate and add RANDOM parameter */
5880 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5881 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5882 randp->ph.param_type = htons(SCTP_RANDOM);
5883 p_len = sizeof(*randp) + random_len;
5884 randp->ph.param_length = htons(p_len);
5885 SCTP_READ_RANDOM(randp->random_data, random_len);
5886 /* zero out any padding required */
5887 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5888 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5890 /* add HMAC_ALGO parameter */
5891 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5892 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5893 (uint8_t *) hmacs->hmac_ids);
5895 p_len += sizeof(*hmacs);
5896 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5897 hmacs->ph.param_length = htons(p_len);
5898 /* zero out any padding required */
5899 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5900 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5902 /* add CHUNKS parameter */
5903 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5904 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5905 chunks->chunk_types);
5907 p_len += sizeof(*chunks);
5908 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5909 chunks->ph.param_length = htons(p_len);
5910 /* zero out any padding required */
5911 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5912 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5916 /* now the addresses */
5918 struct sctp_scoping scp;
5921 * To optimize this we could put the scoping stuff into a
5922 * structure and remove the individual uint8's from the stc
5923 * structure. Then we could just pass in the address within
5924 * the stc, but for now this is a quick hack to get the
5925 * address stuff teased apart.
5927 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5928 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5929 scp.loopback_scope = stc.loopback_scope;
5930 scp.ipv4_local_scope = stc.ipv4_scope;
5931 scp.local_scope = stc.local_scope;
5932 scp.site_scope = stc.site_scope;
5933 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to);
5936 /* tack on the operational error if present */
5944 llen += SCTP_BUF_LEN(ol);
5945 ol = SCTP_BUF_NEXT(ol);
5948 /* must add a pad to the param */
5949 uint32_t cpthis = 0;
5952 padlen = 4 - (llen % 4);
5953 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
5955 while (SCTP_BUF_NEXT(m_at) != NULL) {
5956 m_at = SCTP_BUF_NEXT(m_at);
5958 SCTP_BUF_NEXT(m_at) = op_err;
5959 while (SCTP_BUF_NEXT(m_at) != NULL) {
5960 m_at = SCTP_BUF_NEXT(m_at);
5963 /* pre-calculate the size and update pkt header and chunk header */
5965 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5966 p_len += SCTP_BUF_LEN(m_tmp);
5967 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5968 /* m_tmp should now point to last one */
5973 /* Now we must build a cookie */
5974 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m, 0, &stc, &signature);
5975 if (m_cookie == NULL) {
5976 /* memory problem */
5980 /* Now append the cookie to the end and update the space/size */
5981 SCTP_BUF_NEXT(m_tmp) = m_cookie;
5983 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5984 p_len += SCTP_BUF_LEN(m_tmp);
5985 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5986 /* m_tmp should now point to last one */
5992 * Place in the size, but we don't include the last pad (if any) in
5995 initack->ch.chunk_length = htons(p_len);
5998 * Time to sign the cookie. We don't sign over the cookie signature
5999 * itself, thus we set it up as a trailer.
6001 (void)sctp_hmac_m(SCTP_HMAC,
6002 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6003 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6004 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
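/*
 * sctp_hmac_m() above runs the endpoint's current secret over the cookie
 * mbuf chain, skipping the leading parameter header and leaving
 * SCTP_SIGNATURE_SIZE bytes of trailer for the digest itself. A returned
 * COOKIE-ECHO is later verified against this signature, so the listener
 * does not have to keep per-association state for an unverified INIT.
 */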
6006 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6007 * here since the timer will drive a retransmission.
6010 if ((padval) && (mp_last)) {
6011 /* see my previous comments on mp_last */
6014 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
6016 /* Houston we have a problem, no space */
6022 if (stc.loopback_scope) {
6023 over_addr = &store1;
6028 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6030 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6031 port, SCTP_SO_NOT_LOCKED, over_addr, init_pkt);
6032 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6037 sctp_prune_prsctp(struct sctp_tcb *stcb,
6038 struct sctp_association *asoc,
6039 struct sctp_sndrcvinfo *srcv,
6043 struct sctp_tmit_chunk *chk, *nchk;
6045 SCTP_TCB_LOCK_ASSERT(stcb);
6046 if ((asoc->peer_supports_prsctp) &&
6047 (asoc->sent_queue_cnt_removeable > 0)) {
6048 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6050 * Look for chunks marked with the PR_SCTP flag AND
6051 * the buffer space flag. If the one being sent is
6052 * equal or greater priority then purge the old one
6053 * and free some space.
6055 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6057 * This one is PR-SCTP AND buffer space
6060 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6062 * Lower numbers equate to higher
6063 * priority so if the one we are
6064 * looking at has a larger or equal
6065 * priority we want to drop the data
6066 * and NOT retransmit it.
6070 * We release the book_size
6071 * if the mbuf is here
6076 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6077 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
6079 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
6080 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6083 freed_spc += ret_spc;
6084 if (freed_spc >= dataout) {
6087 } /* if chunk was present */
6088 } /* if of sufficient priority */
6089 } /* if chunk has enabled */
6090 } /* tailqforeach */
6092 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6093 /* Here we must move to the sent queue and mark */
6094 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6095 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6098 * We release the book_size
6099 * if the mbuf is here
6103 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6104 SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
6107 freed_spc += ret_spc;
6108 if (freed_spc >= dataout) {
6111 } /* end if chk->data */
6112 } /* end if right class */
6113 } /* end if chk pr-sctp */
6114 } /* tailqforeachsafe (chk) */
6115 } /* if enabled in asoc */
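/*
 * Illustration of the pruning policy above (hypothetical numbers):
 * sinfo_timetolive doubles as a priority for PR-SCTP buffer-drop messages,
 * lower value = higher priority. If a new send with sinfo_timetolive = 5
 * needs 3000 bytes and the queues hold buffer-drop chunks stored with
 * timetodrop.tv_sec of 7 and 4, only the tv_sec == 7 chunk qualifies
 * (7 >= 5) and is released via sctp_release_pr_sctp_chunk(); pruning stops
 * as soon as freed_spc reaches the requested dataout.
 */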
6119 sctp_get_frag_point(struct sctp_tcb *stcb,
6120 struct sctp_association *asoc)
6125 * For endpoints that have both v6 and v4 addresses we must reserve
6126 * room for the ipv6 header, for those that are only dealing with V4
6127 * we use a larger frag point.
6129 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6130 ovh = SCTP_MED_OVERHEAD;
6132 ovh = SCTP_MED_V4_OVERHEAD;
6135 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6136 siz = asoc->smallest_mtu - ovh;
6138 siz = (stcb->asoc.sctp_frag_point - ovh);
6140 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6142 /* A data chunk MUST fit in a cluster */
6143 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6146 /* adjust for an AUTH chunk if DATA requires auth */
6147 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6148 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6151 /* make it an even word boundary please */
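/*
 * Word alignment here just means rounding siz down to a multiple of 4,
 * i.e. a sketch of the usual idiom:
 *
 *	if (siz % 4)
 *		siz -= (siz % 4);
 *
 * so, for example, a 1461-byte budget becomes 1460 and every DATA fragment
 * stays 32-bit aligned.
 */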
6158 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6162 * We assume that the user wants PR_SCTP_TTL if the user provides a
6163 * positive lifetime but does not specify any PR_SCTP policy. This
6164 * is a BAD assumption and causes problems at least with the
6165 * U-Vancouver MPI folks. I will change this to be: no policy means NO PR-SCTP.
6168 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6169 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6174 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6175 case CHUNK_FLAGS_PR_SCTP_BUF:
6177 * Time to live is a priority stored in tv_sec when doing
6178 * the buffer drop thing.
6180 sp->ts.tv_sec = sp->timetolive;
6183 case CHUNK_FLAGS_PR_SCTP_TTL:
6187 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6188 tv.tv_sec = sp->timetolive / 1000;
6189 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6191 * TODO sctp_constants.h needs alternative time
6192 * macros when _KERNEL is undefined.
6194 timevaladd(&sp->ts, &tv);
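/*
 * Example of the conversion above: sinfo_timetolive is in milliseconds, so
 * a lifetime of 1500 ms gives tv.tv_sec = 1 and tv.tv_usec = 500000, which
 * timevaladd() adds to the current time to yield the absolute expiry in
 * sp->ts.
 */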
6197 case CHUNK_FLAGS_PR_SCTP_RTX:
6199 * Time to live is the number of retransmissions, stored in tv_sec.
6202 sp->ts.tv_sec = sp->timetolive;
6206 SCTPDBG(SCTP_DEBUG_USRREQ1,
6207 "Unknown PR_SCTP policy %u.\n",
6208 PR_SCTP_POLICY(sp->sinfo_flags));
6214 sctp_msg_append(struct sctp_tcb *stcb,
6215 struct sctp_nets *net,
6217 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6219 int error = 0, holds_lock;
6221 struct sctp_stream_queue_pending *sp = NULL;
6222 struct sctp_stream_out *strm;
6225 * Given an mbuf chain, put it into the association send queue and
6226 * place it on the wheel
6228 holds_lock = hold_stcb_lock;
6229 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6230 /* Invalid stream number */
6231 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6235 if ((stcb->asoc.stream_locked) &&
6236 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6237 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6241 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6242 /* Now can we send this? */
6243 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6244 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6245 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6246 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6247 /* got data while shutting down */
6248 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6252 sctp_alloc_a_strmoq(stcb, sp);
6254 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6258 sp->sinfo_flags = srcv->sinfo_flags;
6259 sp->timetolive = srcv->sinfo_timetolive;
6260 sp->ppid = srcv->sinfo_ppid;
6261 sp->context = srcv->sinfo_context;
6263 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6265 atomic_add_int(&sp->net->ref_count, 1);
6269 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6270 sp->stream = srcv->sinfo_stream;
6271 sp->msg_is_complete = 1;
6272 sp->sender_all_done = 1;
6275 sp->tail_mbuf = NULL;
6276 sctp_set_prsctp_policy(sp);
6278 * We could in theory (for sendall) pass the length in, but we would
6279 * still have to hunt through the chain since we need to set up the tail_mbuf.
6283 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6284 if (SCTP_BUF_NEXT(at) == NULL)
6286 sp->length += SCTP_BUF_LEN(at);
6288 if (srcv->sinfo_keynumber_valid) {
6289 sp->auth_keyid = srcv->sinfo_keynumber;
6291 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6293 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6294 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6295 sp->holds_key_ref = 1;
6297 SCTP_TCB_SEND_LOCK(stcb);
6298 sctp_snd_sb_alloc(stcb, sp->length);
6299 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6300 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6301 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
6302 sp->strseq = strm->next_sequence_sent;
6303 strm->next_sequence_sent++;
6305 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6307 SCTP_TCB_SEND_UNLOCK(stcb);
6316 static struct mbuf *
6317 sctp_copy_mbufchain(struct mbuf *clonechain,
6318 struct mbuf *outchain,
6319 struct mbuf **endofchain,
6322 uint8_t copy_by_ref)
6325 struct mbuf *appendchain;
6329 if (endofchain == NULL) {
6333 sctp_m_freem(outchain);
6336 if (can_take_mbuf) {
6337 appendchain = clonechain;
6340 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6342 /* It's not in a cluster */
6343 if (*endofchain == NULL) {
6344 /* lets get a mbuf cluster */
6345 if (outchain == NULL) {
6346 /* This is the general case */
6348 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6349 if (outchain == NULL) {
6352 SCTP_BUF_LEN(outchain) = 0;
6353 *endofchain = outchain;
6354 /* get the prepend space */
6355 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6358 * We really should not get a NULL in endofchain
6364 if (SCTP_BUF_NEXT(m) == NULL) {
6368 m = SCTP_BUF_NEXT(m);
6371 if (*endofchain == NULL) {
6373 * huh, TSNH XXX maybe we should panic
6376 sctp_m_freem(outchain);
6380 /* get the new end of length */
6381 len = M_TRAILINGSPACE(*endofchain);
6383 /* how much is left at the end? */
6384 len = M_TRAILINGSPACE(*endofchain);
6386 /* Find the end of the data, for appending */
6387 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6389 /* Now lets copy it out */
6390 if (len >= sizeofcpy) {
6391 /* It all fits, copy it in */
6392 m_copydata(clonechain, 0, sizeofcpy, cp);
6393 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6395 /* fill up the end of the chain */
6397 m_copydata(clonechain, 0, len, cp);
6398 SCTP_BUF_LEN((*endofchain)) += len;
6399 /* now we need another one */
6402 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6407 SCTP_BUF_NEXT((*endofchain)) = m;
6409 cp = mtod((*endofchain), caddr_t);
6410 m_copydata(clonechain, len, sizeofcpy, cp);
6411 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6415 /* copy the old fashion way */
6416 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
6417 #ifdef SCTP_MBUF_LOGGING
6418 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6423 if (SCTP_BUF_IS_EXTENDED(mat)) {
6424 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6426 mat = SCTP_BUF_NEXT(mat);
6432 if (appendchain == NULL) {
6435 sctp_m_freem(outchain);
6439 /* tack on to the end */
6440 if (*endofchain != NULL) {
6441 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6445 if (SCTP_BUF_NEXT(m) == NULL) {
6446 SCTP_BUF_NEXT(m) = appendchain;
6449 m = SCTP_BUF_NEXT(m);
6453 * save off the end and update the end-chain position
6457 if (SCTP_BUF_NEXT(m) == NULL) {
6461 m = SCTP_BUF_NEXT(m);
6465 /* save off the end and update the end-chain position */
6468 if (SCTP_BUF_NEXT(m) == NULL) {
6472 m = SCTP_BUF_NEXT(m);
6474 return (appendchain);
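/*
 * Summary of the strategies above: when the caller lets us take the mbufs
 * (can_take_mbuf) the clonechain is linked in as-is; when the payload is
 * small enough (under roughly sctp_mbuf_threshold_count small mbufs worth)
 * it is flattened with m_copydata() into a cluster hanging off *endofchain;
 * otherwise SCTP_M_COPYM() is used, which shares the underlying clusters by
 * reference rather than copying the payload bytes.
 */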
6479 sctp_med_chunk_output(struct sctp_inpcb *inp,
6480 struct sctp_tcb *stcb,
6481 struct sctp_association *asoc,
6484 int control_only, int from_where,
6485 struct timeval *now, int *now_filled, int frag_point, int so_locked
6486 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6492 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6495 struct sctp_copy_all *ca;
6498 int added_control = 0;
6499 int un_sent, do_chunk_output = 1;
6500 struct sctp_association *asoc;
6502 ca = (struct sctp_copy_all *)ptr;
6503 if (ca->m == NULL) {
6506 if (ca->inp != inp) {
6510 if ((ca->m) && ca->sndlen) {
6511 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
6513 /* can't copy so we are done */
6517 #ifdef SCTP_MBUF_LOGGING
6518 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6523 if (SCTP_BUF_IS_EXTENDED(mat)) {
6524 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6526 mat = SCTP_BUF_NEXT(mat);
6533 SCTP_TCB_LOCK_ASSERT(stcb);
6534 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6535 /* Abort this assoc with m as the user defined reason */
6537 struct sctp_paramhdr *ph;
6539 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
6541 ph = mtod(m, struct sctp_paramhdr *);
6542 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6543 ph->param_length = htons(ca->sndlen);
6546 * We add one here to keep the assoc from
6547 * disappearing on us.
6549 atomic_add_int(&stcb->asoc.refcnt, 1);
6550 sctp_abort_an_association(inp, stcb,
6551 SCTP_RESPONSE_TO_USER_REQ,
6552 m, SCTP_SO_NOT_LOCKED);
6554 * sctp_abort_an_association calls sctp_free_asoc()
6555 * free association will NOT free it since we
6556 * incremented the refcnt .. we do this to prevent
6557 * it being freed and things getting tricky since we
6558 * could end up (from free_asoc) calling inpcb_free
6559 * which would get a recursive lock call to the
6560 * iterator lock.. But as a consequence of that the
6561 * stcb will return to us un-locked.. since
6562 * free_asoc returns with either no TCB or the TCB
6563 * unlocked, we must relock.. to unlock in the
6564 * iterator timer :-0
6566 SCTP_TCB_LOCK(stcb);
6567 atomic_add_int(&stcb->asoc.refcnt, -1);
6568 goto no_chunk_output;
6572 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
6576 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6577 /* shutdown this assoc */
6580 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
6582 if (TAILQ_EMPTY(&asoc->send_queue) &&
6583 TAILQ_EMPTY(&asoc->sent_queue) &&
6585 if (asoc->locked_on_sending) {
6589 * there is nothing queued to send, so I'm done.
6592 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6593 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6594 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6596 * only send SHUTDOWN the first time
6599 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
6600 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6601 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6603 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6604 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6605 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6606 asoc->primary_destination);
6607 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6608 asoc->primary_destination);
6610 do_chunk_output = 0;
6614 * we still got (or just got) data to send,
6615 * so set SHUTDOWN_PENDING
6618 * XXX sockets draft says that SCTP_EOF
6619 * should be sent with no data. currently,
6620 * we will allow user data to be sent first
6621 * and move to SHUTDOWN-PENDING
6623 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6624 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6625 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6626 if (asoc->locked_on_sending) {
6628 * Locked to send out the data.
6631 struct sctp_stream_queue_pending *sp;
6633 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6635 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6636 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6639 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6640 if (TAILQ_EMPTY(&asoc->send_queue) &&
6641 TAILQ_EMPTY(&asoc->sent_queue) &&
6642 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6644 atomic_add_int(&stcb->asoc.refcnt, 1);
6645 sctp_abort_an_association(stcb->sctp_ep, stcb,
6646 SCTP_RESPONSE_TO_USER_REQ,
6647 NULL, SCTP_SO_NOT_LOCKED);
6648 atomic_add_int(&stcb->asoc.refcnt, -1);
6649 goto no_chunk_output;
6651 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6652 asoc->primary_destination);
6658 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6659 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6661 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6662 (stcb->asoc.total_flight > 0) &&
6663 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
6665 do_chunk_output = 0;
6667 if (do_chunk_output)
6668 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6669 else if (added_control) {
6670 int num_out = 0, reason = 0, now_filled = 0;
6674 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6675 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6676 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6687 sctp_sendall_completes(void *ptr, uint32_t val)
6689 struct sctp_copy_all *ca;
6691 ca = (struct sctp_copy_all *)ptr;
6693 * Do a notify here? Kacheong suggests that the notify be done at
6694 * the send time.. so you would push up a notification if any send
6695 * failed. Don't know if this is feasible since the only failures we
6696 * have are "memory" related and if you cannot get an mbuf to send
6697 * the data you surely can't get an mbuf to send up to notify the
6698 * user you can't send the data :->
6701 /* now free everything */
6702 sctp_m_freem(ca->m);
6703 SCTP_FREE(ca, SCTP_M_COPYAL);
6707 #define MC_ALIGN(m, len) do { \
6708 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
6713 static struct mbuf *
6714 sctp_copy_out_all(struct uio *uio, int len)
6716 struct mbuf *ret, *at;
6717 int left, willcpy, cancpy, error;
6719 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
6725 SCTP_BUF_LEN(ret) = 0;
6726 /* save space for the data chunk header */
6727 cancpy = M_TRAILINGSPACE(ret);
6728 willcpy = min(cancpy, left);
6731 /* Align data to the end */
6732 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6738 SCTP_BUF_LEN(at) = willcpy;
6739 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6742 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
6743 if (SCTP_BUF_NEXT(at) == NULL) {
6746 at = SCTP_BUF_NEXT(at);
6747 SCTP_BUF_LEN(at) = 0;
6748 cancpy = M_TRAILINGSPACE(at);
6749 willcpy = min(cancpy, left);
6756 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6757 struct sctp_sndrcvinfo *srcv)
6760 struct sctp_copy_all *ca;
6762 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6766 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6769 memset(ca, 0, sizeof(struct sctp_copy_all));
6773 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6776 * take off the sendall flag, it would be bad if we failed to do
6779 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6780 /* get length and mbuf chain */
6782 ca->sndlen = uio->uio_resid;
6783 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6784 if (ca->m == NULL) {
6785 SCTP_FREE(ca, SCTP_M_COPYAL);
6786 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6790 /* Gather the length of the send */
6796 ca->sndlen += SCTP_BUF_LEN(m);
6797 m = SCTP_BUF_NEXT(m);
6801 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6802 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6803 SCTP_ASOC_ANY_STATE,
6805 sctp_sendall_completes, inp, 1);
6807 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6808 SCTP_FREE(ca, SCTP_M_COPYAL);
6809 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6817 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6819 struct sctp_tmit_chunk *chk, *nchk;
6821 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6822 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6823 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6825 sctp_m_freem(chk->data);
6828 asoc->ctrl_queue_cnt--;
6829 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6835 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6837 struct sctp_association *asoc;
6838 struct sctp_tmit_chunk *chk, *nchk;
6839 struct sctp_asconf_chunk *acp;
6842 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6843 /* find SCTP_ASCONF chunk in queue */
6844 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6846 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6847 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6852 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6854 sctp_m_freem(chk->data);
6857 asoc->ctrl_queue_cnt--;
6858 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6865 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6866 struct sctp_association *asoc,
6867 struct sctp_tmit_chunk **data_list,
6869 struct sctp_nets *net)
6872 struct sctp_tmit_chunk *tp1;
6874 for (i = 0; i < bundle_at; i++) {
6875 /* off of the send queue */
6876 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6877 asoc->send_queue_cnt--;
6880 * Any chunk NOT 0 you zap the time; chunk 0 gets
6881 * zapped or set based on whether an RTO measurement is needed.
6884 data_list[i]->do_rtt = 0;
6887 data_list[i]->sent_rcv_time = net->last_sent_time;
6888 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6889 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6890 if (data_list[i]->whoTo == NULL) {
6891 data_list[i]->whoTo = net;
6892 atomic_add_int(&net->ref_count, 1);
6894 /* on to the sent queue */
6895 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6896 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6897 struct sctp_tmit_chunk *tpp;
6899 /* need to move back */
6901 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6903 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6907 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6910 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6912 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6917 /* This does not lower until the cum-ack passes it */
6918 asoc->sent_queue_cnt++;
6919 if ((asoc->peers_rwnd <= 0) &&
6920 (asoc->total_flight == 0) &&
6922 /* Mark the chunk as being a window probe */
6923 SCTP_STAT_INCR(sctps_windowprobed);
6925 #ifdef SCTP_AUDITING_ENABLED
6926 sctp_audit_log(0xC2, 3);
6928 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6929 data_list[i]->snd_count = 1;
6930 data_list[i]->rec.data.chunk_was_revoked = 0;
6931 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6932 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6933 data_list[i]->whoTo->flight_size,
6934 data_list[i]->book_size,
6935 (uintptr_t) data_list[i]->whoTo,
6936 data_list[i]->rec.data.TSN_seq);
6938 sctp_flight_size_increase(data_list[i]);
6939 sctp_total_flight_increase(stcb, data_list[i]);
6940 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6941 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6942 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6944 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6945 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6946 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6947 /* SWS sender side engages */
6948 asoc->peers_rwnd = 0;
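/*
 * Illustration of the rwnd bookkeeping above: each chunk put on the wire
 * charges the peer's rwnd with send_size plus the sctp_peer_chunk_oh sysctl
 * (an assumed per-chunk overhead at the peer). Once the remaining rwnd
 * falls below the endpoint's sctp_sws_sender threshold it is clamped to
 * zero, so only a genuine window probe goes out instead of a dribble of
 * tiny segments; this is the sender-side SWS avoidance.
 */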
6951 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
6952 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
6957 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
6958 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6963 struct sctp_tmit_chunk *chk, *nchk;
6965 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6966 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6967 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
6968 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6969 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6970 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
6971 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6972 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6973 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6974 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6975 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
6976 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
6977 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
6978 /* Stray chunks must be cleaned up */
6980 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6982 sctp_m_freem(chk->data);
6985 asoc->ctrl_queue_cnt--;
6986 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
6987 asoc->fwd_tsn_cnt--;
6988 sctp_free_a_chunk(stcb, chk, so_locked);
6989 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
6990 /* special handling, we must look into the param */
6991 if (chk != asoc->str_reset) {
6992 goto clean_up_anyway;
7000 sctp_can_we_split_this(struct sctp_tcb *stcb,
7002 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7005 * Make a decision on if I should split a msg into multiple parts.
7006 * This is only asked of incomplete messages.
7010 * If we are doing EEOR we need to always send it if it's the
7011 * entire thing, since it might be all the guy is putting in
7014 if (goal_mtu >= length) {
7016 * If we have data outstanding,
7017 * we get another chance when the sack
7018 * arrives to transmit - wait for more data
7020 if (stcb->asoc.total_flight == 0) {
7022 * If nothing is in flight, we zero the packet counter.
7030 /* You can fill the rest */
7035 * For those strange folk that make the send buffer
7036 * smaller than our fragmentation point, we can't
7037 * get a full msg in so we have to allow splitting.
7039 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7042 if ((length <= goal_mtu) ||
7043 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7044 /* Sub-optimal residual, don't split in non-eeor mode. */
7048 * If we reach here length is larger than the goal_mtu. Do we wish
7049 * to split it for the sake of packet putting together?
7051 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7052 /* It's OK to split it */
7053 return (min(goal_mtu, frag_point));
7055 /* Nope, can't split */
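/*
 * Worked example of the split decision (hypothetical numbers, default-ish
 * sysctl values assumed): with a 9000-byte incomplete message,
 * goal_mtu = 1400 and frag_point = 1400, the residual check passes and,
 * provided goal_mtu is at least min(sctp_min_split_point, frag_point), the
 * function returns min(1400, 1400) = 1400, i.e. take one MTU-sized fragment
 * now. A 1450-byte message against the same goal_mtu instead hits the
 * sub-optimal residual case and is not split outside of EEOR mode.
 */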
7061 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7062 struct sctp_stream_out *strq,
7064 uint32_t frag_point,
7070 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7075 /* Move from the stream to the send_queue keeping track of the total */
7076 struct sctp_association *asoc;
7077 struct sctp_stream_queue_pending *sp;
7078 struct sctp_tmit_chunk *chk;
7079 struct sctp_data_chunk *dchkh;
7080 uint32_t to_move, length;
7081 uint8_t rcv_flags = 0;
7083 uint8_t send_lock_up = 0;
7085 SCTP_TCB_LOCK_ASSERT(stcb);
7088 /* sa_ignore FREED_MEMORY */
7089 sp = TAILQ_FIRST(&strq->outqueue);
7092 if (send_lock_up == 0) {
7093 SCTP_TCB_SEND_LOCK(stcb);
7096 sp = TAILQ_FIRST(&strq->outqueue);
7100 if (strq->last_msg_incomplete) {
7101 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7103 strq->last_msg_incomplete);
7104 strq->last_msg_incomplete = 0;
7108 SCTP_TCB_SEND_UNLOCK(stcb);
7113 if ((sp->msg_is_complete) && (sp->length == 0)) {
7114 if (sp->sender_all_done) {
7116 * We are doing deferred cleanup. Last time through
7117 * when we took all the data the sender_all_done was
7120 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7121 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7122 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7123 sp->sender_all_done,
7125 sp->msg_is_complete,
7129 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7130 SCTP_TCB_SEND_LOCK(stcb);
7133 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7134 TAILQ_REMOVE(&strq->outqueue, sp, next);
7135 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7137 sctp_free_remote_addr(sp->net);
7141 sctp_m_freem(sp->data);
7144 sctp_free_a_strmoq(stcb, sp, so_locked);
7145 /* we can't be locked to it */
7147 stcb->asoc.locked_on_sending = NULL;
7149 SCTP_TCB_SEND_UNLOCK(stcb);
7152 /* back to get the next msg */
7156 * sender just finished this but still holds a reference
7165 /* is there some to get */
7166 if (sp->length == 0) {
7172 } else if (sp->discard_rest) {
7173 if (send_lock_up == 0) {
7174 SCTP_TCB_SEND_LOCK(stcb);
7177 /* Whack down the size */
7178 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7179 if ((stcb->sctp_socket != NULL) && \
7180 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7181 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7182 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7185 sctp_m_freem(sp->data);
7187 sp->tail_mbuf = NULL;
7197 some_taken = sp->some_taken;
7198 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7199 sp->msg_is_complete = 1;
7202 length = sp->length;
7203 if (sp->msg_is_complete) {
7204 /* The message is complete */
7205 to_move = min(length, frag_point);
7206 if (to_move == length) {
7207 /* All of it fits in the MTU */
7208 if (sp->some_taken) {
7209 rcv_flags |= SCTP_DATA_LAST_FRAG;
7210 sp->put_last_out = 1;
7212 rcv_flags |= SCTP_DATA_NOT_FRAG;
7213 sp->put_last_out = 1;
7216 /* Not all of it fits, we fragment */
7217 if (sp->some_taken == 0) {
7218 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7223 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7226 * We use a snapshot of length in case it
7227 * is expanding during the compare.
7232 if (to_move >= llen) {
7234 if (send_lock_up == 0) {
7236 * We are taking all of an incomplete msg
7237 * thus we need a send lock.
7239 SCTP_TCB_SEND_LOCK(stcb);
7241 if (sp->msg_is_complete) {
7243 * the sender finished the msg
7250 if (sp->some_taken == 0) {
7251 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7255 /* Nothing to take. */
7256 if (sp->some_taken) {
7265 /* If we reach here, we can copy out a chunk */
7266 sctp_alloc_a_chunk(stcb, chk);
7268 /* No chunk memory */
7274 * Setup for unordered if needed by looking at the user sent info
7277 if (sp->sinfo_flags & SCTP_UNORDERED) {
7278 rcv_flags |= SCTP_DATA_UNORDERED;
7280 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7281 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7282 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7284 /* clear out the chunk before setting up */
7285 memset(chk, 0, sizeof(*chk));
7286 chk->rec.data.rcv_flags = rcv_flags;
7288 if (to_move >= length) {
7289 /* we think we can steal the whole thing */
7290 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7291 SCTP_TCB_SEND_LOCK(stcb);
7294 if (to_move < sp->length) {
7295 /* bail, it changed */
7298 chk->data = sp->data;
7299 chk->last_mbuf = sp->tail_mbuf;
7300 /* register the stealing */
7301 sp->data = sp->tail_mbuf = NULL;
7306 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
7307 chk->last_mbuf = NULL;
7308 if (chk->data == NULL) {
7309 sp->some_taken = some_taken;
7310 sctp_free_a_chunk(stcb, chk, so_locked);
7315 #ifdef SCTP_MBUF_LOGGING
7316 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7321 if (SCTP_BUF_IS_EXTENDED(mat)) {
7322 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7324 mat = SCTP_BUF_NEXT(mat);
7328 /* Pull off the data */
7329 m_adj(sp->data, to_move);
7330 /* Now lets work our way down and compact it */
7332 while (m && (SCTP_BUF_LEN(m) == 0)) {
7333 sp->data = SCTP_BUF_NEXT(m);
7334 SCTP_BUF_NEXT(m) = NULL;
7335 if (sp->tail_mbuf == m) {
7337 * Freeing tail? TSNH since
7338 * we supposedly were taking less
7339 * than the sp->length.
7342 panic("Huh, freeing tail? - TSNH");
7344 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7345 sp->tail_mbuf = sp->data = NULL;
7354 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7355 chk->copy_by_ref = 1;
7357 chk->copy_by_ref = 0;
7360 * get last_mbuf and a count of mbuf usage. This is ugly but hopefully
7361 * it's only one mbuf.
7363 if (chk->last_mbuf == NULL) {
7364 chk->last_mbuf = chk->data;
7365 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7366 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7369 if (to_move > length) {
7370 /*- This should not happen either
7371 * since we always lower to_move to the size
7372 * of sp->length if it's larger.
7375 panic("Huh, how can to_move be larger?");
7377 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7381 atomic_subtract_int(&sp->length, to_move);
7383 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7384 /* Not enough room for a chunk header, get some */
7387 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
7390 * we're in trouble here. _PREPEND below will free
7391 * all the data if there is no leading space, so we
7392 * must put the data back and restore.
7394 if (send_lock_up == 0) {
7395 SCTP_TCB_SEND_LOCK(stcb);
7398 if (chk->data == NULL) {
7399 /* unsteal the data */
7400 sp->data = chk->data;
7401 sp->tail_mbuf = chk->last_mbuf;
7405 /* reassemble the data */
7407 sp->data = chk->data;
7408 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7410 sp->some_taken = some_taken;
7411 atomic_add_int(&sp->length, to_move);
7414 sctp_free_a_chunk(stcb, chk, so_locked);
7418 SCTP_BUF_LEN(m) = 0;
7419 SCTP_BUF_NEXT(m) = chk->data;
7421 M_ALIGN(chk->data, 4);
7424 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
7425 if (chk->data == NULL) {
7426 /* HELP, TSNH since we assured it would not above? */
7428 panic("prepend fails HELP?");
7430 SCTP_PRINTF("prepend fails HELP?\n");
7431 sctp_free_a_chunk(stcb, chk, so_locked);
7437 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7438 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7439 chk->book_size_scale = 0;
7440 chk->sent = SCTP_DATAGRAM_UNSENT;
7443 chk->asoc = &stcb->asoc;
7444 chk->pad_inplace = 0;
7445 chk->no_fr_allowed = 0;
7446 chk->rec.data.stream_seq = sp->strseq;
7447 chk->rec.data.stream_number = sp->stream;
7448 chk->rec.data.payloadtype = sp->ppid;
7449 chk->rec.data.context = sp->context;
7450 chk->rec.data.doing_fast_retransmit = 0;
7452 chk->rec.data.timetodrop = sp->ts;
7453 chk->flags = sp->act_flags;
7456 chk->whoTo = sp->net;
7457 atomic_add_int(&chk->whoTo->ref_count, 1);
7461 if (sp->holds_key_ref) {
7462 chk->auth_keyid = sp->auth_keyid;
7463 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7464 chk->holds_key_ref = 1;
7466 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7467 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7468 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7469 (uintptr_t) stcb, sp->length,
7470 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7471 chk->rec.data.TSN_seq);
7473 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7475 * Put the rest of the things in place now. Size was done earlier in
7476 * previous loop prior to padding.
7479 #ifdef SCTP_ASOCLOG_OF_TSNS
7480 SCTP_TCB_LOCK_ASSERT(stcb);
7481 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7482 asoc->tsn_out_at = 0;
7483 asoc->tsn_out_wrapped = 1;
7485 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7486 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7487 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7488 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7489 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7490 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7491 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7492 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7496 dchkh->ch.chunk_type = SCTP_DATA;
7497 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7498 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7499 dchkh->dp.stream_id = htons(strq->stream_no);
7500 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7501 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7502 dchkh->ch.chunk_length = htons(chk->send_size);
7503 /* Now advance the chk->send_size by the actual pad needed. */
7504 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7509 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7510 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7511 chk->pad_inplace = 1;
7513 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7514 /* pad added an mbuf */
7515 chk->last_mbuf = lm;
7517 chk->send_size += pads;
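/*
 * Example of the padding above: a 37-byte payload plus the 16-byte DATA
 * chunk header gives book_size = send_size = 53; SCTP_SIZE32(53) is 56, so
 * pads = 3, sctp_pad_lastmbuf() appends three zero bytes (possibly in a
 * fresh mbuf, hence the last_mbuf update) and send_size becomes 56 while
 * book_size stays at the unpadded 53 for accounting.
 */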
7519 /* We only re-set the policy if it is on */
7520 if (sp->pr_sctp_on) {
7521 sctp_set_prsctp_policy(sp);
7522 asoc->pr_sctp_cnt++;
7523 chk->pr_sctp_on = 1;
7525 chk->pr_sctp_on = 0;
7527 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7528 /* All done pull and kill the message */
7529 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7530 if (sp->put_last_out == 0) {
7531 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7532 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7533 sp->sender_all_done,
7535 sp->msg_is_complete,
7539 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7540 SCTP_TCB_SEND_LOCK(stcb);
7543 TAILQ_REMOVE(&strq->outqueue, sp, next);
7544 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7546 sctp_free_remote_addr(sp->net);
7550 sctp_m_freem(sp->data);
7553 sctp_free_a_strmoq(stcb, sp, so_locked);
7555 /* we can't be locked to it */
7557 stcb->asoc.locked_on_sending = NULL;
7559 /* more to go, we are locked */
7562 asoc->chunks_on_out_queue++;
7563 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7564 asoc->send_queue_cnt++;
7567 SCTP_TCB_SEND_UNLOCK(stcb);
7575 sctp_fill_outqueue(struct sctp_tcb *stcb,
7576 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7577 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7582 struct sctp_association *asoc;
7583 struct sctp_stream_out *strq, *strqn;
7584 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7587 SCTP_TCB_LOCK_ASSERT(stcb);
7590 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
7591 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7593 /* ?? not sure what else to do */
7594 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7597 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7599 /* Need an allowance for the data chunk header too */
7600 goal_mtu -= sizeof(struct sctp_data_chunk);
7602 /* must make even word boundary */
7603 goal_mtu &= 0xfffffffc;
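/*
 * For example, an IPv6 destination with net->mtu = 1500 gets goal_mtu of
 * 1500 minus the IPv6 plus SCTP common header overhead (SCTP_MIN_OVERHEAD)
 * minus the 16-byte DATA chunk header, rounded down to a 4-byte boundary;
 * that is the per-pass budget the loop below tries to move out of the
 * stream queues.
 */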
7604 if (asoc->locked_on_sending) {
7605 /* We are stuck on one stream until the message completes. */
7606 strq = asoc->locked_on_sending;
7609 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7613 while ((goal_mtu > 0) && strq) {
7616 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7617 &giveup, eeor_mode, &bail, so_locked);
7619 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7622 asoc->locked_on_sending = strq;
7623 if ((moved_how_much == 0) || (giveup) || bail)
7624 /* no more to move for now */
7627 asoc->locked_on_sending = NULL;
7628 if ((giveup) || bail) {
7631 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7636 total_moved += moved_how_much;
7637 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7638 goal_mtu &= 0xfffffffc;
7643 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7645 if (total_moved == 0) {
7646 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7647 (net == stcb->asoc.primary_destination)) {
7648 /* ran dry for primary network net */
7649 SCTP_STAT_INCR(sctps_primary_randry);
7650 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7651 /* ran dry with CMT on */
7652 SCTP_STAT_INCR(sctps_cmt_randry);
7658 sctp_fix_ecn_echo(struct sctp_association *asoc)
7660 struct sctp_tmit_chunk *chk;
7662 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7663 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7664 chk->sent = SCTP_DATAGRAM_UNSENT;
7670 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7672 struct sctp_association *asoc;
7673 struct sctp_tmit_chunk *chk;
7674 struct sctp_stream_queue_pending *sp;
7681 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7682 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7683 if (sp->net == net) {
7684 sctp_free_remote_addr(sp->net);
7689 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7690 if (chk->whoTo == net) {
7691 sctp_free_remote_addr(chk->whoTo);
7698 sctp_med_chunk_output(struct sctp_inpcb *inp,
7699 struct sctp_tcb *stcb,
7700 struct sctp_association *asoc,
7703 int control_only, int from_where,
7704 struct timeval *now, int *now_filled, int frag_point, int so_locked
7705 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7711 * Ok this is the generic chunk service queue. we must do the
7712 * following: - Service the stream queue that is next, moving any
7713 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7714 * LAST to the out queue in one pass) and assigning TSN's - Check to
7715 * see if the cwnd/rwnd allows any output, if so we go ahead and
7716 * formulate and send the low level chunks, making sure to combine
7717 * any control in the control chunk queue also.
7719 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7720 struct mbuf *outchain, *endoutchain;
7721 struct sctp_tmit_chunk *chk, *nchk;
7723 /* temp arrays for unlinking */
7724 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7725 int no_fragmentflg, error;
7726 unsigned int max_rwnd_per_dest, max_send_per_dest;
7727 int one_chunk, hbflag, skip_data_for_this_net;
7728 int asconf, cookie, no_out_cnt;
7729 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7730 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7732 uint32_t auth_offset = 0;
7733 struct sctp_auth_chunk *auth = NULL;
7734 uint16_t auth_keyid;
7735 int override_ok = 1;
7736 int data_auth_reqd = 0;
7739 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the destination.
7746 auth_keyid = stcb->asoc.authinfo.active_keyid;
7748 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7749 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7750 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7755 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7757 * First lets prime the pump. For each destination, if there is room
7758 * in the flight size, attempt to pull an MTU's worth out of the
7759 * stream queues into the general send_queue
7761 #ifdef SCTP_AUDITING_ENABLED
7762 sctp_audit_log(0xC2, 2);
7764 SCTP_TCB_LOCK_ASSERT(stcb);
7766 if ((control_only) || (asoc->stream_reset_outstanding))
7771 /* Nothing possible to send? */
7772 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7773 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7774 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7775 TAILQ_EMPTY(&asoc->send_queue) &&
7776 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
7781 if (asoc->peers_rwnd == 0) {
7782 /* No room in peers rwnd */
7784 if (asoc->total_flight > 0) {
7785 /* we are allowed one chunk in flight */
7789 if (stcb->asoc.ecn_echo_cnt_onq) {
7790 /* Record where a sack goes, if any */
7791 if (no_data_chunks &&
7792 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7793 /* Nothing but ECNe to send - we don't do that */
7794 goto nothing_to_send;
7796 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7797 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7798 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7799 sack_goes_to = chk->whoTo;
7804 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7805 if (stcb->sctp_socket)
7806 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7808 max_send_per_dest = 0;
7809 if ((no_data_chunks == 0) &&
7810 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7811 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7813 * This for loop we are in takes in each net, if
7814 * it's got space in cwnd and has data sent to it
7815 * (when CMT is off) then it calls
7816 * sctp_fill_outqueue for the net. This gets data on
7817 * the send queue for that network.
7819 * In sctp_fill_outqueue TSN's are assigned and data is
7820 * copied out of the stream buffers. Note mostly
7821 * copy by reference (we hope).
7823 net->window_probe = 0;
7824 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ||
7825 (net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
7826 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7827 sctp_log_cwnd(stcb, net, 1,
7828 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7832 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7833 (net->flight_size == 0)) {
7834 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7836 if ((asoc->sctp_cmt_on_off == 0) &&
7837 (asoc->primary_destination != net) &&
7838 (net->ref_count < 2)) {
7839 /* nothing can be in queue for this guy */
7840 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7841 sctp_log_cwnd(stcb, net, 2,
7842 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7846 if (net->flight_size >= net->cwnd) {
7847 /* skip this network, no room - can't fill */
7848 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7849 sctp_log_cwnd(stcb, net, 3,
7850 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7854 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7855 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7857 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7859 /* memory alloc failure */
7865 /* now service each destination and send out what we can for it */
7866 /* Nothing to send? */
7867 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7868 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7869 TAILQ_EMPTY(&asoc->send_queue)) {
7873 if (asoc->sctp_cmt_on_off > 0) {
7874 /* get the last start point */
7875 start_at = asoc->last_net_cmt_send_started;
7876 if (start_at == NULL) {
7877 /* NULL, so start at the beginning */
7878 start_at = TAILQ_FIRST(&asoc->nets);
7880 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7881 if (start_at == NULL) {
7882 start_at = TAILQ_FIRST(&asoc->nets);
7885 asoc->last_net_cmt_send_started = start_at;
7887 start_at = TAILQ_FIRST(&asoc->nets);
7889 old_start_at = NULL;
7890 again_one_more_time:
7891 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7892 /* how much can we send? */
7893 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7894 if (old_start_at && (old_start_at == net)) {
7895 /* through list completely. */
7899 if ((asoc->sctp_cmt_on_off == 0) &&
7900 (asoc->primary_destination != net) &&
7901 (net->ref_count < 2)) {
7903 * Ref-count of 1 so we cannot have data or control
7904 * queued to this address. Skip it (non-CMT).
7908 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7909 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7910 (net->flight_size >= net->cwnd)) {
7912 * Nothing on control or asconf and flight is full,
7913 * we can skip even in the CMT case.
7917 ctl_cnt = bundle_at = 0;
7918 endoutchain = outchain = NULL;
7921 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7922 skip_data_for_this_net = 1;
7924 skip_data_for_this_net = 0;
7926 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7928 * if we have a route and an ifp check to see if we
7929 * have room to send to this guy
7933 ifp = net->ro.ro_rt->rt_ifp;
7934 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7935 SCTP_STAT_INCR(sctps_ifnomemqueued);
7936 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7937 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7942 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7945 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7950 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7960 if (mtu > asoc->peers_rwnd) {
7961 if (asoc->total_flight > 0) {
7962 /* We have a packet in flight somewhere */
7963 r_mtu = asoc->peers_rwnd;
7965 /* We are always allowed to send one MTU out */
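/*
 * e.g. with mtu = 1440 and a peer rwnd of only 300 bytes: if something is
 * already in flight, r_mtu is clamped to 300, but if nothing is in flight
 * we still allow a full MTU so that a tiny advertised window can never
 * deadlock the sender.
 */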
7972 /************************/
7973 /* ASCONF transmission */
7974 /************************/
7975 /* Now first lets go through the asconf queue */
7976 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
7977 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
7980 if (chk->whoTo != net) {
7982 * No, not sent to the network we are looking at.
7987 if (chk->data == NULL) {
7990 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
7991 chk->sent != SCTP_DATAGRAM_RESEND) {
7995 * if no AUTH is yet included and this chunk
7996 * requires it, make sure to account for it. We
7997 * don't apply the size until the AUTH chunk is
7998 * actually added below in case there is no room for
7999 * this chunk. NOTE: we overload the use of "omtu" here
8002 if ((auth == NULL) &&
8003 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8004 stcb->asoc.peer_auth_chunks)) {
8005 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8008 /* Here we do NOT factor the r_mtu */
8009 if ((chk->send_size < (int)(mtu - omtu)) ||
8010 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8012 * We probably should glom the mbuf chain
8013 * from the chk->data for control but the
8014 * problem is it becomes yet one more level
8015 * of tracking to do if for some reason
8016 * output fails. Then I have got to
8017 * reconstruct the merged control chain.. el
8018 * yucko.. for now we take the easy way and
8022 * Add an AUTH chunk, if chunk requires it
8023 * save the offset into the chain for AUTH
8025 if ((auth == NULL) &&
8026 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8027 stcb->asoc.peer_auth_chunks))) {
8028 outchain = sctp_add_auth_chunk(outchain,
8033 chk->rec.chunk_id.id);
8034 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8036 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8037 (int)chk->rec.chunk_id.can_take_data,
8038 chk->send_size, chk->copy_by_ref);
8039 if (outchain == NULL) {
8041 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8044 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8045 /* update our MTU size */
8046 if (mtu > (chk->send_size + omtu))
8047 mtu -= (chk->send_size + omtu);
8050 to_out += (chk->send_size + omtu);
8051 /* Do clear IP_DF ? */
8052 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8055 if (chk->rec.chunk_id.can_take_data)
8058 * set hb flag since we can use these for RTO
8064 * should sysctl this: don't bundle data
8065 * with ASCONF since it requires AUTH
8068 chk->sent = SCTP_DATAGRAM_SENT;
8072 * Ok we are out of room but we can
8073 * output without affecting the
8074 * flight size since this little guy
8075 * is a control only packet.
8077 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8079 * do NOT clear the asconf flag as
8080 * it is used to do appropriate
8081 * source address selection.
8083 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8084 (struct sockaddr *)&net->ro._l_addr,
8085 outchain, auth_offset, auth,
8086 stcb->asoc.authinfo.active_keyid,
8087 no_fragmentflg, 0, NULL, asconf,
8088 inp->sctp_lport, stcb->rport,
8089 htonl(stcb->asoc.peer_vtag),
8090 net->port, so_locked, NULL, NULL))) {
8091 if (error == ENOBUFS) {
8092 asoc->ifp_had_enobuf = 1;
8093 SCTP_STAT_INCR(sctps_lowlevelerr);
8095 if (from_where == 0) {
8096 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8098 if (*now_filled == 0) {
8099 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8101 *now = net->last_sent_time;
8103 net->last_sent_time = *now;
8106 /* error, could not output */
8107 if (error == EHOSTUNREACH) {
8113 sctp_move_chunks_from_net(stcb, net);
8118 asoc->ifp_had_enobuf = 0;
8119 if (*now_filled == 0) {
8120 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8122 *now = net->last_sent_time;
8124 net->last_sent_time = *now;
8128 * increase the number we sent, if a
8129 * cookie is sent we don't tell them any was sent out.
8132 outchain = endoutchain = NULL;
8136 *num_out += ctl_cnt;
8137 /* recalc a clean slate and setup */
8138 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
8139 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
8141 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
8148 /************************/
8149 /* Control transmission */
8150 /************************/
8151 /* Now first lets go through the control queue */
8152 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8153 if ((sack_goes_to) &&
8154 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8155 (chk->whoTo != sack_goes_to)) {
8157 * if we have a sack in queue, and we are
8158 * looking at an ecn echo that is NOT queued
8159 * to where the sack is going..
8161 if (chk->whoTo == net) {
8163 * Don't transmit it to where its
8164 * going (current net)
8167 } else if (sack_goes_to == net) {
8169 * But do transmit it to this
8172 goto skip_net_check;
8175 if (chk->whoTo != net) {
8177 * No, not sent to the network we are looking at.
8183 if (chk->data == NULL) {
8186 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8188 * It must be unsent. Cookies and ASCONF's
8189 * hang around but their timers will force
8190 * when marked for resend.
8195 * if no AUTH is yet included and this chunk
8196 * requires it, make sure to account for it. We
8197 * don't apply the size until the AUTH chunk is
8198 * actually added below in case there is no room for
8199 * this chunk. NOTE: we overload the use of "omtu" here
8202 if ((auth == NULL) &&
8203 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8204 stcb->asoc.peer_auth_chunks)) {
8205 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8208 /* Here we do NOT factor the r_mtu */
8209 if ((chk->send_size <= (int)(mtu - omtu)) ||
8210 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8212 * We probably should glom the mbuf chain
8213 * from the chk->data for control but the
8214 * problem is it becomes yet one more level
8215 * of tracking to do if for some reason
8216 * output fails. Then I have got to
8217 * reconstruct the merged control chain.. el
8218 * yucko.. for now we take the easy way and
8222 * Add an AUTH chunk, if chunk requires it
8223 * save the offset into the chain for AUTH
8225 if ((auth == NULL) &&
8226 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8227 stcb->asoc.peer_auth_chunks))) {
8228 outchain = sctp_add_auth_chunk(outchain,
8233 chk->rec.chunk_id.id);
8234 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8236 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8237 (int)chk->rec.chunk_id.can_take_data,
8238 chk->send_size, chk->copy_by_ref);
8239 if (outchain == NULL) {
8241 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8244 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8245 /* update our MTU size */
8246 if (mtu > (chk->send_size + omtu))
8247 mtu -= (chk->send_size + omtu);
8250 to_out += (chk->send_size + omtu);
8251 /* Do clear IP_DF ? */
8252 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8255 if (chk->rec.chunk_id.can_take_data)
8257 /* Mark things to be removed, if needed */
8258 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8259 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8260 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8261 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8262 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8263 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8264 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8265 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8266 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8267 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8268 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8270 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8273 * JRS 5/14/07 - Set the
8274 * flag to say a heartbeat is being sent.
8279 /* remove these chunks at the end */
8280 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8281 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8282 /* turn off the timer */
8283 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8284 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8285 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8291 * Other chunks, since they have
8292 * timers running (i.e. COOKIE) we
8293 * just "trust" that it gets sent or retransmitted.
8297 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8300 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8302 * Increment ecne send count
8303 * here this means we may be
8304 * over-zealous in our
8305 * counting if the send
8306 * fails, but it's the best
8307 * place to do it (we used
8308 * to do it in the queue of
8309 * the chunk, but that did
8310 * not tell how many times it was sent.
8313 SCTP_STAT_INCR(sctps_sendecne);
8315 chk->sent = SCTP_DATAGRAM_SENT;
8320 * Ok we are out of room but we can
8321 * output without affecting the
8322 * flight size since this little guy
8323 * is a control only packet.
8326 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8328 * do NOT clear the asconf
8329 * flag as it is used to do
8330 * appropriate source
8331 * address selection.
8335 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8338 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8339 (struct sockaddr *)&net->ro._l_addr,
8342 stcb->asoc.authinfo.active_keyid,
8343 no_fragmentflg, 0, NULL, asconf,
8344 inp->sctp_lport, stcb->rport,
8345 htonl(stcb->asoc.peer_vtag),
8346 net->port, so_locked, NULL, NULL))) {
8347 if (error == ENOBUFS) {
8348 asoc->ifp_had_enobuf = 1;
8349 SCTP_STAT_INCR(sctps_lowlevelerr);
8351 if (from_where == 0) {
8352 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8354 /* error, could not output */
8356 if (*now_filled == 0) {
8357 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8359 *now = net->last_sent_time;
8361 net->last_sent_time = *now;
8365 if (error == EHOSTUNREACH) {
8371 sctp_move_chunks_from_net(stcb, net);
8376 asoc->ifp_had_enobuf = 0;
8377 /* Only HB or ASCONF advances time */
8379 if (*now_filled == 0) {
8380 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8382 *now = net->last_sent_time;
8384 net->last_sent_time = *now;
8389 * increase the number we sent, if a
8390 * cookie is sent we don't tell them
8393 outchain = endoutchain = NULL;
8397 *num_out += ctl_cnt;
8398 /* recalc a clean slate and setup */
8399 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
8400 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
8402 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
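/*
 * SCTP_MIN_OVERHEAD / SCTP_MIN_V4_OVERHEAD cover the IPv6
 * resp. IPv4 header plus the common SCTP header, so "mtu"
 * is the room left for chunks in a single packet.
 */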
8409 /* JRI: if dest is in PF state, do not send data to it */
8410 if ((asoc->sctp_cmt_on_off > 0) &&
8411 (asoc->sctp_cmt_pf > 0) &&
8412 (net->dest_state & SCTP_ADDR_PF)) {
8415 if (net->flight_size >= net->cwnd) {
8418 if ((asoc->sctp_cmt_on_off > 0) &&
8419 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8420 (net->flight_size > max_rwnd_per_dest)) {
8424 * We need a specific accounting for the usage of the send
8425 * buffer. We also need to check the number of messages per
8426 * net. For now, this is better than nothing and it is disabled
8429 if ((asoc->sctp_cmt_on_off > 0) &&
8430 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8431 (max_send_per_dest > 0) &&
8432 (net->flight_size > max_send_per_dest)) {
8435 /*********************/
8436 /* Data transmission */
8437 /*********************/
8439 * if AUTH for DATA is required and no AUTH has been added
8440 * yet, account for this in the mtu now... if no data can be
8441 * bundled, this adjustment won't matter anyway since the
8442 * packet will be going out...
8444 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8445 stcb->asoc.peer_auth_chunks);
8446 if (data_auth_reqd && (auth == NULL)) {
8447 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8449 /* now let's add any data within the MTU constraints */
8450 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8452 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8453 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8459 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8460 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8470 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8471 (skip_data_for_this_net == 0)) ||
8473 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8474 if (no_data_chunks) {
8475 /* let only control go out */
8479 if (net->flight_size >= net->cwnd) {
8480 /* skip this net, no room for data */
8484 if ((chk->whoTo != NULL) &&
8485 (chk->whoTo != net)) {
8486 /* Don't send the chunk on this net */
8489 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8491 * strange, we have a chunk that is
8492 * too big for its destination and
8493 * yet no fragment ok flag.
8494 * Something went wrong when the
8495 * PMTU changed...we did not mark
8496 * this chunk for some reason?? I
8497 * will fix it here by letting IP
8498 * fragment it for now and printing
8499 * a warning. This really should not
8502 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8503 chk->send_size, mtu);
8504 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8506 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8507 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8508 struct sctp_data_chunk *dchkh;
8510 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8511 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8513 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8514 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8515 /* ok we will add this one */
8518 * Add an AUTH chunk, if chunk
8519 * requires it, save the offset into
8520 * the chain for AUTH
8522 if (data_auth_reqd) {
8524 outchain = sctp_add_auth_chunk(outchain,
8530 auth_keyid = chk->auth_keyid;
8532 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8533 } else if (override_ok) {
8538 auth_keyid = chk->auth_keyid;
8540 } else if (auth_keyid != chk->auth_keyid) {
8548 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8549 chk->send_size, chk->copy_by_ref);
8550 if (outchain == NULL) {
8551 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8552 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8553 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8556 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8559 /* update our MTU size */
8560 /* Do clear IP_DF ? */
8561 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8564 /* unsigned subtraction of mtu */
8565 if (mtu > chk->send_size)
8566 mtu -= chk->send_size;
8569 /* unsigned subtraction of r_mtu */
8570 if (r_mtu > chk->send_size)
8571 r_mtu -= chk->send_size;
8575 to_out += chk->send_size;
8576 if ((to_out > mx_mtu) && no_fragmentflg) {
8578 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8580 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8584 chk->window_probe = 0;
8585 data_list[bundle_at++] = chk;
8586 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8590 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8591 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8592 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8594 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8596 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8597 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8607 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8609 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8610 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8611 data_list[0]->window_probe = 1;
8612 net->window_probe = 1;
8618 * Must be sent in order of the
8619 * TSN's (on a network)
8623 } /* for (chunk gather loop for this net) */
8624 } /* if asoc.state OPEN */
8626 /* Is there something to send for this destination? */
8628 /* We may need to start a control timer or two */
8630 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8633 * do NOT clear the asconf flag as it is
8634 * used to do appropriate source address
8639 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8642 /* must start a send timer if data is being sent */
8643 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8645 * no timer running on this destination
8648 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8649 } else if ((asoc->sctp_cmt_on_off > 0) &&
8650 (asoc->sctp_cmt_pf > 0) &&
8652 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) &&
8653 (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8655 * JRS 5/14/07 - If a HB has been sent to a
8656 * PF destination and no T3 timer is
8657 * currently running, start the T3 timer to
8658 * track the HBs that were sent.
8660 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8662 /* Now send it, if there is anything to send :> */
8663 if ((error = sctp_lowlevel_chunk_output(inp,
8666 (struct sockaddr *)&net->ro._l_addr,
8675 inp->sctp_lport, stcb->rport,
8676 htonl(stcb->asoc.peer_vtag),
8677 net->port, so_locked, NULL, NULL))) {
8678 /* error, we could not output */
8679 if (error == ENOBUFS) {
8680 SCTP_STAT_INCR(sctps_lowlevelerr);
8681 asoc->ifp_had_enobuf = 1;
8683 if (from_where == 0) {
8684 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8686 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8688 if (*now_filled == 0) {
8689 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8691 *now = net->last_sent_time;
8693 net->last_sent_time = *now;
8697 if (error == EHOSTUNREACH) {
8699 * Destination went unreachable
8702 sctp_move_chunks_from_net(stcb, net);
8706 * I add this line to be paranoid. As far as
8707 * I can tell the continue takes us back to
8708 * the top of the for, but just to make sure
8709 * I will reset these again here.
8711 ctl_cnt = bundle_at = 0;
8712 continue; /* This takes us back to the
8713 * for() for the nets. */
8715 asoc->ifp_had_enobuf = 0;
8717 outchain = endoutchain = NULL;
8720 if (bundle_at || hbflag) {
8721 /* For data/asconf and hb set time */
8722 if (*now_filled == 0) {
8723 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8725 *now = net->last_sent_time;
8727 net->last_sent_time = *now;
8731 *num_out += (ctl_cnt + bundle_at);
8734 /* setup for a RTO measurement */
8735 tsns_sent = data_list[0]->rec.data.TSN_seq;
8736 /* fill time if not already filled */
8737 if (*now_filled == 0) {
8738 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8740 *now = asoc->time_last_sent;
8742 asoc->time_last_sent = *now;
8744 if (net->rto_needed) {
8745 data_list[0]->do_rtt = 1;
8746 net->rto_needed = 0;
8748 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8749 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8750 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8751 if (net->flight_size < net->cwnd) {
8752 /* start or restart it */
8753 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8754 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
8755 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
8757 SCTP_STAT_INCR(sctps_earlyfrstrout);
8758 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
8760 /* stop it if its running */
8761 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8762 SCTP_STAT_INCR(sctps_earlyfrstpout);
8763 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
8764 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
8773 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8774 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8777 if (old_start_at == NULL) {
8778 old_start_at = start_at;
8779 start_at = TAILQ_FIRST(&asoc->nets);
8781 goto again_one_more_time;
8784 * At the end there should be no NON timed chunks hanging on this
8787 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8788 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8790 if ((*num_out == 0) && (*reason_code == 0)) {
8795 sctp_clean_up_ctl(stcb, asoc, so_locked);
8800 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8803 * Prepend an OPERATION_ERROR chunk header and put it on the end of
8804 * the control chunk queue.
8806 struct sctp_chunkhdr *hdr;
8807 struct sctp_tmit_chunk *chk;
8810 SCTP_TCB_LOCK_ASSERT(stcb);
8811 sctp_alloc_a_chunk(stcb, chk);
8814 sctp_m_freem(op_err);
8817 chk->copy_by_ref = 0;
8818 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
8819 if (op_err == NULL) {
8820 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
8825 while (mat != NULL) {
8826 chk->send_size += SCTP_BUF_LEN(mat);
8827 mat = SCTP_BUF_NEXT(mat);
8829 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8830 chk->rec.chunk_id.can_take_data = 1;
8831 chk->sent = SCTP_DATAGRAM_UNSENT;
8834 chk->asoc = &stcb->asoc;
8836 chk->whoTo = chk->asoc->primary_destination;
8837 atomic_add_int(&chk->whoTo->ref_count, 1);
8838 hdr = mtod(op_err, struct sctp_chunkhdr *);
8839 hdr->chunk_type = SCTP_OPERATION_ERROR;
8840 hdr->chunk_flags = 0;
8841 hdr->chunk_length = htons(chk->send_size);
8842 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8845 chk->asoc->ctrl_queue_cnt++;
8849 sctp_send_cookie_echo(struct mbuf *m,
8851 struct sctp_tcb *stcb,
8852 struct sctp_nets *net)
8855 * pull out the cookie and put it at the front of the control chunk
8859 struct mbuf *cookie;
8860 struct sctp_paramhdr parm, *phdr;
8861 struct sctp_chunkhdr *hdr;
8862 struct sctp_tmit_chunk *chk;
8863 uint16_t ptype, plen;
8865 /* First find the cookie in the param area */
8867 at = offset + sizeof(struct sctp_init_chunk);
8869 SCTP_TCB_LOCK_ASSERT(stcb);
8871 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8875 ptype = ntohs(phdr->param_type);
8876 plen = ntohs(phdr->param_length);
8877 if (ptype == SCTP_STATE_COOKIE) {
8880 /* found the cookie */
8881 if ((pad = (plen % 4))) {
8884 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
8885 if (cookie == NULL) {
8889 #ifdef SCTP_MBUF_LOGGING
8890 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8895 if (SCTP_BUF_IS_EXTENDED(mat)) {
8896 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8898 mat = SCTP_BUF_NEXT(mat);
8904 at += SCTP_SIZE32(plen);
8906 if (cookie == NULL) {
8907 /* Did not find the cookie */
8910 /* ok, we got the cookie, let's change it into a cookie echo chunk */
8912 /* first the change from param to cookie */
8913 hdr = mtod(cookie, struct sctp_chunkhdr *);
8914 hdr->chunk_type = SCTP_COOKIE_ECHO;
8915 hdr->chunk_flags = 0;
8916 /* get the chunk stuff now and place it in the FRONT of the queue */
8917 sctp_alloc_a_chunk(stcb, chk);
8920 sctp_m_freem(cookie);
8923 chk->copy_by_ref = 0;
8924 chk->send_size = plen;
8925 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8926 chk->rec.chunk_id.can_take_data = 0;
8927 chk->sent = SCTP_DATAGRAM_UNSENT;
8929 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8930 chk->asoc = &stcb->asoc;
8932 chk->whoTo = chk->asoc->primary_destination;
8933 atomic_add_int(&chk->whoTo->ref_count, 1);
8934 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
8935 chk->asoc->ctrl_queue_cnt++;
8940 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
8944 struct sctp_nets *net)
8947 * take a HB request and make it into a HB ack and send it.
8949 struct mbuf *outchain;
8950 struct sctp_chunkhdr *chdr;
8951 struct sctp_tmit_chunk *chk;
8955 /* must have a net pointer */
8958 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
8959 if (outchain == NULL) {
8960 /* gak out of memory */
8963 #ifdef SCTP_MBUF_LOGGING
8964 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8969 if (SCTP_BUF_IS_EXTENDED(mat)) {
8970 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8972 mat = SCTP_BUF_NEXT(mat);
8976 chdr = mtod(outchain, struct sctp_chunkhdr *);
8977 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
8978 chdr->chunk_flags = 0;
8979 if (chk_length % 4) {
8981 uint32_t cpthis = 0;
8984 padlen = 4 - (chk_length % 4);
8985 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
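/*
 * Padding example: a HEARTBEAT of chk_length 22 needs
 * padlen = 4 - (22 % 4) = 2, so two zero bytes (taken from
 * "cpthis") are copied in after the chunk to keep the reply
 * 32-bit aligned.
 */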
8987 sctp_alloc_a_chunk(stcb, chk);
8990 sctp_m_freem(outchain);
8993 chk->copy_by_ref = 0;
8994 chk->send_size = chk_length;
8995 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
8996 chk->rec.chunk_id.can_take_data = 1;
8997 chk->sent = SCTP_DATAGRAM_UNSENT;
9000 chk->asoc = &stcb->asoc;
9001 chk->data = outchain;
9003 atomic_add_int(&chk->whoTo->ref_count, 1);
9004 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9005 chk->asoc->ctrl_queue_cnt++;
9009 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9011 /* formulate and queue a cookie-ack back to sender */
9012 struct mbuf *cookie_ack;
9013 struct sctp_chunkhdr *hdr;
9014 struct sctp_tmit_chunk *chk;
9017 SCTP_TCB_LOCK_ASSERT(stcb);
9019 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
9020 if (cookie_ack == NULL) {
9024 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9025 sctp_alloc_a_chunk(stcb, chk);
9028 sctp_m_freem(cookie_ack);
9031 chk->copy_by_ref = 0;
9032 chk->send_size = sizeof(struct sctp_chunkhdr);
9033 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9034 chk->rec.chunk_id.can_take_data = 1;
9035 chk->sent = SCTP_DATAGRAM_UNSENT;
9038 chk->asoc = &stcb->asoc;
9039 chk->data = cookie_ack;
9040 if (chk->asoc->last_control_chunk_from != NULL) {
9041 chk->whoTo = chk->asoc->last_control_chunk_from;
9043 chk->whoTo = chk->asoc->primary_destination;
9045 atomic_add_int(&chk->whoTo->ref_count, 1);
9046 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9047 hdr->chunk_type = SCTP_COOKIE_ACK;
9048 hdr->chunk_flags = 0;
9049 hdr->chunk_length = htons(chk->send_size);
9050 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9051 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9052 chk->asoc->ctrl_queue_cnt++;
9058 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9060 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9061 struct mbuf *m_shutdown_ack;
9062 struct sctp_shutdown_ack_chunk *ack_cp;
9063 struct sctp_tmit_chunk *chk;
9065 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9066 if (m_shutdown_ack == NULL) {
9070 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9071 sctp_alloc_a_chunk(stcb, chk);
9074 sctp_m_freem(m_shutdown_ack);
9077 chk->copy_by_ref = 0;
9078 chk->send_size = sizeof(struct sctp_chunkhdr);
9079 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9080 chk->rec.chunk_id.can_take_data = 1;
9081 chk->sent = SCTP_DATAGRAM_UNSENT;
9084 chk->asoc = &stcb->asoc;
9085 chk->data = m_shutdown_ack;
9087 atomic_add_int(&net->ref_count, 1);
9089 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9090 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9091 ack_cp->ch.chunk_flags = 0;
9092 ack_cp->ch.chunk_length = htons(chk->send_size);
9093 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9094 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9095 chk->asoc->ctrl_queue_cnt++;
9100 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9102 /* formulate and queue a SHUTDOWN to the sender */
9103 struct mbuf *m_shutdown;
9104 struct sctp_shutdown_chunk *shutdown_cp;
9105 struct sctp_tmit_chunk *chk;
9107 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9108 if (m_shutdown == NULL) {
9112 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9113 sctp_alloc_a_chunk(stcb, chk);
9116 sctp_m_freem(m_shutdown);
9119 chk->copy_by_ref = 0;
9120 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9121 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9122 chk->rec.chunk_id.can_take_data = 1;
9123 chk->sent = SCTP_DATAGRAM_UNSENT;
9126 chk->asoc = &stcb->asoc;
9127 chk->data = m_shutdown;
9129 atomic_add_int(&net->ref_count, 1);
9131 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9132 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9133 shutdown_cp->ch.chunk_flags = 0;
9134 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9135 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9136 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9137 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9138 chk->asoc->ctrl_queue_cnt++;
9143 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9146 * formulate and queue an ASCONF to the peer. ASCONF parameters
9147 * should be queued on the assoc queue.
9149 struct sctp_tmit_chunk *chk;
9150 struct mbuf *m_asconf;
9153 SCTP_TCB_LOCK_ASSERT(stcb);
9155 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9156 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9157 /* can't send a new one if there is one in flight already */
9160 /* compose an ASCONF chunk, maximum length is PMTU */
9161 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9162 if (m_asconf == NULL) {
9165 sctp_alloc_a_chunk(stcb, chk);
9168 sctp_m_freem(m_asconf);
9171 chk->copy_by_ref = 0;
9172 chk->data = m_asconf;
9173 chk->send_size = len;
9174 chk->rec.chunk_id.id = SCTP_ASCONF;
9175 chk->rec.chunk_id.can_take_data = 0;
9176 chk->sent = SCTP_DATAGRAM_UNSENT;
9178 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9179 chk->asoc = &stcb->asoc;
9181 atomic_add_int(&chk->whoTo->ref_count, 1);
9182 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9183 chk->asoc->ctrl_queue_cnt++;
9188 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9191 * formulate and queue an asconf-ack back to the sender. the asconf-ack
9192 * must be stored in the tcb.
9194 struct sctp_tmit_chunk *chk;
9195 struct sctp_asconf_ack *ack, *latest_ack;
9196 struct mbuf *m_ack, *m;
9197 struct sctp_nets *net = NULL;
9199 SCTP_TCB_LOCK_ASSERT(stcb);
9200 /* Get the latest ASCONF-ACK */
9201 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9202 if (latest_ack == NULL) {
9205 if (latest_ack->last_sent_to != NULL &&
9206 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9207 /* we're doing a retransmission */
9208 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9211 if (stcb->asoc.last_control_chunk_from == NULL)
9212 net = stcb->asoc.primary_destination;
9214 net = stcb->asoc.last_control_chunk_from;
9218 if (stcb->asoc.last_control_chunk_from == NULL)
9219 net = stcb->asoc.primary_destination;
9221 net = stcb->asoc.last_control_chunk_from;
9223 latest_ack->last_sent_to = net;
9225 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9226 if (ack->data == NULL) {
9229 /* copy the asconf_ack */
9230 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
9231 if (m_ack == NULL) {
9232 /* couldn't copy it */
9235 #ifdef SCTP_MBUF_LOGGING
9236 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9241 if (SCTP_BUF_IS_EXTENDED(mat)) {
9242 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9244 mat = SCTP_BUF_NEXT(mat);
9249 sctp_alloc_a_chunk(stcb, chk);
9253 sctp_m_freem(m_ack);
9256 chk->copy_by_ref = 0;
9263 chk->send_size = ack->len;
9264 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9265 chk->rec.chunk_id.can_take_data = 1;
9266 chk->sent = SCTP_DATAGRAM_UNSENT;
9268 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
9269 chk->asoc = &stcb->asoc;
9270 atomic_add_int(&chk->whoTo->ref_count, 1);
9272 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9273 chk->asoc->ctrl_queue_cnt++;
9280 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9281 struct sctp_tcb *stcb,
9282 struct sctp_association *asoc,
9283 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9284 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9290 * send out one MTU of retransmission. If fast_retransmit is
9291 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9292 * rwnd. For a Cookie or Asconf in the control chunk queue we
9293 * retransmit them by themselves.
9295 * For data chunks we will pick out the lowest TSNs in the sent_queue
9296 * marked for resend and bundle them all together (up to an MTU of the
9297 * destination). The address to send to should have been
9298 * selected/changed where the retransmission was marked (i.e. in FR
9299 * or t3-timeout routines).
9301 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9302 struct sctp_tmit_chunk *chk, *fwd;
9303 struct mbuf *m, *endofchain;
9304 struct sctp_nets *net = NULL;
9305 uint32_t tsns_sent = 0;
9306 int no_fragmentflg, bundle_at, cnt_thru;
9308 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9309 struct sctp_auth_chunk *auth = NULL;
9310 uint32_t auth_offset = 0;
9311 uint16_t auth_keyid;
9312 int override_ok = 1;
9313 int data_auth_reqd = 0;
9316 SCTP_TCB_LOCK_ASSERT(stcb);
9317 tmr_started = ctl_cnt = bundle_at = error = 0;
9322 endofchain = m = NULL;
9323 auth_keyid = stcb->asoc.authinfo.active_keyid;
9324 #ifdef SCTP_AUDITING_ENABLED
9325 sctp_audit_log(0xC3, 1);
9327 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9328 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9329 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9330 asoc->sent_queue_retran_cnt);
9331 asoc->sent_queue_cnt = 0;
9332 asoc->sent_queue_cnt_removeable = 0;
9333 /* send back 0/0 so we enter normal transmission */
9337 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9338 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9339 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9340 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9341 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9344 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9345 if (chk != asoc->str_reset) {
9347 * not eligible for retran if its
9354 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9359 * Add an AUTH chunk, if chunk requires it save the
9360 * offset into the chain for AUTH
9362 if ((auth == NULL) &&
9363 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9364 stcb->asoc.peer_auth_chunks))) {
9365 m = sctp_add_auth_chunk(m, &endofchain,
9366 &auth, &auth_offset,
9368 chk->rec.chunk_id.id);
9369 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9371 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9377 /* do we have control chunks to retransmit? */
9379 /* Start a timer no matter if we succeed or fail */
9380 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9381 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9382 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9383 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9384 chk->snd_count++; /* update our count */
9385 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9386 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9387 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9388 no_fragmentflg, 0, NULL, 0,
9389 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9390 chk->whoTo->port, so_locked, NULL, NULL))) {
9391 SCTP_STAT_INCR(sctps_lowlevelerr);
9394 m = endofchain = NULL;
9398 * We don't want to mark the net->sent time here since
9399 * we use this for HB and retrans cannot measure RTT
9401 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9403 chk->sent = SCTP_DATAGRAM_SENT;
9404 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9408 /* Clean up the fwd-tsn list */
9409 sctp_clean_up_ctl(stcb, asoc, so_locked);
9414 * Ok, it is just data retransmission we need to do or that and a
9415 * fwd-tsn with it all.
9417 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9418 return (SCTP_RETRAN_DONE);
9420 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9421 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9422 /* not yet open, resend the cookie and that is it */
9425 #ifdef SCTP_AUDITING_ENABLED
9426 sctp_auditing(20, inp, stcb, NULL);
9428 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9429 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9430 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9431 /* No, not sent to this net or not ready for rtx */
9434 if (chk->data == NULL) {
9435 printf("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9436 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9439 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9440 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9441 /* Gak, we have exceeded max unlucky retran, abort! */
9442 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9444 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9445 atomic_add_int(&stcb->asoc.refcnt, 1);
9446 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
9447 SCTP_TCB_LOCK(stcb);
9448 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9449 return (SCTP_RETRAN_EXIT);
9451 /* pick up the net */
9453 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
9454 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
9456 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9459 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9460 /* No room in peers rwnd */
9463 tsn = asoc->last_acked_seq + 1;
9464 if (tsn == chk->rec.data.TSN_seq) {
9466 * we make a special exception for this
9467 * case. The peer has no rwnd but is missing
9468 * the lowest chunk.. which is probably what
9469 * is holding up the rwnd.
9471 goto one_chunk_around;
9476 if (asoc->peers_rwnd < mtu) {
9478 if ((asoc->peers_rwnd == 0) &&
9479 (asoc->total_flight == 0)) {
9480 chk->window_probe = 1;
9481 chk->whoTo->window_probe = 1;
9484 #ifdef SCTP_AUDITING_ENABLED
9485 sctp_audit_log(0xC3, 2);
9489 net->fast_retran_ip = 0;
9490 if (chk->rec.data.doing_fast_retransmit == 0) {
9492 * if no FR in progress skip destinations that have
9493 * flight_size > cwnd.
9495 if (net->flight_size >= net->cwnd) {
9500 * Mark the destination net to have FR recovery
9504 net->fast_retran_ip = 1;
9508 * if no AUTH is yet included and this chunk requires it,
9509 * make sure to account for it. We don't apply the size
9510 * until the AUTH chunk is actually added below in case
9511 * there is no room for this chunk.
9513 if (data_auth_reqd && (auth == NULL)) {
9514 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9518 if ((chk->send_size <= (mtu - dmtu)) ||
9519 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9520 /* ok we will add this one */
9521 if (data_auth_reqd) {
9523 m = sctp_add_auth_chunk(m,
9529 auth_keyid = chk->auth_keyid;
9531 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9532 } else if (override_ok) {
9533 auth_keyid = chk->auth_keyid;
9535 } else if (chk->auth_keyid != auth_keyid) {
9536 /* different keyid, so done bundling */
9540 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9542 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9545 /* Do clear IP_DF ? */
9546 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9549 /* update our MTU size */
9550 if (mtu > (chk->send_size + dmtu))
9551 mtu -= (chk->send_size + dmtu);
9554 data_list[bundle_at++] = chk;
9555 if (one_chunk && (asoc->total_flight <= 0)) {
9556 SCTP_STAT_INCR(sctps_windowprobed);
9559 if (one_chunk == 0) {
9561 * now are there any more forward from chk to pick
9564 fwd = TAILQ_NEXT(chk, sctp_next);
9566 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9567 /* Nope, not for retran */
9568 fwd = TAILQ_NEXT(fwd, sctp_next);
9571 if (fwd->whoTo != net) {
9572 /* Nope, not the net in question */
9573 fwd = TAILQ_NEXT(fwd, sctp_next);
9576 if (data_auth_reqd && (auth == NULL)) {
9577 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9580 if (fwd->send_size <= (mtu - dmtu)) {
9581 if (data_auth_reqd) {
9583 m = sctp_add_auth_chunk(m,
9589 auth_keyid = fwd->auth_keyid;
9591 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9592 } else if (override_ok) {
9593 auth_keyid = fwd->auth_keyid;
9595 } else if (fwd->auth_keyid != auth_keyid) {
9603 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9605 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9608 /* Do clear IP_DF ? */
9609 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9612 /* update our MTU size */
9613 if (mtu > (fwd->send_size + dmtu))
9614 mtu -= (fwd->send_size + dmtu);
9617 data_list[bundle_at++] = fwd;
9618 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9621 fwd = TAILQ_NEXT(fwd, sctp_next);
9623 /* can't fit so we are done */
9628 /* Is there something to send for this destination? */
9631 * No matter if we fail or succeed we should start a
9632 * timer. A failure is like a lost IP packet :-)
9634 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9636 * no timer running on this destination
9639 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9642 /* Now let's send it, if there is anything to send :> */
9643 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9644 (struct sockaddr *)&net->ro._l_addr, m,
9645 auth_offset, auth, auth_keyid,
9646 no_fragmentflg, 0, NULL, 0,
9647 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9648 net->port, so_locked, NULL, NULL))) {
9649 /* error, we could not output */
9650 SCTP_STAT_INCR(sctps_lowlevelerr);
9653 m = endofchain = NULL;
9658 * We don't want to mark the net->sent time here
9659 * since we use this for HB and retrans cannot
9662 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9664 /* For auto-close */
9666 if (*now_filled == 0) {
9667 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9668 *now = asoc->time_last_sent;
9671 asoc->time_last_sent = *now;
9673 *cnt_out += bundle_at;
9674 #ifdef SCTP_AUDITING_ENABLED
9675 sctp_audit_log(0xC4, bundle_at);
9678 tsns_sent = data_list[0]->rec.data.TSN_seq;
9680 for (i = 0; i < bundle_at; i++) {
9681 SCTP_STAT_INCR(sctps_sendretransdata);
9682 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9684 * When we have revoked data and we
9685 * retransmit it, we clear the revoked
9686 * flag since this flag dictates whether we
9687 * subtracted from the fs
9689 if (data_list[i]->rec.data.chunk_was_revoked) {
9690 /* Deflate the cwnd */
9691 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9692 data_list[i]->rec.data.chunk_was_revoked = 0;
9694 data_list[i]->snd_count++;
9695 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9696 /* record the time */
9697 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9698 if (data_list[i]->book_size_scale) {
9700 * need to double the book size on
9703 data_list[i]->book_size_scale = 0;
9705 * Since we double the booksize, we
9706 * must also double the output queue
9707 * size, since this gets shrunk when
9708 * we free by this amount.
9710 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9711 data_list[i]->book_size *= 2;
9715 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9716 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9717 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9719 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9720 (uint32_t) (data_list[i]->send_size +
9721 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
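/*
 * Note: each retransmitted chunk shrinks our view of the
 * peer's rwnd by its send_size plus the sysctl-configured
 * per-chunk overhead (sctp_peer_chunk_oh); sctp_sbspace_sub()
 * is used so the value does not wrap below zero.
 */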
9723 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9724 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9725 data_list[i]->whoTo->flight_size,
9726 data_list[i]->book_size,
9727 (uintptr_t) data_list[i]->whoTo,
9728 data_list[i]->rec.data.TSN_seq);
9730 sctp_flight_size_increase(data_list[i]);
9731 sctp_total_flight_increase(stcb, data_list[i]);
9732 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9733 /* SWS sender side engages */
9734 asoc->peers_rwnd = 0;
9737 (data_list[i]->rec.data.doing_fast_retransmit)) {
9738 SCTP_STAT_INCR(sctps_sendfastretrans);
9739 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9740 (tmr_started == 0)) {
9742 * ok we just fast-retrans'd
9743 * the lowest TSN, i.e. the
9744 * first on the list. In
9745 * this case we want to give
9746 * some more time to get a
9747 * SACK back without a
9750 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9751 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9752 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9756 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9757 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9759 #ifdef SCTP_AUDITING_ENABLED
9760 sctp_auditing(21, inp, stcb, NULL);
9766 if (asoc->sent_queue_retran_cnt <= 0) {
9767 /* all done we have no more to retran */
9768 asoc->sent_queue_retran_cnt = 0;
9772 /* No more room in rwnd */
9775 /* stop the for loop here. we sent out a packet */
9783 sctp_timer_validation(struct sctp_inpcb *inp,
9784 struct sctp_tcb *stcb,
9785 struct sctp_association *asoc,
9788 struct sctp_nets *net;
9790 /* Validate that a timer is running somewhere */
9791 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9792 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9793 /* Here is a timer */
9797 SCTP_TCB_LOCK_ASSERT(stcb);
9798 /* Gak, we did not have a timer somewhere */
9799 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9800 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9805 sctp_chunk_output(struct sctp_inpcb *inp,
9806 struct sctp_tcb *stcb,
9809 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9815 * Ok this is the generic chunk service queue. we must do the
9817 * - See if there are retransmits pending, if so we must
9819 * - Service the stream queue that is next, moving any
9820 * message (note I must get a complete message i.e.
9821 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9823 * - Check to see if the cwnd/rwnd allows any output, if so we
9824 * go ahead and formulate and send the low level chunks. Making sure
9825 * to combine any control in the control chunk queue also.
9827 struct sctp_association *asoc;
9828 struct sctp_nets *net;
9829 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
9830 unsigned int burst_cnt = 0;
9834 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9837 unsigned int tot_frs = 0;
9840 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9841 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9847 SCTP_TCB_LOCK_ASSERT(stcb);
9849 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9851 if ((un_sent <= 0) &&
9852 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9853 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9854 (asoc->sent_queue_retran_cnt == 0)) {
9855 /* Nothing to do unless there is something left to be sent */
9859 * Do we have something to send, data or control AND a sack timer
9860 * running? If so, piggy-back the SACK.
9862 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9863 sctp_send_sack(stcb, so_locked);
9864 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
9866 while (asoc->sent_queue_retran_cnt) {
9868 * Ok, it is retransmission time only, we send out only ONE
9869 * packet with a single call off to the retran code.
9871 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9873 * Special hook for handling cookies discarded
9874 * by the peer that carried data. Send cookie-ack only
9875 * and then the next call will get the retrans.
9877 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9879 &now, &now_filled, frag_point, so_locked);
9881 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9882 /* if its not from a HB then do it */
9884 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9890 * it's from any other place, we don't allow retran
9891 * output (only control)
9896 /* Can't send anymore */
9898 * now let's push out control by calling med-level
9899 * output once. this ensures that we WILL send HBs
9902 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9904 &now, &now_filled, frag_point, so_locked);
9905 #ifdef SCTP_AUDITING_ENABLED
9906 sctp_auditing(8, inp, stcb, NULL);
9908 (void)sctp_timer_validation(inp, stcb, asoc, ret);
9913 * The count was off.. retran is not happening so do
9914 * the normal retransmission.
9916 #ifdef SCTP_AUDITING_ENABLED
9917 sctp_auditing(9, inp, stcb, NULL);
9919 if (ret == SCTP_RETRAN_EXIT) {
9924 if (from_where == SCTP_OUTPUT_FROM_T3) {
9925 /* Only one transmission allowed out of a timeout */
9926 #ifdef SCTP_AUDITING_ENABLED
9927 sctp_auditing(10, inp, stcb, NULL);
9929 /* Push out any control */
9930 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
9931 &now, &now_filled, frag_point, so_locked);
9934 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
9935 /* Hit FR burst limit */
9938 if ((num_out == 0) && (ret == 0)) {
9939 /* No more retrans to send */
9943 #ifdef SCTP_AUDITING_ENABLED
9944 sctp_auditing(12, inp, stcb, NULL);
9946 /* Check for bad destinations, if they exist move chunks around. */
9947 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9948 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
9949 SCTP_ADDR_NOT_REACHABLE) {
9951 * if possible move things off of this address; we
9952 * still may send below due to the dormant state, but
9953 * we try to find an alternate address to send to,
9954 * and if we have one we move all queued data on the
9955 * out wheel to this alternate address.
9957 if (net->ref_count > 1)
9958 sctp_move_chunks_from_net(stcb, net);
9959 } else if ((asoc->sctp_cmt_on_off > 0) &&
9960 (asoc->sctp_cmt_pf > 0) &&
9961 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
9963 * JRS 5/14/07 - If CMT PF is on and the current
9964 * destination is in PF state, move all queued data
9965 * to an alternate destination.
9967 if (net->ref_count > 1)
9968 sctp_move_chunks_from_net(stcb, net);
9971 * if ((asoc->sat_network) || (net->addr_is_local))
9972 * { burst_limit = asoc->max_burst *
9973 * SCTP_SAT_NETWORK_BURST_INCR; }
9975 if (asoc->max_burst > 0) {
9976 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
9977 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
9979 * JRS - Use the congestion
9980 * control given in the
9981 * congestion control module
9983 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
9984 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9985 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
9987 SCTP_STAT_INCR(sctps_maxburstqueued);
9989 net->fast_retran_ip = 0;
9991 if (net->flight_size == 0) {
9993 * Should be decaying the
10005 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10006 &reason_code, 0, from_where,
10007 &now, &now_filled, frag_point, so_locked);
10009 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10010 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10011 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10013 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10014 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10015 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10019 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10021 tot_out += num_out;
10023 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10024 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10025 if (num_out == 0) {
10026 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10031 * When nagle is on, we look at how much is un_sent, then
10032 * if it's smaller than an MTU and we have data in
10035 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10036 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
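/*
 * Nagle estimate: the bytes still queued plus one data
 * chunk header (sizeof(struct sctp_data_chunk)) for every
 * queued message; if that total is below one MTU while data
 * is already in flight, we hold off sending.
 */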
10037 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10038 (stcb->asoc.total_flight > 0)) {
10042 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10043 TAILQ_EMPTY(&asoc->send_queue) &&
10044 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
10045 /* Nothing left to send */
10048 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10049 /* Nothing left to send */
10052 } while (num_out &&
10053 ((asoc->max_burst == 0) ||
10054 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10055 (burst_cnt < asoc->max_burst)));
10057 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10058 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10059 SCTP_STAT_INCR(sctps_maxburstqueued);
10060 asoc->burst_limit_applied = 1;
10061 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10062 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10065 asoc->burst_limit_applied = 0;
10068 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10069 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10071 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10075 * Now we need to clean up the control chunk chain if a ECNE is on
10076 * it. It must be marked as UNSENT again so the next call will continue
10077 * to send it until such time that we get a CWR, to remove it.
10079 if (stcb->asoc.ecn_echo_cnt_onq)
10080 sctp_fix_ecn_echo(asoc);
10086 sctp_output(inp, m, addr, control, p, flags)
10087 struct sctp_inpcb *inp;
10089 struct sockaddr *addr;
10090 struct mbuf *control;
10095 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10098 if (inp->sctp_socket == NULL) {
10099 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10102 return (sctp_sosend(inp->sctp_socket,
10104 (struct uio *)NULL,
10112 send_forward_tsn(struct sctp_tcb *stcb,
10113 struct sctp_association *asoc)
10115 struct sctp_tmit_chunk *chk;
10116 struct sctp_forward_tsn_chunk *fwdtsn;
10117 uint32_t advance_peer_ack_point;
10119 SCTP_TCB_LOCK_ASSERT(stcb);
10120 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10121 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10122 /* mark it as unsent */
10123 chk->sent = SCTP_DATAGRAM_UNSENT;
10124 chk->snd_count = 0;
10125 /* Do we correct its output location? */
10126 if (chk->whoTo != asoc->primary_destination) {
10127 sctp_free_remote_addr(chk->whoTo);
10128 chk->whoTo = asoc->primary_destination;
10129 atomic_add_int(&chk->whoTo->ref_count, 1);
10131 goto sctp_fill_in_rest;
10134 /* Ok if we reach here we must build one */
10135 sctp_alloc_a_chunk(stcb, chk);
10139 asoc->fwd_tsn_cnt++;
10140 chk->copy_by_ref = 0;
10141 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10142 chk->rec.chunk_id.can_take_data = 0;
10145 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
10146 if (chk->data == NULL) {
10147 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10150 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10151 chk->sent = SCTP_DATAGRAM_UNSENT;
10152 chk->snd_count = 0;
10153 chk->whoTo = asoc->primary_destination;
10154 atomic_add_int(&chk->whoTo->ref_count, 1);
10155 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10156 asoc->ctrl_queue_cnt++;
10159 * Here we go through and fill out the part that deals with
10160 * stream/seq of the ones we skip.
10162 SCTP_BUF_LEN(chk->data) = 0;
10164 struct sctp_tmit_chunk *at, *tp1, *last;
10165 struct sctp_strseq *strseq;
10166 unsigned int cnt_of_space, i, ovh;
10167 unsigned int space_needed;
10168 unsigned int cnt_of_skipped = 0;
10170 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10171 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
10172 /* no more to look at */
10175 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10176 /* We don't report these */
10181 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10182 (cnt_of_skipped * sizeof(struct sctp_strseq)));
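/*
 * Rough sizing sketch: with the on-the-wire layouts this is
 * an 8 byte FORWARD-TSN header plus 4 bytes (stream + seq)
 * per skipped message, e.g. skipping 10 messages needs
 * 8 + 10 * 4 = 48 bytes of the mbuf.
 */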
10184 cnt_of_space = M_TRAILINGSPACE(chk->data);
10186 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10187 ovh = SCTP_MIN_OVERHEAD;
10189 ovh = SCTP_MIN_V4_OVERHEAD;
10191 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10192 /* trim to a mtu size */
10193 cnt_of_space = asoc->smallest_mtu - ovh;
10195 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10196 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10197 0xff, 0, cnt_of_skipped,
10198 asoc->advanced_peer_ack_point);
10201 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10202 if (cnt_of_space < space_needed) {
10204 * ok we must trim down the chunk by lowering the
10205 * advance peer ack point.
10207 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10208 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10209 0xff, 0xff, cnt_of_space,
10212 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10213 cnt_of_skipped /= sizeof(struct sctp_strseq);
10215 * Go through and find the TSN that will be the one
10218 at = TAILQ_FIRST(&asoc->sent_queue);
10219 for (i = 0; i < cnt_of_skipped; i++) {
10220 tp1 = TAILQ_NEXT(at, sctp_next);
10226 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10227 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10228 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10229 asoc->advanced_peer_ack_point);
10233 * last now points to last one I can report, update
10237 advance_peer_ack_point = last->rec.data.TSN_seq;
10238 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10239 cnt_of_skipped * sizeof(struct sctp_strseq);
10241 chk->send_size = space_needed;
10242 /* Setup the chunk */
10243 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10244 fwdtsn->ch.chunk_length = htons(chk->send_size);
10245 fwdtsn->ch.chunk_flags = 0;
10246 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10247 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10248 SCTP_BUF_LEN(chk->data) = chk->send_size;
10251 * Move pointer to after the fwdtsn and transfer to the
10254 strseq = (struct sctp_strseq *)fwdtsn;
10256 * Now populate the strseq list. This is done blindly
10257 * without pulling out duplicate stream info. This is
10258 * inefficient but won't harm the process since the peer will
10259 * look at these in sequence and will thus release anything.
10260 * It could mean we exceed the PMTU and chop off some that
10261 * we could have included.. but this is unlikely (aka 1432/4
10262 * would mean 300+ stream seq's would have to be reported in
10263 * one FWD-TSN). With a bit of work we can later FIX this to
10264 * optimize and pull out duplicates.. but it does add more
10265 * overhead. So for now... not!
10267 at = TAILQ_FIRST(&asoc->sent_queue);
10268 for (i = 0; i < cnt_of_skipped; i++) {
10269 tp1 = TAILQ_NEXT(at, sctp_next);
10272 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10273 /* We don't report these */
10278 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10279 at->rec.data.fwd_tsn_cnt = 0;
10281 strseq->stream = ntohs(at->rec.data.stream_number);
10282 strseq->sequence = ntohs(at->rec.data.stream_seq);
10292 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10293 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10299 * Queue up a SACK or NR-SACK in the control queue.
10300 * We must first check to see if a SACK or NR-SACK is
10301 * somehow on the control queue.
10302 * If so, we will take and remove the old one.
10304 struct sctp_association *asoc;
10305 struct sctp_tmit_chunk *chk, *a_chk;
10306 struct sctp_sack_chunk *sack;
10307 struct sctp_nr_sack_chunk *nr_sack;
10308 struct sctp_gap_ack_block *gap_descriptor;
10309 struct sack_track *selector;
10314 int limit_reached = 0;
10315 unsigned int i, siz, j;
10316 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10319 uint32_t highest_tsn;
10324 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
10325 (stcb->asoc.peer_supports_nr_sack == 1)) {
10326 type = SCTP_NR_SELECTIVE_ACK;
10328 type = SCTP_SELECTIVE_ACK;
10331 asoc = &stcb->asoc;
10332 SCTP_TCB_LOCK_ASSERT(stcb);
10333 if (asoc->last_data_chunk_from == NULL) {
10334 /* Hmm we never received anything */
10337 sctp_slide_mapping_arrays(stcb);
10338 sctp_set_rwnd(stcb, asoc);
10339 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10340 if (chk->rec.chunk_id.id == type) {
10341 /* Hmm, found a sack already on queue, remove it */
10342 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10343 asoc->ctrl_queue_cnt--;
10346 sctp_m_freem(a_chk->data);
10347 a_chk->data = NULL;
10349 sctp_free_remote_addr(a_chk->whoTo);
10350 a_chk->whoTo = NULL;
10354 if (a_chk == NULL) {
10355 sctp_alloc_a_chunk(stcb, a_chk);
10356 if (a_chk == NULL) {
10357 /* No memory so we drop the idea, and set a timer */
10358 if (stcb->asoc.delayed_ack) {
10359 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10360 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10361 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10362 stcb->sctp_ep, stcb, NULL);
10364 stcb->asoc.send_sack = 1;
10368 a_chk->copy_by_ref = 0;
10369 a_chk->rec.chunk_id.id = type;
10370 a_chk->rec.chunk_id.can_take_data = 1;
10372 /* Clear our pkt counts */
10373 asoc->data_pkts_seen = 0;
10375 a_chk->asoc = asoc;
10376 a_chk->snd_count = 0;
10377 a_chk->send_size = 0; /* fill in later */
10378 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10379 a_chk->whoTo = NULL;
10381 if ((asoc->numduptsns) ||
10382 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)) {
10384 * Ok, we have some duplicates or the destination for the
10385 * sack is unreachable, let's see if we can select an
10386 * alternate to asoc->last_data_chunk_from
10388 if ((!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)) &&
10389 (asoc->used_alt_onsack > asoc->numnets)) {
10390 /* We used an alt last time, don't this time */
10391 a_chk->whoTo = NULL;
10393 asoc->used_alt_onsack++;
10394 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10396 if (a_chk->whoTo == NULL) {
10397 /* Nope, no alternate */
10398 a_chk->whoTo = asoc->last_data_chunk_from;
10399 asoc->used_alt_onsack = 0;
10403 * No duplicates so we use the last place we received data
10406 asoc->used_alt_onsack = 0;
10407 a_chk->whoTo = asoc->last_data_chunk_from;
10409 if (a_chk->whoTo) {
10410 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10412 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10413 highest_tsn = asoc->highest_tsn_inside_map;
10415 highest_tsn = asoc->highest_tsn_inside_nr_map;
10417 if (highest_tsn == asoc->cumulative_tsn) {
10419 if (type == SCTP_SELECTIVE_ACK) {
10420 space_req = sizeof(struct sctp_sack_chunk);
10422 space_req = sizeof(struct sctp_nr_sack_chunk);
10425 /* gaps get a cluster */
10426 space_req = MCLBYTES;
10428 /* Ok now let's formulate an MBUF with our sack */
10429 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
10430 if ((a_chk->data == NULL) ||
10431 (a_chk->whoTo == NULL)) {
10432 /* rats, no mbuf memory */
10434 /* was a problem with the destination */
10435 sctp_m_freem(a_chk->data);
10436 a_chk->data = NULL;
10438 sctp_free_a_chunk(stcb, a_chk, so_locked);
10439 /* sa_ignore NO_NULL_CHK */
10440 if (stcb->asoc.delayed_ack) {
10441 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10442 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10443 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10444 stcb->sctp_ep, stcb, NULL);
10446 stcb->asoc.send_sack = 1;
10450 /* ok, let's go through and fill it in */
10451 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10452 space = M_TRAILINGSPACE(a_chk->data);
10453 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10454 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10456 limit = mtod(a_chk->data, caddr_t);
10461 if ((asoc->sctp_cmt_on_off > 0) &&
10462 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10464 * CMT DAC algorithm: If 2 packets (i.e., binary 10) have been
10465 * received, then set the high bit to 1, else 0. Reset
10468 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10469 asoc->cmt_dac_pkts_rcvd = 0;
10471 #ifdef SCTP_ASOCLOG_OF_TSNS
10472 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10473 stcb->asoc.cumack_log_atsnt++;
10474 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10475 stcb->asoc.cumack_log_atsnt = 0;
10478 /* reset the reader's interpretation */
10479 stcb->freed_by_sorcv_sincelast = 0;
10481 if (type == SCTP_SELECTIVE_ACK) {
10482 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10484 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10485 if (highest_tsn > asoc->mapping_array_base_tsn) {
10486 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
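/*
 * e.g. if highest_tsn is 10 TSNs above the mapping array
 * base, (10 + 1 + 7) / 8 = 2 bytes of the map have to be
 * walked; the wrap-around case is handled separately below.
 */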
10488 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10492 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10493 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10494 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10495 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10497 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10501 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10504 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
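/*
 * "offset" translates bit positions in the mapping array
 * (relative to mapping_array_base_tsn) into the gap
 * start/end values, which a SACK reports relative to the
 * cumulative TSN ack.
 */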
10506 if (((type == SCTP_SELECTIVE_ACK) &&
10507 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10508 ((type == SCTP_NR_SELECTIVE_ACK) &&
10509 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10510 /* we have a gap .. maybe */
10511 for (i = 0; i < siz; i++) {
10512 tsn_map = asoc->mapping_array[i];
10513 if (type == SCTP_SELECTIVE_ACK) {
10514 tsn_map |= asoc->nr_mapping_array[i];
10518 * Clear all bits corresponding to TSNs
10519 * smaller than or equal to the cumulative TSN.
10521 tsn_map &= (~0 << (1 - offset));
10523 selector = &sack_array[tsn_map];
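/*
 * Each byte of the mapping array indexes the precomputed
 * sack_array[] table; the selected entry lists the gap-ack
 * blocks contained in that byte, plus left/right edge hints
 * that let adjacent bytes merge into a single reported gap.
 */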
10524 if (mergeable && selector->right_edge) {
10526 * Backup, left and right edges were ok to
10532 if (selector->num_entries == 0)
10535 for (j = 0; j < selector->num_entries; j++) {
10536 if (mergeable && selector->right_edge) {
10538 * do a merge by NOT setting
10544 * no merge, set the left
10548 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10550 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10553 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10559 if (selector->left_edge) {
10563 if (limit_reached) {
10564 /* Reached the limit, stop */
10570 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10571 (limit_reached == 0)) {
10575 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10576 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10578 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10581 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10584 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10586 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10587 /* we have a gap .. maybe */
10588 for (i = 0; i < siz; i++) {
10589 tsn_map = asoc->nr_mapping_array[i];
10592 * Clear all bits corresponding to
10593 * TSNs smaller or equal to the
10596 tsn_map &= (~0 << (1 - offset));
10598 selector = &sack_array[tsn_map];
10599 if (mergeable && selector->right_edge) {
10601 * Backup, left and right edges were ok to merge.
10604 num_nr_gap_blocks--;
10607 if (selector->num_entries == 0)
10610 for (j = 0; j < selector->num_entries; j++) {
10611 if (mergeable && selector->right_edge) {
10613 * do a merge by NOT setting the left edge
10620 * no merge, set the left edge
10624 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10626 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10627 num_nr_gap_blocks++;
10629 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10635 if (selector->left_edge) {
10639 if (limit_reached) {
10640 /* Reached the limit, stop */
10647 /* now we must add any dups we are going to report. */
10648 if ((limit_reached == 0) && (asoc->numduptsns)) {
10649 dup = (uint32_t *) gap_descriptor;
10650 for (i = 0; i < asoc->numduptsns; i++) {
10651 *dup = htonl(asoc->dup_tsns[i]);
10654 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10659 asoc->numduptsns = 0;
10662 * now that the chunk is prepared queue it to the control chunk
10665 if (type == SCTP_SELECTIVE_ACK) {
10666 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
10667 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10668 num_dups * sizeof(int32_t);
10669 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10670 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10671 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10672 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10673 sack->sack.num_dup_tsns = htons(num_dups);
10674 sack->ch.chunk_type = type;
10675 sack->ch.chunk_flags = flags;
10676 sack->ch.chunk_length = htons(a_chk->send_size);
10678 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
10679 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10680 num_dups * sizeof(int32_t);
10681 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10682 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10683 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10684 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10685 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10686 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10687 nr_sack->nr_sack.reserved = 0;
10688 nr_sack->ch.chunk_type = type;
10689 nr_sack->ch.chunk_flags = flags;
10690 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10692 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10693 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10694 asoc->ctrl_queue_cnt++;
10695 asoc->send_sack = 0;
10696 SCTP_STAT_INCR(sctps_sendsacks);
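/*
 * The loop above turns each byte of the TSN mapping array into gap ack
 * blocks by indexing the precomputed sack_array[256] table with the byte
 * value and adding the running offset to every (start, end) pair.  A
 * minimal standalone sketch of the same per-byte decoding, done by hand
 * instead of by table lookup (sample bitmap and offset are hypothetical):
 */
#include <stdint.h>
#include <stdio.h>

/* Decode one byte of a received-TSN bitmap into runs of set bits, numbered
 * 1..8 within the byte -- the information sack_array[] stores per value. */
static int
byte_to_gaps(uint8_t b, uint8_t starts[4], uint8_t ends[4])
{
	int n = 0, i, in_run = 0;

	for (i = 0; i < 8; i++) {
		if (b & (1 << i)) {
			if (!in_run) {
				starts[n] = i + 1;	/* 1-based, like the table */
				in_run = 1;
			}
			ends[n] = i + 1;
		} else if (in_run) {
			in_run = 0;
			n++;
		}
	}
	if (in_run)
		n++;
	return (n);		/* a byte can never hold more than 4 runs */
}

int
main(void)
{
	uint8_t map[2] = { 0x2c, 0x81 };	/* toy mapping array */
	uint8_t s[4], e[4];
	int offset = 1;		/* like mapping_array_base_tsn - cumulative_tsn */
	int j, k, n;

	for (j = 0; j < 2; j++) {
		n = byte_to_gaps(map[j], s, e);
		for (k = 0; k < n; k++)
			printf("gap block: start=%d end=%d (relative to cum-ack)\n",
			    s[k] + offset, e[k] + offset);
		offset += 8;	/* advance to the next byte, as the loop above does */
	}
	return (0);
}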
10701 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10702 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10707 struct mbuf *m_abort;
10708 struct mbuf *m_out = NULL, *m_end = NULL;
10709 struct sctp_abort_chunk *abort = NULL;
10711 uint32_t auth_offset = 0;
10712 struct sctp_auth_chunk *auth = NULL;
10715 * Add an AUTH chunk, if this chunk requires it, and save the offset into
10716 * the chain for AUTH
10718 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10719 stcb->asoc.peer_auth_chunks)) {
10720 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
10721 stcb, SCTP_ABORT_ASSOCIATION);
10722 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10724 SCTP_TCB_LOCK_ASSERT(stcb);
10725 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
10726 if (m_abort == NULL) {
10729 sctp_m_freem(m_out);
10732 /* link in any error */
10733 SCTP_BUF_NEXT(m_abort) = operr;
10740 sz += SCTP_BUF_LEN(n);
10741 n = SCTP_BUF_NEXT(n);
10744 SCTP_BUF_LEN(m_abort) = sizeof(*abort);
10745 if (m_out == NULL) {
10746 /* NO Auth chunk prepended, so reserve space in front */
10747 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10750 /* Put AUTH chunk at the front of the chain */
10751 SCTP_BUF_NEXT(m_end) = m_abort;
10754 /* fill in the ABORT chunk */
10755 abort = mtod(m_abort, struct sctp_abort_chunk *);
10756 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10757 abort->ch.chunk_flags = 0;
10758 abort->ch.chunk_length = htons(sizeof(*abort) + sz);
10760 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
10761 stcb->asoc.primary_destination,
10762 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
10763 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, NULL, 0,
10764 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10765 stcb->asoc.primary_destination->port, so_locked, NULL, NULL);
10766 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10770 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10771 struct sctp_nets *net,
10774 /* formulate and SEND a SHUTDOWN-COMPLETE */
10775 struct mbuf *m_shutdown_comp;
10776 struct sctp_shutdown_complete_chunk *shutdown_complete;
10780 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
10781 if (m_shutdown_comp == NULL) {
10785 if (reflect_vtag) {
10786 flags = SCTP_HAD_NO_TCB;
10787 vtag = stcb->asoc.my_vtag;
10790 vtag = stcb->asoc.peer_vtag;
10792 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10793 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10794 shutdown_complete->ch.chunk_flags = flags;
10795 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10796 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10797 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10798 (struct sockaddr *)&net->ro._l_addr,
10799 m_shutdown_comp, 0, NULL, 0, 1, 0, NULL, 0,
10800 stcb->sctp_ep->sctp_lport, stcb->rport,
10802 net->port, SCTP_SO_NOT_LOCKED, NULL, NULL);
10803 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
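/*
 * The chunk built above is just the 4-byte common chunk header defined in
 * RFC 4960, with the T bit set when the verification tag was reflected.  A
 * minimal standalone sketch of that header fill (chunk type 14 and T bit
 * 0x01 are taken from RFC 4960; the struct name is hypothetical):
 */
#include <stdint.h>
#include <arpa/inet.h>

struct chunkhdr_sketch {
	uint8_t  type;		/* 14 = SHUTDOWN COMPLETE */
	uint8_t  flags;		/* bit 0x01 = T bit: vtag was reflected */
	uint16_t length;	/* whole chunk length, network byte order */
};

static void
fill_shutdown_complete(struct chunkhdr_sketch *ch, int reflect_vtag)
{
	ch->type = 14;
	ch->flags = reflect_vtag ? 0x01 : 0x00;	/* role of SCTP_HAD_NO_TCB above */
	ch->length = htons(sizeof(*ch));	/* the chunk is only the header */
}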
10808 sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
10809 uint32_t vrf_id, uint16_t port)
10811 /* formulate and SEND a SHUTDOWN-COMPLETE */
10812 struct mbuf *o_pak;
10815 struct udphdr *udp = NULL;
10816 int offset_out, len, mlen;
10817 struct sctp_shutdown_complete_msg *comp_cp;
10820 struct ip *iph_out;
10824 struct ip6_hdr *ip6, *ip6_out;
10828 iph = mtod(m, struct ip *);
10829 switch (iph->ip_v) {
10832 len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
10836 case IPV6_VERSION >> 4:
10837 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
10844 len += sizeof(struct udphdr);
10846 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
10847 if (mout == NULL) {
10850 SCTP_BUF_RESV_UF(mout, max_linkhdr);
10851 SCTP_BUF_LEN(mout) = len;
10852 SCTP_BUF_NEXT(mout) = NULL;
10853 if (m->m_flags & M_FLOWID) {
10854 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
10855 mout->m_flags |= M_FLOWID;
10865 switch (iph->ip_v) {
10868 iph_out = mtod(mout, struct ip *);
10870 /* Fill in the IP header for the SHUTDOWN-COMPLETE */
10871 iph_out->ip_v = IPVERSION;
10872 iph_out->ip_hl = (sizeof(struct ip) / 4);
10873 iph_out->ip_tos = (u_char)0;
10874 iph_out->ip_id = 0;
10875 iph_out->ip_off = 0;
10876 iph_out->ip_ttl = MAXTTL;
10878 iph_out->ip_p = IPPROTO_UDP;
10880 iph_out->ip_p = IPPROTO_SCTP;
10882 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
10883 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
10885 /* let IP layer calculate this */
10886 iph_out->ip_sum = 0;
10887 offset_out += sizeof(*iph_out);
10888 comp_cp = (struct sctp_shutdown_complete_msg *)(
10889 (caddr_t)iph_out + offset_out);
10893 case IPV6_VERSION >> 4:
10894 ip6 = (struct ip6_hdr *)iph;
10895 ip6_out = mtod(mout, struct ip6_hdr *);
10897 /* Fill in the IPv6 header for the SHUTDOWN-COMPLETE */
10898 ip6_out->ip6_flow = ip6->ip6_flow;
10899 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
10901 ip6_out->ip6_nxt = IPPROTO_UDP;
10903 ip6_out->ip6_nxt = IPPROTO_SCTP;
10905 ip6_out->ip6_src = ip6->ip6_dst;
10906 ip6_out->ip6_dst = ip6->ip6_src;
10908 * ?? The old code had both the iph len + payload, I think
10909 * this is wrong and would never have worked
10911 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
10912 offset_out += sizeof(*ip6_out);
10913 comp_cp = (struct sctp_shutdown_complete_msg *)(
10914 (caddr_t)ip6_out + offset_out);
10918 /* Currently not supported. */
10919 sctp_m_freem(mout);
10923 udp = (struct udphdr *)comp_cp;
10924 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
10925 udp->uh_dport = port;
10926 udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
10929 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
10931 offset_out += sizeof(struct udphdr);
10932 comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
10934 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
10936 sctp_m_freem(mout);
10939 /* Now copy in and fill in the SHUTDOWN-COMPLETE tags etc. */
10940 comp_cp->sh.src_port = sh->dest_port;
10941 comp_cp->sh.dest_port = sh->src_port;
10942 comp_cp->sh.checksum = 0;
10943 comp_cp->sh.v_tag = sh->v_tag;
10944 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
10945 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10946 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10949 if (iph_out != NULL) {
10953 mlen = SCTP_BUF_LEN(mout);
10954 bzero(&ro, sizeof ro);
10955 /* set IPv4 length */
10956 iph_out->ip_len = mlen;
10957 #ifdef SCTP_PACKET_LOGGING
10958 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
10959 sctp_packet_log(mout, mlen);
10962 #if defined(SCTP_WITH_NO_CSUM)
10963 SCTP_STAT_INCR(sctps_sendnocrc);
10965 comp_cp->sh.checksum = sctp_calculate_cksum(mout, offset_out);
10966 SCTP_STAT_INCR(sctps_sendswcrc);
10968 SCTP_ENABLE_UDP_CSUM(mout);
10970 #if defined(SCTP_WITH_NO_CSUM)
10971 SCTP_STAT_INCR(sctps_sendnocrc);
10973 mout->m_pkthdr.csum_flags = CSUM_SCTP;
10974 mout->m_pkthdr.csum_data = 0;
10975 SCTP_STAT_INCR(sctps_sendhwcrc);
10978 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
10980 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
10982 /* Free the route if we got one back */
10988 if (ip6_out != NULL) {
10989 struct route_in6 ro;
10991 struct ifnet *ifp = NULL;
10993 bzero(&ro, sizeof(ro));
10994 mlen = SCTP_BUF_LEN(mout);
10995 #ifdef SCTP_PACKET_LOGGING
10996 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
10997 sctp_packet_log(mout, mlen);
10999 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
11001 #if defined(SCTP_WITH_NO_CSUM)
11002 SCTP_STAT_INCR(sctps_sendnocrc);
11004 comp_cp->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11005 SCTP_STAT_INCR(sctps_sendswcrc);
11007 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), mlen - sizeof(struct ip6_hdr))) == 0) {
11008 udp->uh_sum = 0xffff;
11011 #if defined(SCTP_WITH_NO_CSUM)
11012 SCTP_STAT_INCR(sctps_sendnocrc);
11014 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11015 mout->m_pkthdr.csum_data = 0;
11016 SCTP_STAT_INCR(sctps_sendhwcrc);
11019 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
11021 /* Free the route if we got one back */
11026 SCTP_STAT_INCR(sctps_sendpackets);
11027 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11028 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
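/*
 * Length bookkeeping for the UDP-encapsulated case above: uh_ulen covers the
 * UDP header plus the SCTP message, IPv4 ip_len covers the entire datagram
 * including the IP header, while IPv6 ip6_plen excludes the IPv6 header.  A
 * small standalone sketch (the fixed header sizes are the usual values and
 * are used here as assumptions):
 */
#include <stddef.h>

#define IP4_HDR_LEN 20
#define IP6_HDR_LEN 40
#define UDP_HDR_LEN  8

static void
encap_lengths(size_t sctp_len, int ipv6, int use_udp,
    size_t *udp_len, size_t *ip_payload_len, size_t *total_len)
{
	*udp_len = use_udp ? UDP_HDR_LEN + sctp_len : 0;	/* -> uh_ulen */
	*ip_payload_len = use_udp ? *udp_len : sctp_len;	/* -> ip6_plen */
	*total_len = (ipv6 ? IP6_HDR_LEN : IP4_HDR_LEN) + *ip_payload_len;	/* -> ip_len */
}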
11033 static struct sctp_nets *
11034 sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
11036 struct sctp_nets *net, *hnet;
11037 int ms_goneby, highest_ms, state_overide = 0;
11039 (void)SCTP_GETTIME_TIMEVAL(now);
11042 SCTP_TCB_LOCK_ASSERT(stcb);
11043 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
11045 ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
11046 (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
11049 * Skip this guy from consideration if HB is off AND
11054 if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
11055 /* skip this dest net from consideration */
11058 if (net->last_sent_time.tv_sec) {
11059 /* Sent to so we subtract */
11060 ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
11062 /* Never been sent to */
11063 ms_goneby = 0x7fffffff;
11065 * When the address state is unconfirmed but still
11066 * considered reachable, we HB at a higher rate. Once it
11067 * goes confirmed OR reaches the "unreachable" state, then
11068 * we cut it back to HB at a more normal pace.
11070 if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
11076 if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
11077 (ms_goneby > highest_ms)) {
11078 highest_ms = ms_goneby;
11083 ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
11089 if (hnet && highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
11091 * Found the one with longest delay bounds OR it is
11092 * unconfirmed and still not marked unreachable.
11094 SCTPDBG(SCTP_DEBUG_OUTPUT4, "net:%p is the hb winner -", hnet);
11097 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT4,
11098 (struct sockaddr *)&hnet->ro._l_addr);
11100 SCTPDBG(SCTP_DEBUG_OUTPUT4, " none\n");
11103 /* update the timer now */
11104 hnet->last_sent_time = *now;
11107 /* Nothing to HB */
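/*
 * The selection above boils down to: among reachable destinations that are
 * allowed heartbeats, pick the one idle the longest, but only if it has been
 * idle for at least one RTO -- unless it is still unconfirmed, in which case
 * the RTO check is overridden (state_overide) so new addresses get probed
 * sooner.  A standalone sketch of that rule with hypothetical inputs:
 */
#include <stdint.h>
#include <stddef.h>

struct hb_cand {
	uint32_t ms_idle;	/* ms since last send; "huge" if never sent */
	uint32_t rto_ms;	/* current RTO of the path */
	int unconfirmed;	/* address not yet confirmed */
};

/* Returns the index to heartbeat, or -1 if no destination needs one. */
static int
pick_hb_dest(const struct hb_cand *c, size_t n)
{
	uint32_t best_idle = 0;
	int best = -1;
	size_t i;

	for (i = 0; i < n; i++) {
		int override = c[i].unconfirmed;

		if ((c[i].ms_idle >= c[i].rto_ms || override) &&
		    c[i].ms_idle > best_idle) {
			best_idle = c[i].ms_idle;
			best = (int)i;
		}
	}
	return (best);
}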
11112 sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net, int so_locked
11113 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11118 struct sctp_tmit_chunk *chk;
11119 struct sctp_nets *net;
11120 struct sctp_heartbeat_chunk *hb;
11121 struct timeval now;
11123 SCTP_TCB_LOCK_ASSERT(stcb);
11124 if (user_req == 0) {
11125 net = sctp_select_hb_destination(stcb, &now);
11128 * All are busy or there is none to send to, just start the
11131 if (stcb->asoc.state == 0) {
11134 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
11145 (void)SCTP_GETTIME_TIMEVAL(&now);
11147 switch (net->ro._l_addr.sa.sa_family) {
11159 sctp_alloc_a_chunk(stcb, chk);
11161 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11164 chk->copy_by_ref = 0;
11165 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11166 chk->rec.chunk_id.can_take_data = 1;
11167 chk->asoc = &stcb->asoc;
11168 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11170 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11171 if (chk->data == NULL) {
11172 sctp_free_a_chunk(stcb, chk, so_locked);
11175 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11176 SCTP_BUF_LEN(chk->data) = chk->send_size;
11177 chk->sent = SCTP_DATAGRAM_UNSENT;
11178 chk->snd_count = 0;
11180 atomic_add_int(&chk->whoTo->ref_count, 1);
11181 /* Now we have a mbuf that we can fill in with the details */
11182 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11183 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11184 /* fill out chunk header */
11185 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11186 hb->ch.chunk_flags = 0;
11187 hb->ch.chunk_length = htons(chk->send_size);
11188 /* Fill out hb parameter */
11189 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11190 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11191 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11192 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
11193 /* Record whether our user requested this one */
11194 hb->heartbeat.hb_info.user_req = user_req;
11195 hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
11196 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11197 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11199 * we only take from the entropy pool if the address is not
11202 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11203 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11205 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11206 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11208 switch (net->ro._l_addr.sa.sa_family) {
11211 memcpy(hb->heartbeat.hb_info.address,
11212 &net->ro._l_addr.sin.sin_addr,
11213 sizeof(net->ro._l_addr.sin.sin_addr));
11218 memcpy(hb->heartbeat.hb_info.address,
11219 &net->ro._l_addr.sin6.sin6_addr,
11220 sizeof(net->ro._l_addr.sin6.sin6_addr));
11229 * JRS 5/14/07 - In CMT PF, the T3 timer is used to track
11230 * PF-heartbeats. Because of this, threshold management is done by
11231 * the t3 timer handler, and does not need to be done upon the send
11232 * of a PF-heartbeat. If CMT PF is on and the destination to which a
11233 * heartbeat is being sent is in PF state, do NOT do threshold
11236 if ((stcb->asoc.sctp_cmt_pf == 0) ||
11237 ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) {
11238 /* ok we have a destination that needs a beat */
11239 /* let's do the threshold management Qiaobing style */
11240 if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
11241 stcb->asoc.max_send_times)) {
11243 * we have lost the association, in a way this is
11244 * quite bad since we really are one less time since
11245 * we really did not send yet. This is the down side
11246 * to the Q's style as defined in the RFC and not my
11247 * alternate style defined in the RFC.
11249 if (chk->data != NULL) {
11250 sctp_m_freem(chk->data);
11254 * Here we do NOT use the macro since the
11255 * association is now gone.
11258 sctp_free_remote_addr(chk->whoTo);
11261 sctp_free_a_chunk((struct sctp_tcb *)NULL, chk, so_locked);
11265 net->hb_responded = 0;
11266 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11267 stcb->asoc.ctrl_queue_cnt++;
11268 SCTP_STAT_INCR(sctps_sendheartbeat);
11270 * Call directly med level routine to put out the chunk. It will
11271 * always tumble out control chunks aka HB but it may even tumble out data too.
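/*
 * The hb_info parameter filled in above carries the send time
 * (time_value_1/time_value_2).  The peer echoes it back unchanged in the
 * HEARTBEAT-ACK, so the sender can subtract it from the current time to get
 * an RTT sample.  A minimal userland sketch of that subtraction
 * (gettimeofday stands in for SCTP_GETTIME_TIMEVAL):
 */
#include <stdint.h>
#include <sys/time.h>

static uint32_t
hb_rtt_ms(uint32_t echoed_sec, uint32_t echoed_usec)
{
	struct timeval now;

	(void)gettimeofday(&now, NULL);
	return ((uint32_t)((now.tv_sec - (time_t)echoed_sec) * 1000 +
	    (now.tv_usec - (long)echoed_usec) / 1000));
}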
11278 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11281 struct sctp_association *asoc;
11282 struct sctp_ecne_chunk *ecne;
11283 struct sctp_tmit_chunk *chk;
11285 asoc = &stcb->asoc;
11286 SCTP_TCB_LOCK_ASSERT(stcb);
11287 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11288 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11289 /* found a previous ECN_ECHO update it if needed */
11290 uint32_t cnt, ctsn;
11292 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11293 ctsn = ntohl(ecne->tsn);
11294 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11295 ecne->tsn = htonl(high_tsn);
11296 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11298 cnt = ntohl(ecne->num_pkts_since_cwr);
11300 ecne->num_pkts_since_cwr = htonl(cnt);
11304 /* nope could not find one to update so we must build one */
11305 sctp_alloc_a_chunk(stcb, chk);
11309 chk->copy_by_ref = 0;
11310 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11311 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11312 chk->rec.chunk_id.can_take_data = 0;
11313 chk->asoc = &stcb->asoc;
11314 chk->send_size = sizeof(struct sctp_ecne_chunk);
11315 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11316 if (chk->data == NULL) {
11317 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11320 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11321 SCTP_BUF_LEN(chk->data) = chk->send_size;
11322 chk->sent = SCTP_DATAGRAM_UNSENT;
11323 chk->snd_count = 0;
11325 atomic_add_int(&chk->whoTo->ref_count, 1);
11326 stcb->asoc.ecn_echo_cnt_onq++;
11327 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11328 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11329 ecne->ch.chunk_flags = 0;
11330 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11331 ecne->tsn = htonl(high_tsn);
11332 ecne->num_pkts_since_cwr = htonl(1);
11333 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11334 asoc->ctrl_queue_cnt++;
11338 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11339 struct mbuf *m, int iphlen, int bad_crc)
11341 struct sctp_association *asoc;
11342 struct sctp_pktdrop_chunk *drp;
11343 struct sctp_tmit_chunk *chk;
11350 struct ip6_hdr *ip6h;
11353 int fullsz = 0, extra = 0;
11356 struct sctp_chunkhdr *ch, chunk_buf;
11357 unsigned int chk_length;
11362 asoc = &stcb->asoc;
11363 SCTP_TCB_LOCK_ASSERT(stcb);
11364 if (asoc->peer_supports_pktdrop == 0) {
11366 * peer must declare support before I send one.
11370 if (stcb->sctp_socket == NULL) {
11373 sctp_alloc_a_chunk(stcb, chk);
11377 chk->copy_by_ref = 0;
11378 iph = mtod(m, struct ip *);
11380 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11383 switch (iph->ip_v) {
11387 len = chk->send_size = iph->ip_len;
11391 case IPV6_VERSION >> 4:
11393 ip6h = mtod(m, struct ip6_hdr *);
11394 len = chk->send_size = htons(ip6h->ip6_plen);
11400 /* Validate that we do not have an ABORT in here. */
11401 offset = iphlen + sizeof(struct sctphdr);
11402 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11403 sizeof(*ch), (uint8_t *) & chunk_buf);
11404 while (ch != NULL) {
11405 chk_length = ntohs(ch->chunk_length);
11406 if (chk_length < sizeof(*ch)) {
11407 /* break to abort land */
11410 switch (ch->chunk_type) {
11411 case SCTP_PACKET_DROPPED:
11412 case SCTP_ABORT_ASSOCIATION:
11413 case SCTP_INITIATION_ACK:
11415 * We don't respond with a PKT-DROP to an ABORT
11416 * or PKT-DROP. We also do not respond to an
11417 * INIT-ACK, because we can't know if the initiation
11418 * tag is correct or not.
11420 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11425 offset += SCTP_SIZE32(chk_length);
11426 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11427 sizeof(*ch), (uint8_t *) & chunk_buf);
11430 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11431 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11433 * only send 1 mtu worth, trim off the excess on the end.
11435 fullsz = len - extra;
11436 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11439 chk->asoc = &stcb->asoc;
11440 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11441 if (chk->data == NULL) {
11443 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11446 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11447 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11449 sctp_m_freem(chk->data);
11453 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11454 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11455 chk->book_size_scale = 0;
11457 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11458 drp->trunc_len = htons(fullsz);
11460 * Len is already adjusted to size minus overhead above take
11461 * out the pkt_drop chunk itself from it.
11463 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
11464 len = chk->send_size;
11466 /* no truncation needed */
11467 drp->ch.chunk_flags = 0;
11468 drp->trunc_len = htons(0);
11471 drp->ch.chunk_flags |= SCTP_BADCRC;
11473 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11474 SCTP_BUF_LEN(chk->data) = chk->send_size;
11475 chk->sent = SCTP_DATAGRAM_UNSENT;
11476 chk->snd_count = 0;
11478 /* we should hit here */
11481 chk->whoTo = asoc->primary_destination;
11483 atomic_add_int(&chk->whoTo->ref_count, 1);
11484 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11485 chk->rec.chunk_id.can_take_data = 1;
11486 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11487 drp->ch.chunk_length = htons(chk->send_size);
11488 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11492 drp->bottle_bw = htonl(spc);
11493 if (asoc->my_rwnd) {
11494 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11495 asoc->size_on_all_streams +
11496 asoc->my_rwnd_control_len +
11497 stcb->sctp_socket->so_rcv.sb_cc);
11500 * If my rwnd is 0, possibly from mbuf depletion as well as
11501 * space used, tell the peer there is NO space aka onq == bw
11503 drp->current_onq = htonl(spc);
11507 m_copydata(m, iphlen, len, (caddr_t)datap);
11508 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11509 asoc->ctrl_queue_cnt++;
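/*
 * The trimming above keeps a PACKET-DROPPED report inside one MTU: if the
 * dropped packet plus the fixed overhead will not fit, only an MTU's worth
 * is echoed, the original length goes into trunc_len, and the truncated flag
 * is set.  A standalone sketch of that decision (the flag value below is an
 * assumption standing in for SCTP_PACKET_TRUNCATED):
 */
#include <stdint.h>

#define PKTDROP_TRUNCATED 0x01	/* assumed stand-in for SCTP_PACKET_TRUNCATED */

/* Returns how many bytes of the dropped packet to copy into the report. */
static uint32_t
pktdrop_trim(uint32_t pkt_len, uint32_t mtu, uint32_t overhead,
    uint8_t *flags, uint16_t *trunc_len)
{
	if (pkt_len + overhead > mtu) {
		*flags = PKTDROP_TRUNCATED;
		*trunc_len = (uint16_t)pkt_len;	/* original length, for the peer */
		return (mtu - overhead);	/* only one MTU worth is echoed */
	}
	*flags = 0;
	*trunc_len = 0;			/* no truncation needed */
	return (pkt_len);
}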
11513 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11515 struct sctp_association *asoc;
11516 struct sctp_cwr_chunk *cwr;
11517 struct sctp_tmit_chunk *chk;
11519 asoc = &stcb->asoc;
11520 SCTP_TCB_LOCK_ASSERT(stcb);
11523 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11524 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11526 * found a previous CWR queued to same destination
11527 * update it if needed
11531 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11532 ctsn = ntohl(cwr->tsn);
11533 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11534 cwr->tsn = htonl(high_tsn);
11536 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11537 /* Make sure override is carried */
11538 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11543 sctp_alloc_a_chunk(stcb, chk);
11547 chk->copy_by_ref = 0;
11548 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11549 chk->rec.chunk_id.can_take_data = 1;
11550 chk->asoc = &stcb->asoc;
11551 chk->send_size = sizeof(struct sctp_cwr_chunk);
11552 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11553 if (chk->data == NULL) {
11554 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11557 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11558 SCTP_BUF_LEN(chk->data) = chk->send_size;
11559 chk->sent = SCTP_DATAGRAM_UNSENT;
11560 chk->snd_count = 0;
11562 atomic_add_int(&chk->whoTo->ref_count, 1);
11563 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11564 cwr->ch.chunk_type = SCTP_ECN_CWR;
11565 cwr->ch.chunk_flags = override;
11566 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11567 cwr->tsn = htonl(high_tsn);
11568 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11569 asoc->ctrl_queue_cnt++;
11573 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11574 int number_entries, uint16_t * list,
11575 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11577 int len, old_len, i;
11578 struct sctp_stream_reset_out_request *req_out;
11579 struct sctp_chunkhdr *ch;
11581 ch = mtod(chk->data, struct sctp_chunkhdr *);
11584 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11586 /* get to new offset for the param. */
11587 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11588 /* now how long will this param be? */
11589 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11590 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11591 req_out->ph.param_length = htons(len);
11592 req_out->request_seq = htonl(seq);
11593 req_out->response_seq = htonl(resp_seq);
11594 req_out->send_reset_at_tsn = htonl(last_sent);
11595 if (number_entries) {
11596 for (i = 0; i < number_entries; i++) {
11597 req_out->list_of_streams[i] = htons(list[i]);
11600 if (SCTP_SIZE32(len) > len) {
11602 * Need to worry about the pad we may end up adding to the
11603 * end. This is easy since the struct is either aligned to 4
11604 * bytes or 2 bytes off.
11606 req_out->list_of_streams[number_entries] = 0;
11608 /* now fix the chunk length */
11609 ch->chunk_length = htons(len + old_len);
11610 chk->book_size = len + old_len;
11611 chk->book_size_scale = 0;
11612 chk->send_size = SCTP_SIZE32(chk->book_size);
11613 SCTP_BUF_LEN(chk->data) = chk->send_size;
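/*
 * The padding logic above relies on the parameter length being rounded up to
 * a multiple of 4 (SCTP_SIZE32): a stream-reset request holds 16-bit stream
 * ids, so the length is either already aligned or exactly 2 bytes short,
 * which is why storing one extra zero entry is always enough padding.  A
 * standalone sketch of that arithmetic (the 16-byte request header size and
 * the exact SCTP_SIZE32 definition are assumptions):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
size32(uint32_t len)
{
	return ((len + 3) & ~(uint32_t)3);	/* round up to a multiple of 4 */
}

int
main(void)
{
	uint32_t hdr = 16;	/* assumed sizeof(struct sctp_stream_reset_out_request) */
	uint32_t len;
	int n;

	for (n = 0; n < 4; n++) {
		len = hdr + 2 * (uint32_t)n;	/* n 16-bit stream ids appended */
		printf("%d streams: param len %u, padded to %u (%u pad bytes)\n",
		    n, len, size32(len), size32(len) - len);
	}
	return (0);
}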
11619 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11620 int number_entries, uint16_t * list,
11623 int len, old_len, i;
11624 struct sctp_stream_reset_in_request *req_in;
11625 struct sctp_chunkhdr *ch;
11627 ch = mtod(chk->data, struct sctp_chunkhdr *);
11630 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11632 /* get to new offset for the param. */
11633 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11634 /* now how long will this param be? */
11635 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11636 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11637 req_in->ph.param_length = htons(len);
11638 req_in->request_seq = htonl(seq);
11639 if (number_entries) {
11640 for (i = 0; i < number_entries; i++) {
11641 req_in->list_of_streams[i] = htons(list[i]);
11644 if (SCTP_SIZE32(len) > len) {
11646 * Need to worry about the pad we may end up adding to the
11647 * end. This is easy since the struct is either aligned to 4
11648 * bytes or 2 bytes off.
11650 req_in->list_of_streams[number_entries] = 0;
11652 /* now fix the chunk length */
11653 ch->chunk_length = htons(len + old_len);
11654 chk->book_size = len + old_len;
11655 chk->book_size_scale = 0;
11656 chk->send_size = SCTP_SIZE32(chk->book_size);
11657 SCTP_BUF_LEN(chk->data) = chk->send_size;
11663 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11667 struct sctp_stream_reset_tsn_request *req_tsn;
11668 struct sctp_chunkhdr *ch;
11670 ch = mtod(chk->data, struct sctp_chunkhdr *);
11673 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11675 /* get to new offset for the param. */
11676 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11677 /* now how long will this param be? */
11678 len = sizeof(struct sctp_stream_reset_tsn_request);
11679 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11680 req_tsn->ph.param_length = htons(len);
11681 req_tsn->request_seq = htonl(seq);
11683 /* now fix the chunk length */
11684 ch->chunk_length = htons(len + old_len);
11685 chk->send_size = len + old_len;
11686 chk->book_size = SCTP_SIZE32(chk->send_size);
11687 chk->book_size_scale = 0;
11688 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11693 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11694 uint32_t resp_seq, uint32_t result)
11697 struct sctp_stream_reset_response *resp;
11698 struct sctp_chunkhdr *ch;
11700 ch = mtod(chk->data, struct sctp_chunkhdr *);
11703 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11705 /* get to new offset for the param. */
11706 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11707 /* now how long will this param be? */
11708 len = sizeof(struct sctp_stream_reset_response);
11709 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11710 resp->ph.param_length = htons(len);
11711 resp->response_seq = htonl(resp_seq);
11712 resp->result = htonl(result);
11714 /* now fix the chunk length */
11715 ch->chunk_length = htons(len + old_len);
11716 chk->book_size = len + old_len;
11717 chk->book_size_scale = 0;
11718 chk->send_size = SCTP_SIZE32(chk->book_size);
11719 SCTP_BUF_LEN(chk->data) = chk->send_size;
11726 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11727 uint32_t resp_seq, uint32_t result,
11728 uint32_t send_una, uint32_t recv_next)
11731 struct sctp_stream_reset_response_tsn *resp;
11732 struct sctp_chunkhdr *ch;
11734 ch = mtod(chk->data, struct sctp_chunkhdr *);
11737 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11739 /* get to new offset for the param. */
11740 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11741 /* now how long will this param be? */
11742 len = sizeof(struct sctp_stream_reset_response_tsn);
11743 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11744 resp->ph.param_length = htons(len);
11745 resp->response_seq = htonl(resp_seq);
11746 resp->result = htonl(result);
11747 resp->senders_next_tsn = htonl(send_una);
11748 resp->receivers_next_tsn = htonl(recv_next);
11750 /* now fix the chunk length */
11751 ch->chunk_length = htons(len + old_len);
11752 chk->book_size = len + old_len;
11753 chk->send_size = SCTP_SIZE32(chk->book_size);
11754 chk->book_size_scale = 0;
11755 SCTP_BUF_LEN(chk->data) = chk->send_size;
11760 sctp_add_a_stream(struct sctp_tmit_chunk *chk,
11765 struct sctp_chunkhdr *ch;
11766 struct sctp_stream_reset_add_strm *addstr;
11768 ch = mtod(chk->data, struct sctp_chunkhdr *);
11769 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11771 /* get to new offset for the param. */
11772 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11773 /* now how long will this param be? */
11774 len = sizeof(struct sctp_stream_reset_add_strm);
11777 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_STREAMS);
11778 addstr->ph.param_length = htons(len);
11779 addstr->request_seq = htonl(seq);
11780 addstr->number_of_streams = htons(adding);
11781 addstr->reserved = 0;
11783 /* now fix the chunk length */
11784 ch->chunk_length = htons(len + old_len);
11785 chk->send_size = len + old_len;
11786 chk->book_size = SCTP_SIZE32(chk->send_size);
11787 chk->book_size_scale = 0;
11788 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11793 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11794 int number_entries, uint16_t * list,
11795 uint8_t send_out_req,
11797 uint8_t send_in_req,
11798 uint8_t send_tsn_req,
11799 uint8_t add_stream,
11804 struct sctp_association *asoc;
11805 struct sctp_tmit_chunk *chk;
11806 struct sctp_chunkhdr *ch;
11809 asoc = &stcb->asoc;
11810 if (asoc->stream_reset_outstanding) {
11812 * Already one pending, must get ACK back to clear the flag.
11814 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11817 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11818 (add_stream == 0)) {
11819 /* nothing to do */
11820 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11823 if (send_tsn_req && (send_out_req || send_in_req)) {
11824 /* error, can't do that */
11825 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11828 sctp_alloc_a_chunk(stcb, chk);
11830 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11833 chk->copy_by_ref = 0;
11834 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11835 chk->rec.chunk_id.can_take_data = 0;
11836 chk->asoc = &stcb->asoc;
11837 chk->book_size = sizeof(struct sctp_chunkhdr);
11838 chk->send_size = SCTP_SIZE32(chk->book_size);
11839 chk->book_size_scale = 0;
11841 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11842 if (chk->data == NULL) {
11843 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11844 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11847 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11849 /* setup chunk parameters */
11850 chk->sent = SCTP_DATAGRAM_UNSENT;
11851 chk->snd_count = 0;
11852 chk->whoTo = asoc->primary_destination;
11853 atomic_add_int(&chk->whoTo->ref_count, 1);
11855 ch = mtod(chk->data, struct sctp_chunkhdr *);
11856 ch->chunk_type = SCTP_STREAM_RESET;
11857 ch->chunk_flags = 0;
11858 ch->chunk_length = htons(chk->book_size);
11859 SCTP_BUF_LEN(chk->data) = chk->send_size;
11861 seq = stcb->asoc.str_reset_seq_out;
11862 if (send_out_req) {
11863 sctp_add_stream_reset_out(chk, number_entries, list,
11864 seq, resp_seq, (stcb->asoc.sending_seq - 1));
11865 asoc->stream_reset_out_is_outstanding = 1;
11867 asoc->stream_reset_outstanding++;
11870 sctp_add_a_stream(chk, seq, adding);
11872 asoc->stream_reset_outstanding++;
11875 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11876 asoc->stream_reset_outstanding++;
11878 if (send_tsn_req) {
11879 sctp_add_stream_reset_tsn(chk, seq);
11880 asoc->stream_reset_outstanding++;
11882 asoc->str_reset = chk;
11884 /* insert the chunk for sending */
11885 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11888 asoc->ctrl_queue_cnt++;
11889 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
11894 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
11895 struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
11898 * Formulate the abort message, and send it back down.
11900 struct mbuf *o_pak;
11902 struct sctp_abort_msg *abm;
11904 struct udphdr *udp;
11905 int iphlen_out, len;
11908 struct ip *iph_out;
11912 struct ip6_hdr *ip6, *ip6_out;
11916 /* don't respond to ABORT with ABORT */
11917 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11919 sctp_m_freem(err_cause);
11922 iph = mtod(m, struct ip *);
11923 switch (iph->ip_v) {
11926 len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
11930 case IPV6_VERSION >> 4:
11931 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
11936 sctp_m_freem(err_cause);
11941 len += sizeof(struct udphdr);
11943 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
11944 if (mout == NULL) {
11946 sctp_m_freem(err_cause);
11950 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11951 SCTP_BUF_LEN(mout) = len;
11952 SCTP_BUF_NEXT(mout) = err_cause;
11953 if (m->m_flags & M_FLOWID) {
11954 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
11955 mout->m_flags |= M_FLOWID;
11963 switch (iph->ip_v) {
11966 iph_out = mtod(mout, struct ip *);
11968 /* Fill in the IP header for the ABORT */
11969 iph_out->ip_v = IPVERSION;
11970 iph_out->ip_hl = (sizeof(struct ip) / 4);
11971 iph_out->ip_tos = (u_char)0;
11972 iph_out->ip_id = 0;
11973 iph_out->ip_off = 0;
11974 iph_out->ip_ttl = MAXTTL;
11976 iph_out->ip_p = IPPROTO_UDP;
11978 iph_out->ip_p = IPPROTO_SCTP;
11980 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
11981 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
11982 /* let IP layer calculate this */
11983 iph_out->ip_sum = 0;
11985 iphlen_out = sizeof(*iph_out);
11986 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
11990 case IPV6_VERSION >> 4:
11991 ip6 = (struct ip6_hdr *)iph;
11992 ip6_out = mtod(mout, struct ip6_hdr *);
11994 /* Fill in the IP6 header for the ABORT */
11995 ip6_out->ip6_flow = ip6->ip6_flow;
11996 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11998 ip6_out->ip6_nxt = IPPROTO_UDP;
12000 ip6_out->ip6_nxt = IPPROTO_SCTP;
12002 ip6_out->ip6_src = ip6->ip6_dst;
12003 ip6_out->ip6_dst = ip6->ip6_src;
12005 iphlen_out = sizeof(*ip6_out);
12006 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
12010 /* Currently not supported */
12011 sctp_m_freem(mout);
12015 udp = (struct udphdr *)abm;
12017 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
12018 udp->uh_dport = port;
12019 /* set udp->uh_ulen later */
12021 iphlen_out += sizeof(struct udphdr);
12022 abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
12024 abm->sh.src_port = sh->dest_port;
12025 abm->sh.dest_port = sh->src_port;
12026 abm->sh.checksum = 0;
12028 abm->sh.v_tag = sh->v_tag;
12029 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
12031 abm->sh.v_tag = htonl(vtag);
12032 abm->msg.ch.chunk_flags = 0;
12034 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
12037 struct mbuf *m_tmp = err_cause;
12040 /* get length of the err_cause chain */
12041 while (m_tmp != NULL) {
12042 err_len += SCTP_BUF_LEN(m_tmp);
12043 m_tmp = SCTP_BUF_NEXT(m_tmp);
12045 len = SCTP_BUF_LEN(mout) + err_len;
12047 /* need pad at end of chunk */
12048 uint32_t cpthis = 0;
12051 padlen = 4 - (len % 4);
12052 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
12055 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
12057 len = SCTP_BUF_LEN(mout);
12058 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
12061 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
12063 sctp_m_freem(mout);
12067 if (iph_out != NULL) {
12071 /* zap the stack pointer to the route */
12072 bzero(&ro, sizeof ro);
12074 udp->uh_ulen = htons(len - sizeof(struct ip));
12075 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
12077 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
12078 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
12079 /* set IPv4 length */
12080 iph_out->ip_len = len;
12082 #ifdef SCTP_PACKET_LOGGING
12083 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12084 sctp_packet_log(mout, len);
12086 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12088 #if defined(SCTP_WITH_NO_CSUM)
12089 SCTP_STAT_INCR(sctps_sendnocrc);
12091 abm->sh.checksum = sctp_calculate_cksum(mout, iphlen_out);
12092 SCTP_STAT_INCR(sctps_sendswcrc);
12094 SCTP_ENABLE_UDP_CSUM(o_pak);
12096 #if defined(SCTP_WITH_NO_CSUM)
12097 SCTP_STAT_INCR(sctps_sendnocrc);
12099 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12100 mout->m_pkthdr.csum_data = 0;
12101 SCTP_STAT_INCR(sctps_sendhwcrc);
12104 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
12106 /* Free the route if we got one back */
12112 if (ip6_out != NULL) {
12113 struct route_in6 ro;
12115 struct ifnet *ifp = NULL;
12117 /* zap the stack pointer to the route */
12118 bzero(&ro, sizeof(ro));
12120 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
12122 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
12123 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
12124 ip6_out->ip6_plen = len - sizeof(*ip6_out);
12125 #ifdef SCTP_PACKET_LOGGING
12126 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12127 sctp_packet_log(mout, len);
12129 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12131 #if defined(SCTP_WITH_NO_CSUM)
12132 SCTP_STAT_INCR(sctps_sendnocrc);
12134 abm->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
12135 SCTP_STAT_INCR(sctps_sendswcrc);
12137 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
12138 udp->uh_sum = 0xffff;
12141 #if defined(SCTP_WITH_NO_CSUM)
12142 SCTP_STAT_INCR(sctps_sendnocrc);
12144 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12145 mout->m_pkthdr.csum_data = 0;
12146 SCTP_STAT_INCR(sctps_sendhwcrc);
12149 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
12151 /* Free the route if we got one back */
12156 SCTP_STAT_INCR(sctps_sendpackets);
12157 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12158 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
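/*
 * The vtag/flags choice above follows RFC 4960, Section 8.4: if the peer's
 * verification tag is known it is used with the T bit clear; otherwise the
 * tag from the offending packet is reflected and the T bit is set so the
 * receiver knows the tag was reflected.  A standalone sketch of that choice
 * (0x01 is assumed to be the T bit, i.e. the role of SCTP_HAD_NO_TCB):
 */
#include <stdint.h>
#include <arpa/inet.h>

#define T_BIT 0x01

static void
abort_vtag(uint32_t known_vtag, uint32_t received_vtag_net,
    uint32_t *out_vtag_net, uint8_t *chunk_flags)
{
	if (known_vtag == 0) {
		*out_vtag_net = received_vtag_net;	/* already network order */
		*chunk_flags = T_BIT;
	} else {
		*out_vtag_net = htonl(known_vtag);
		*chunk_flags = 0;
	}
}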
12162 sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
12163 uint32_t vrf_id, uint16_t port)
12165 struct mbuf *o_pak;
12166 struct sctphdr *sh, *sh_out;
12167 struct sctp_chunkhdr *ch;
12169 struct udphdr *udp = NULL;
12171 int iphlen_out, len;
12174 struct ip *iph_out;
12178 struct ip6_hdr *ip6, *ip6_out;
12182 iph = mtod(m, struct ip *);
12183 sh = (struct sctphdr *)((caddr_t)iph + iphlen);
12184 switch (iph->ip_v) {
12187 len = (sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
12191 case IPV6_VERSION >> 4:
12192 len = (sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
12202 len += sizeof(struct udphdr);
12204 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
12205 if (mout == NULL) {
12211 SCTP_BUF_RESV_UF(mout, max_linkhdr);
12212 SCTP_BUF_LEN(mout) = len;
12213 SCTP_BUF_NEXT(mout) = scm;
12214 if (m->m_flags & M_FLOWID) {
12215 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
12216 mout->m_flags |= M_FLOWID;
12224 switch (iph->ip_v) {
12227 iph_out = mtod(mout, struct ip *);
12229 /* Fill in the IP header for the OPERATION-ERROR */
12230 iph_out->ip_v = IPVERSION;
12231 iph_out->ip_hl = (sizeof(struct ip) / 4);
12232 iph_out->ip_tos = (u_char)0;
12233 iph_out->ip_id = 0;
12234 iph_out->ip_off = 0;
12235 iph_out->ip_ttl = MAXTTL;
12237 iph_out->ip_p = IPPROTO_UDP;
12239 iph_out->ip_p = IPPROTO_SCTP;
12241 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
12242 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
12243 /* let IP layer calculate this */
12244 iph_out->ip_sum = 0;
12246 iphlen_out = sizeof(struct ip);
12247 sh_out = (struct sctphdr *)((caddr_t)iph_out + iphlen_out);
12251 case IPV6_VERSION >> 4:
12252 ip6 = (struct ip6_hdr *)iph;
12253 ip6_out = mtod(mout, struct ip6_hdr *);
12255 /* Fill in the IP6 header for the OPERATION-ERROR */
12256 ip6_out->ip6_flow = ip6->ip6_flow;
12257 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
12259 ip6_out->ip6_nxt = IPPROTO_UDP;
12261 ip6_out->ip6_nxt = IPPROTO_SCTP;
12263 ip6_out->ip6_src = ip6->ip6_dst;
12264 ip6_out->ip6_dst = ip6->ip6_src;
12266 iphlen_out = sizeof(struct ip6_hdr);
12267 sh_out = (struct sctphdr *)((caddr_t)ip6_out + iphlen_out);
12271 /* Currently not supported */
12272 sctp_m_freem(mout);
12276 udp = (struct udphdr *)sh_out;
12278 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
12279 udp->uh_dport = port;
12280 /* set udp->uh_ulen later */
12282 iphlen_out += sizeof(struct udphdr);
12283 sh_out = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
12285 sh_out->src_port = sh->dest_port;
12286 sh_out->dest_port = sh->src_port;
12287 sh_out->v_tag = vtag;
12288 sh_out->checksum = 0;
12290 ch = (struct sctp_chunkhdr *)((caddr_t)sh_out + sizeof(struct sctphdr));
12291 ch->chunk_type = SCTP_OPERATION_ERROR;
12292 ch->chunk_flags = 0;
12295 struct mbuf *m_tmp = scm;
12298 /* get length of the err_cause chain */
12299 while (m_tmp != NULL) {
12300 cause_len += SCTP_BUF_LEN(m_tmp);
12301 m_tmp = SCTP_BUF_NEXT(m_tmp);
12303 len = SCTP_BUF_LEN(mout) + cause_len;
12304 if (cause_len % 4) {
12305 /* need pad at end of chunk */
12306 uint32_t cpthis = 0;
12309 padlen = 4 - (len % 4);
12310 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
12313 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
12315 len = SCTP_BUF_LEN(mout);
12316 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr));
12319 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
12321 sctp_m_freem(mout);
12325 if (iph_out != NULL) {
12329 /* zap the stack pointer to the route */
12330 bzero(&ro, sizeof ro);
12332 udp->uh_ulen = htons(len - sizeof(struct ip));
12333 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
12335 /* set IPv4 length */
12336 iph_out->ip_len = len;
12338 #ifdef SCTP_PACKET_LOGGING
12339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12340 sctp_packet_log(mout, len);
12342 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12344 #if defined(SCTP_WITH_NO_CSUM)
12345 SCTP_STAT_INCR(sctps_sendnocrc);
12347 sh_out->checksum = sctp_calculate_cksum(mout, iphlen_out);
12348 SCTP_STAT_INCR(sctps_sendswcrc);
12350 SCTP_ENABLE_UDP_CSUM(o_pak);
12352 #if defined(SCTP_WITH_NO_CSUM)
12353 SCTP_STAT_INCR(sctps_sendnocrc);
12355 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12356 mout->m_pkthdr.csum_data = 0;
12357 SCTP_STAT_INCR(sctps_sendhwcrc);
12360 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
12362 /* Free the route if we got one back */
12368 if (ip6_out != NULL) {
12369 struct route_in6 ro;
12371 struct ifnet *ifp = NULL;
12373 /* zap the stack pointer to the route */
12374 bzero(&ro, sizeof(ro));
12376 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
12378 ip6_out->ip6_plen = len - sizeof(*ip6_out);
12379 #ifdef SCTP_PACKET_LOGGING
12380 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12381 sctp_packet_log(mout, len);
12383 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12385 #if defined(SCTP_WITH_NO_CSUM)
12386 SCTP_STAT_INCR(sctps_sendnocrc);
12388 sh_out->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
12389 SCTP_STAT_INCR(sctps_sendswcrc);
12391 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
12392 udp->uh_sum = 0xffff;
12395 #if defined(SCTP_WITH_NO_CSUM)
12396 SCTP_STAT_INCR(sctps_sendnocrc);
12398 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12399 mout->m_pkthdr.csum_data = 0;
12400 SCTP_STAT_INCR(sctps_sendhwcrc);
12403 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
12405 /* Free the route if we got one back */
12410 SCTP_STAT_INCR(sctps_sendpackets);
12411 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12412 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
12415 static struct mbuf *
12416 sctp_copy_resume(struct sctp_stream_queue_pending *sp,
12418 struct sctp_sndrcvinfo *srcv,
12420 int user_marks_eor,
12423 struct mbuf **new_tail)
12427 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12428 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12430 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12433 *sndout = m_length(m, NULL);
12434 *new_tail = m_last(m);
12440 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12447 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12449 if (sp->data == NULL) {
12450 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12453 sp->tail_mbuf = m_last(sp->data);
12459 static struct sctp_stream_queue_pending *
12460 sctp_copy_it_in(struct sctp_tcb *stcb,
12461 struct sctp_association *asoc,
12462 struct sctp_sndrcvinfo *srcv,
12464 struct sctp_nets *net,
12466 int user_marks_eor,
12471 * This routine must be very careful in its work. Protocol
12472 * processing is up and running so care must be taken to spl...()
12473 * when you need to do something that may affect the stcb/asoc. The
12474 * sb is locked however. When data is copied the protocol processing
12475 * should be enabled since this is a slower operation...
12477 struct sctp_stream_queue_pending *sp = NULL;
12481 /* Now can we send this? */
12482 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12483 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12484 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12485 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12486 /* got data while shutting down */
12487 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12488 *error = ECONNRESET;
12491 sctp_alloc_a_strmoq(stcb, sp);
12493 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12498 sp->sender_all_done = 0;
12499 sp->sinfo_flags = srcv->sinfo_flags;
12500 sp->timetolive = srcv->sinfo_timetolive;
12501 sp->ppid = srcv->sinfo_ppid;
12502 sp->context = srcv->sinfo_context;
12504 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12506 sp->stream = srcv->sinfo_stream;
12507 sp->length = min(uio->uio_resid, max_send_len);
12508 if ((sp->length == (uint32_t) uio->uio_resid) &&
12509 ((user_marks_eor == 0) ||
12510 (srcv->sinfo_flags & SCTP_EOF) ||
12511 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12512 sp->msg_is_complete = 1;
12514 sp->msg_is_complete = 0;
12516 sp->sender_all_done = 0;
12517 sp->some_taken = 0;
12518 sp->put_last_out = 0;
12519 resv_in_first = sizeof(struct sctp_data_chunk);
12520 sp->data = sp->tail_mbuf = NULL;
12521 if (sp->length == 0) {
12525 if (srcv->sinfo_keynumber_valid) {
12526 sp->auth_keyid = srcv->sinfo_keynumber;
12528 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12530 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12531 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12532 sp->holds_key_ref = 1;
12534 *error = sctp_copy_one(sp, uio, resv_in_first);
12537 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12540 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12542 atomic_add_int(&sp->net->ref_count, 1);
12546 sctp_set_prsctp_policy(sp);
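/*
 * The msg_is_complete decision above: a copied fragment closes the user
 * message only if it consumed everything the user handed in AND either the
 * socket is not in explicit-EOR mode or the user marked this send with
 * SCTP_EOF/SCTP_EOR.  A standalone sketch of that predicate (the two flag
 * values below are stand-ins, not the real SCTP_EOF/SCTP_EOR values):
 */
#include <stdint.h>

#define FLAG_EOF 0x0100		/* stand-in for SCTP_EOF */
#define FLAG_EOR 0x0200		/* stand-in for SCTP_EOR */

static int
msg_is_complete(uint32_t copied, uint32_t uio_resid, int explicit_eor,
    uint16_t sinfo_flags)
{
	if (copied != uio_resid)
		return (0);		/* more user data still to copy */
	if (!explicit_eor)
		return (1);		/* whole send == whole message */
	return ((sinfo_flags & (FLAG_EOF | FLAG_EOR)) != 0);
}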
12554 sctp_sosend(struct socket *so,
12555 struct sockaddr *addr,
12558 struct mbuf *control,
12563 int error, use_sndinfo = 0;
12564 struct sctp_sndrcvinfo sndrcvninfo;
12565 struct sockaddr *addr_to_use;
12567 #if defined(INET) && defined(INET6)
12568 struct sockaddr_in sin;
12573 /* process cmsg snd/rcv info (maybe an assoc-id) */
12574 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12575 sizeof(sndrcvninfo))) {
12580 addr_to_use = addr;
12581 #if defined(INET) && defined(INET6)
12582 if ((addr) && (addr->sa_family == AF_INET6)) {
12583 struct sockaddr_in6 *sin6;
12585 sin6 = (struct sockaddr_in6 *)addr;
12586 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12587 in6_sin6_2_sin(&sin, sin6);
12588 addr_to_use = (struct sockaddr *)&sin;
12592 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12595 use_sndinfo ? &sndrcvninfo : NULL
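/*
 * The conversion above (in6_sin6_2_sin) exists because a caller may pass an
 * AF_INET6 sockaddr that really holds a v4-mapped IPv4 address
 * (::ffff:a.b.c.d); the lower send path wants a plain sockaddr_in in that
 * case.  A minimal userland sketch of the same check and copy (sin_len is
 * omitted; the helper name is hypothetical):
 */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static const struct sockaddr *
map_v4mapped(const struct sockaddr *addr, struct sockaddr_in *scratch)
{
	const struct sockaddr_in6 *sin6;

	if (addr == NULL || addr->sa_family != AF_INET6)
		return (addr);
	sin6 = (const struct sockaddr_in6 *)addr;
	if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr))
		return (addr);
	memset(scratch, 0, sizeof(*scratch));
	scratch->sin_family = AF_INET;
	scratch->sin_port = sin6->sin6_port;
	/* the IPv4 address is the last 4 bytes of the mapped IPv6 address */
	memcpy(&scratch->sin_addr, &sin6->sin6_addr.s6_addr[12], 4);
	return ((const struct sockaddr *)scratch);
}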
12603 sctp_lower_sosend(struct socket *so,
12604 struct sockaddr *addr,
12606 struct mbuf *i_pak,
12607 struct mbuf *control,
12609 struct sctp_sndrcvinfo *srcv
12614 unsigned int sndlen = 0, max_len;
12616 struct mbuf *top = NULL;
12617 int queue_only = 0, queue_only_for_init = 0;
12618 int free_cnt_applied = 0;
12620 int now_filled = 0;
12621 unsigned int inqueue_bytes = 0;
12622 struct sctp_block_entry be;
12623 struct sctp_inpcb *inp;
12624 struct sctp_tcb *stcb = NULL;
12625 struct timeval now;
12626 struct sctp_nets *net;
12627 struct sctp_association *asoc;
12628 struct sctp_inpcb *t_inp;
12629 int user_marks_eor;
12630 int create_lock_applied = 0;
12631 int nagle_applies = 0;
12632 int some_on_control = 0;
12633 int got_all_of_the_send = 0;
12634 int hold_tcblock = 0;
12635 int non_blocking = 0;
12636 uint32_t local_add_more, local_soresv = 0;
12638 uint16_t sinfo_flags;
12639 sctp_assoc_t sinfo_assoc_id;
12646 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12648 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12651 SCTP_RELEASE_PKT(i_pak);
12655 if ((uio == NULL) && (i_pak == NULL)) {
12656 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12659 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12660 atomic_add_int(&inp->total_sends, 1);
12662 if (uio->uio_resid < 0) {
12663 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12666 sndlen = uio->uio_resid;
12668 top = SCTP_HEADER_TO_CHAIN(i_pak);
12669 sndlen = SCTP_HEADER_LEN(i_pak);
12671 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12674 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12675 (inp->sctp_socket->so_qlimit)) {
12676 /* The listener can NOT send */
12677 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12682 * Pre-screen the address; if one is given, the sin_len
12683 * must be set correctly!
12686 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12688 switch (raddr->sa.sa_family) {
12691 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12692 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12696 port = raddr->sin.sin_port;
12701 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12702 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12706 port = raddr->sin6.sin6_port;
12710 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12711 error = EAFNOSUPPORT;
12718 sinfo_flags = srcv->sinfo_flags;
12719 sinfo_assoc_id = srcv->sinfo_assoc_id;
12720 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12721 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12722 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12726 if (srcv->sinfo_flags)
12727 SCTP_STAT_INCR(sctps_sends_with_flags);
12729 sinfo_flags = inp->def_send.sinfo_flags;
12730 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12732 if (sinfo_flags & SCTP_SENDALL) {
12733 /* it's a sendall */
12734 error = sctp_sendall(inp, uio, top, srcv);
12738 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12739 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12743 /* now we must find the assoc */
12744 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12745 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12746 SCTP_INP_RLOCK(inp);
12747 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12748 if (stcb == NULL) {
12749 SCTP_INP_RUNLOCK(inp);
12750 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12754 SCTP_TCB_LOCK(stcb);
12756 SCTP_INP_RUNLOCK(inp);
12757 } else if (sinfo_assoc_id) {
12758 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
12761 * Since we did not use findep we must
12762 * increment it, and if we don't find a tcb, decrement it.
12765 SCTP_INP_WLOCK(inp);
12766 SCTP_INP_INCR_REF(inp);
12767 SCTP_INP_WUNLOCK(inp);
12768 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12769 if (stcb == NULL) {
12770 SCTP_INP_WLOCK(inp);
12771 SCTP_INP_DECR_REF(inp);
12772 SCTP_INP_WUNLOCK(inp);
12777 if ((stcb == NULL) && (addr)) {
12778 /* Possible implicit send? */
12779 SCTP_ASOC_CREATE_LOCK(inp);
12780 create_lock_applied = 1;
12781 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12782 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12783 /* Should I really unlock ? */
12784 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12789 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12790 (addr->sa_family == AF_INET6)) {
12791 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12795 SCTP_INP_WLOCK(inp);
12796 SCTP_INP_INCR_REF(inp);
12797 SCTP_INP_WUNLOCK(inp);
12798 /* With the lock applied look again */
12799 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12800 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12801 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12803 if (stcb == NULL) {
12804 SCTP_INP_WLOCK(inp);
12805 SCTP_INP_DECR_REF(inp);
12806 SCTP_INP_WUNLOCK(inp);
12813 if (t_inp != inp) {
12814 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12819 if (stcb == NULL) {
12820 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
12821 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12822 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12826 if (addr == NULL) {
12827 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12832 * UDP style, we must go ahead and start the INIT
12837 if ((sinfo_flags & SCTP_ABORT) ||
12838 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12840 * User asks to abort a non-existent assoc,
12841 * or EOF a non-existent assoc with no data
12843 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12847 /* get an asoc/stcb struct */
12848 vrf_id = inp->def_vrf_id;
12850 if (create_lock_applied == 0) {
12851 panic("Error, should hold create lock and I don't?");
12854 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12857 if (stcb == NULL) {
12858 /* Error is setup for us in the call */
12862 if (create_lock_applied) {
12863 SCTP_ASOC_CREATE_UNLOCK(inp);
12864 create_lock_applied = 0;
12866 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12869 * Turn on queue only flag to prevent data from
12873 asoc = &stcb->asoc;
12874 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12875 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
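/*
 * The implicitly created association starts out in COOKIE-WAIT; the
 * INIT itself is deferred (queue_only_for_init) and sent further below
 * once the user's data has been queued.
 */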
12877 /* initialize authentication params for the assoc */
12878 sctp_initialize_auth_params(inp, stcb);
12881 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12882 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
12888 /* out with the INIT */
12889 queue_only_for_init = 1;
12891 * we may want to dig in after this call and adjust the MTU
12892 * value. It defaulted to 1500 (constant) but the ro
12893 * structure may now have an update and thus we may need to
12894 * change it BEFORE we append the message.
12898 asoc = &stcb->asoc;
12900 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12901 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12903 net = sctp_findnet(stcb, addr);
12906 if ((net == NULL) ||
12907 ((port != 0) && (port != stcb->rport))) {
12908 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12913 net = stcb->asoc.primary_destination;
12915 atomic_add_int(&stcb->total_sends, 1);
12916 /* Keep the stcb from being freed under our feet */
12917 atomic_add_int(&asoc->refcnt, 1);
12918 free_cnt_applied = 1;
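/*
 * Fragmentation is disabled on this endpoint (the
 * SCTP_PCB_FLAGS_NO_FRAGMENT feature, normally set via the
 * SCTP_DISABLE_FRAGMENTS socket option): a message that does not fit
 * into the smallest path MTU is rejected with EMSGSIZE instead of being
 * split into multiple DATA chunks.
 */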
12920 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12921 if (sndlen > asoc->smallest_mtu) {
12922 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12927 if ((SCTP_SO_IS_NBIO(so)
12928 || (flags & MSG_NBIO)
12932 /* would we block? */
12933 if (non_blocking) {
12934 if (hold_tcblock == 0) {
12935 SCTP_TCB_LOCK(stcb);
12938 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12939 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12940 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12941 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12942 if (sndlen > SCTP_SB_LIMIT_SND(so))
12945 error = EWOULDBLOCK;
12948 stcb->asoc.sb_send_resv += sndlen;
12949 SCTP_TCB_UNLOCK(stcb);
12952 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12954 local_soresv = sndlen;
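/*
 * local_soresv remembers the bytes reserved in sb_send_resv so the
 * reservation can be released again in the error/exit path at the end
 * of this function (atomic_subtract_int on sb_send_resv).
 */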
12955 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12956 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12957 error = ECONNRESET;
12960 if (create_lock_applied) {
12961 SCTP_ASOC_CREATE_UNLOCK(inp);
12962 create_lock_applied = 0;
12964 if (asoc->stream_reset_outstanding) {
12966 * Can't queue any data while stream reset is underway.
12968 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
12972 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12973 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12976 /* we are now done with all control */
12978 sctp_m_freem(control);
12981 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12982 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12983 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12984 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12985 if (srcv->sinfo_flags & SCTP_ABORT) {
12988 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12989 error = ECONNRESET;
12993 /* Ok, we will attempt a msgsnd :> */
12995 p->td_ru.ru_msgsnd++;
12997 /* Are we aborting? */
12998 if (srcv->sinfo_flags & SCTP_ABORT) {
13000 int tot_demand, tot_out = 0, max_out;
13002 SCTP_STAT_INCR(sctps_sends_with_abort);
13003 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
13004 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
13005 /* It has to be up before we abort */
13006 /* how big is the user initiated abort? */
13007 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13011 if (hold_tcblock) {
13012 SCTP_TCB_UNLOCK(stcb);
13016 struct mbuf *cntm = NULL;
13018 mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);
13022 tot_out += SCTP_BUF_LEN(cntm);
13023 cntm = SCTP_BUF_NEXT(cntm);
13026 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13028 /* Must fit in a MTU */
13030 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13031 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
13033 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13037 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
13040 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13044 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
13045 max_out -= sizeof(struct sctp_abort_msg);
13046 if (tot_out > max_out) {
13050 struct sctp_paramhdr *ph;
13052 /* now move forward the data pointer */
13053 ph = mtod(mm, struct sctp_paramhdr *);
13054 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
13055 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
13057 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
13059 error = uiomove((caddr_t)ph, (int)tot_out, uio);
13062 * Here, if we can't copy in the user's
13063 * data we still abort; we just don't get
13064 * to send the user's note :-0
13071 SCTP_BUF_NEXT(mm) = top;
13075 if (hold_tcblock == 0) {
13076 SCTP_TCB_LOCK(stcb);
13079 atomic_add_int(&stcb->asoc.refcnt, -1);
13080 free_cnt_applied = 0;
13081 /* release this lock, otherwise we hang on ourselves */
13082 sctp_abort_an_association(stcb->sctp_ep, stcb,
13083 SCTP_RESPONSE_TO_USER_REQ,
13084 mm, SCTP_SO_LOCKED);
13085 /* now relock the stcb so everything is sane */
13089 * In this case top is already chained to mm; avoid a double
13090 * free, since we free it below if top != NULL and the driver
13091 * would free it after sending the packet out
13098 /* Calculate the maximum we can send */
13099 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13100 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13101 if (non_blocking) {
13102 /* we already checked for non-blocking above. */
13105 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13110 if (hold_tcblock) {
13111 SCTP_TCB_UNLOCK(stcb);
13114 /* Is the stream no. valid? */
13115 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
13116 /* Invalid stream number */
13117 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13121 if (asoc->strmout == NULL) {
13122 /* huh? software error */
13123 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13127 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
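/*
 * Editor's sketch (assumption, not code from this file): explicit EOR
 * mode is normally enabled with
 *
 *	int on = 1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_EXPLICIT_EOR, &on, sizeof(on));
 *
 * after which a single user message may be handed down in several send
 * calls, the last one carrying SCTP_EOR in sinfo_flags; only then may a
 * message be larger than the socket send buffer checked just below.
 */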
13128 if ((user_marks_eor == 0) &&
13129 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13130 /* It will NEVER fit */
13131 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13135 if ((uio == NULL) && user_marks_eor) {
13137 * We do not support eeor mode for
13138 * sending with mbuf chains (like sendfile).
13140 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13144 if (user_marks_eor) {
13145 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13148 * For non-eeor the whole message must fit in
13149 * the socket send buffer.
13151 local_add_more = sndlen;
13154 if (non_blocking) {
13155 goto skip_preblock;
13157 if (((max_len <= local_add_more) &&
13158 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13160 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13161 /* No room right now ! */
13162 SOCKBUF_LOCK(&so->so_snd);
13163 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13164 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13165 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13166 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
13167 (unsigned int)SCTP_SB_LIMIT_SND(so),
13170 stcb->asoc.stream_queue_cnt,
13171 stcb->asoc.chunks_on_out_queue,
13172 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13173 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13174 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, so, asoc, sndlen);
13177 stcb->block_entry = &be;
13178 error = sbwait(&so->so_snd);
13179 stcb->block_entry = NULL;
13180 if (error || so->so_error || be.error) {
13183 error = so->so_error;
13188 SOCKBUF_UNLOCK(&so->so_snd);
13191 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13192 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13193 so, asoc, stcb->asoc.total_output_queue_size);
13195 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13198 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13200 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13201 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13205 SOCKBUF_UNLOCK(&so->so_snd);
13208 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13212 * sndlen covers the mbuf case, uio_resid covers the non-mbuf
13213 * case. NOTE: uio will be NULL when top (an mbuf chain) is passed
13216 if (srcv->sinfo_flags & SCTP_EOF) {
13217 got_all_of_the_send = 1;
13220 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13226 struct sctp_stream_queue_pending *sp;
13227 struct sctp_stream_out *strm;
13230 SCTP_TCB_SEND_LOCK(stcb);
13231 if ((asoc->stream_locked) &&
13232 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13233 SCTP_TCB_SEND_UNLOCK(stcb);
13234 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13238 SCTP_TCB_SEND_UNLOCK(stcb);
13240 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
13241 if (strm->last_msg_incomplete == 0) {
13243 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
13244 if ((sp == NULL) || (error)) {
13247 SCTP_TCB_SEND_LOCK(stcb);
13248 if (sp->msg_is_complete) {
13249 strm->last_msg_incomplete = 0;
13250 asoc->stream_locked = 0;
13253 * Just got locked to this guy in case of an
13256 strm->last_msg_incomplete = 1;
13257 asoc->stream_locked = 1;
13258 asoc->stream_locked_on = srcv->sinfo_stream;
13259 sp->sender_all_done = 0;
13261 sctp_snd_sb_alloc(stcb, sp->length);
13262 atomic_add_int(&asoc->stream_queue_cnt, 1);
13263 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
13264 sp->strseq = strm->next_sequence_sent;
13265 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
13266 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
13267 (uintptr_t) stcb, sp->length,
13268 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
13270 strm->next_sequence_sent++;
13272 SCTP_STAT_INCR(sctps_sends_with_unord);
13274 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13275 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13276 SCTP_TCB_SEND_UNLOCK(stcb);
13278 SCTP_TCB_SEND_LOCK(stcb);
13279 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13280 SCTP_TCB_SEND_UNLOCK(stcb);
13282 /* ???? Huh ??? last msg is gone */
13284 panic("Warning: Last msg marked incomplete, yet nothing left?");
13286 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13287 strm->last_msg_incomplete = 0;
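/*
 * Main copy loop: move user data from the uio into the pending stream
 * queue entry (sp) until either the whole message has been copied or
 * the send buffer fills up and we must block waiting for room.
 */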
13293 while (uio->uio_resid > 0) {
13294 /* How much room do we have? */
13295 struct mbuf *new_tail, *mm;
13297 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13298 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
13302 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13303 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13304 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13307 if (hold_tcblock) {
13308 SCTP_TCB_UNLOCK(stcb);
13311 mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
13312 if ((mm == NULL) || error) {
13318 /* Update the mbuf and count */
13319 SCTP_TCB_SEND_LOCK(stcb);
13320 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13322 * we need to get out. Peer probably
13326 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
13327 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13328 error = ECONNRESET;
13330 SCTP_TCB_SEND_UNLOCK(stcb);
13333 if (sp->tail_mbuf) {
13334 /* tack it to the end */
13335 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13336 sp->tail_mbuf = new_tail;
13338 /* A stolen mbuf */
13340 sp->tail_mbuf = new_tail;
13342 sctp_snd_sb_alloc(stcb, sndout);
13343 atomic_add_int(&sp->length, sndout);
13346 /* Did we reach EOR? */
13347 if ((uio->uio_resid == 0) &&
13348 ((user_marks_eor == 0) ||
13349 (srcv->sinfo_flags & SCTP_EOF) ||
13350 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13351 sp->msg_is_complete = 1;
13353 sp->msg_is_complete = 0;
13355 SCTP_TCB_SEND_UNLOCK(stcb);
13357 if (uio->uio_resid == 0) {
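/*
 * PR-SCTP: if the peer supports it and removable chunks sit on the
 * sent queue, sctp_prune_prsctp() below may abandon some of them to
 * free send-buffer space before we block waiting for room.
 */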
13362 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
13364 * This is ugly but we must assure locking
13367 if (hold_tcblock == 0) {
13368 SCTP_TCB_LOCK(stcb);
13371 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
13372 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
13373 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
13374 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13380 SCTP_TCB_UNLOCK(stcb);
13383 /* wait for space now */
13384 if (non_blocking) {
13385 /* Non-blocking I/O in place; don't block, just bail out */
13388 /* What about the INIT, send it maybe */
13389 if (queue_only_for_init) {
13390 if (hold_tcblock == 0) {
13391 SCTP_TCB_LOCK(stcb);
13394 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13395 /* a collision took us forward? */
13398 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13399 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13403 if ((net->flight_size > net->cwnd) &&
13404 (asoc->sctp_cmt_on_off == 0)) {
13405 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13407 } else if (asoc->ifp_had_enobuf) {
13408 SCTP_STAT_INCR(sctps_ifnomemqueued);
13409 if (net->flight_size > (2 * net->mtu)) {
13412 asoc->ifp_had_enobuf = 0;
13414 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13415 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
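/*
 * un_sent: bytes queued on the association but not yet in flight, plus
 * the per-chunk DATA overhead for messages still on the stream queues;
 * the Nagle test below uses it to decide whether a full segment's
 * worth of data is ready to go.
 */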
13416 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13417 (stcb->asoc.total_flight > 0) &&
13418 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13419 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13422 * Ok, Nagle is set on and we have data outstanding.
13423 * Don't send anything and let SACKs drive out the
13424 * data unless we have a "full" segment to send.
13426 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13427 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13429 SCTP_STAT_INCR(sctps_naglequeued);
13432 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13433 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13434 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13436 SCTP_STAT_INCR(sctps_naglesent);
13439 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13441 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13442 nagle_applies, un_sent);
13443 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13444 stcb->asoc.total_flight,
13445 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13447 if (queue_only_for_init)
13448 queue_only_for_init = 0;
13449 if ((queue_only == 0) && (nagle_applies == 0)) {
13451 * need to start chunk output
13452 * before blocking. Note that if
13453 * a lock is already applied, then
13454 * the input via the net is happening
13455 * and I don't need to start output :-D
13457 if (hold_tcblock == 0) {
13458 if (SCTP_TCB_TRYLOCK(stcb)) {
13460 sctp_chunk_output(inp,
13462 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13465 sctp_chunk_output(inp,
13467 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13469 if (hold_tcblock == 1) {
13470 SCTP_TCB_UNLOCK(stcb);
13474 SOCKBUF_LOCK(&so->so_snd);
13476 * This is a bit strange, but I think it will
13477 * work. The total_output_queue_size is locked and
13478 * protected by the TCB_LOCK, which we just released.
13479 * There is a race that can occur between releasing it
13480 * above and our acquiring the socket lock, where SACKs
13481 * come in but we have not put the SB_WAIT on the
13482 * so_snd buffer to get the wakeup. After the LOCK
13483 * is applied the sack_processing will also need to
13484 * LOCK the so->so_snd to do the actual sowwakeup(). So
13485 * once we have the socket buffer lock if we recheck the
13486 * size we KNOW we will get to sleep safely with the
13487 * wakeup flag in place.
13489 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
13490 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13491 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13492 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13493 so, asoc, uio->uio_resid);
13496 stcb->block_entry = &be;
13497 error = sbwait(&so->so_snd);
13498 stcb->block_entry = NULL;
13500 if (error || so->so_error || be.error) {
13503 error = so->so_error;
13508 SOCKBUF_UNLOCK(&so->so_snd);
13511 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13512 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13513 so, asoc, stcb->asoc.total_output_queue_size);
13516 SOCKBUF_UNLOCK(&so->so_snd);
13517 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13521 SCTP_TCB_SEND_LOCK(stcb);
13523 if (sp->msg_is_complete == 0) {
13524 strm->last_msg_incomplete = 1;
13525 asoc->stream_locked = 1;
13526 asoc->stream_locked_on = srcv->sinfo_stream;
13528 sp->sender_all_done = 1;
13529 strm->last_msg_incomplete = 0;
13530 asoc->stream_locked = 0;
13533 SCTP_PRINTF("Huh no sp TSNH?\n");
13534 strm->last_msg_incomplete = 0;
13535 asoc->stream_locked = 0;
13537 SCTP_TCB_SEND_UNLOCK(stcb);
13538 if (uio->uio_resid == 0) {
13539 got_all_of_the_send = 1;
13542 /* We send in a 0, since we do NOT have any locks */
13543 error = sctp_msg_append(stcb, net, top, srcv, 0);
13545 if (srcv->sinfo_flags & SCTP_EOF) {
13547 * This should only happen for Panda for the mbuf
13548 * send case, which does NOT yet support EEOR mode.
13549 * Thus, we can just set this flag to do the proper
13552 got_all_of_the_send = 1;
13560 if ((srcv->sinfo_flags & SCTP_EOF) &&
13561 (got_all_of_the_send == 1) &&
13562 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
13565 SCTP_STAT_INCR(sctps_sends_with_eof);
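/*
 * SCTP_EOF on a one-to-many (UDP-style) socket: once all of this
 * message has been queued, start a graceful shutdown of the
 * association - either send SHUTDOWN right away if nothing is pending,
 * or mark SHUTDOWN_PENDING so it goes out when the queues drain. A
 * user-level call such as sctp_sendmsg(..., SCTP_EOF, ...) ends up
 * here (editor's note).
 */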
13567 if (hold_tcblock == 0) {
13568 SCTP_TCB_LOCK(stcb);
13571 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
13572 if (TAILQ_EMPTY(&asoc->send_queue) &&
13573 TAILQ_EMPTY(&asoc->sent_queue) &&
13575 if (asoc->locked_on_sending) {
13578 /* there is nothing queued to send, so I'm done... */
13579 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13580 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13581 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13582 /* only send SHUTDOWN the first time through */
13583 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
13584 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
13585 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13587 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
13588 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
13589 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13590 asoc->primary_destination);
13591 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13592 asoc->primary_destination);
13596 * we still got (or just got) data to send, so set
13600 * XXX sockets draft says that SCTP_EOF should be
13601 * sent with no data. Currently, we will allow user
13602 * data to be sent first and move to
13605 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13606 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13607 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13608 if (hold_tcblock == 0) {
13609 SCTP_TCB_LOCK(stcb);
13612 if (asoc->locked_on_sending) {
13613 /* Locked to send out the data */
13614 struct sctp_stream_queue_pending *sp;
13616 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
13618 if ((sp->length == 0) && (sp->msg_is_complete == 0))
13619 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
13622 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
13623 if (TAILQ_EMPTY(&asoc->send_queue) &&
13624 TAILQ_EMPTY(&asoc->sent_queue) &&
13625 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13627 if (free_cnt_applied) {
13628 atomic_add_int(&stcb->asoc.refcnt, -1);
13629 free_cnt_applied = 0;
13631 sctp_abort_an_association(stcb->sctp_ep, stcb,
13632 SCTP_RESPONSE_TO_USER_REQ,
13633 NULL, SCTP_SO_LOCKED);
13635 * now relock the stcb so everything
13642 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13643 asoc->primary_destination);
13644 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13649 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13650 some_on_control = 1;
13652 if (queue_only_for_init) {
13653 if (hold_tcblock == 0) {
13654 SCTP_TCB_LOCK(stcb);
13657 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13658 /* a collision took us forward? */
13661 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13662 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
13666 if ((net->flight_size > net->cwnd) &&
13667 (stcb->asoc.sctp_cmt_on_off == 0)) {
13668 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13670 } else if (asoc->ifp_had_enobuf) {
13671 SCTP_STAT_INCR(sctps_ifnomemqueued);
13672 if (net->flight_size > (2 * net->mtu)) {
13675 asoc->ifp_had_enobuf = 0;
13677 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
13678 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
13679 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13680 (stcb->asoc.total_flight > 0) &&
13681 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13682 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13684 * Ok, Nagle is set on and we have data outstanding.
13685 * Don't send anything and let SACKs drive out the
13686 * data unless we have a "full" segment to send.
13688 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13689 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13691 SCTP_STAT_INCR(sctps_naglequeued);
13694 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13695 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13696 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13698 SCTP_STAT_INCR(sctps_naglesent);
13701 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13702 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13703 nagle_applies, un_sent);
13704 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13705 stcb->asoc.total_flight,
13706 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13708 if (queue_only_for_init)
13709 queue_only_for_init = 0;
13710 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13711 /* we can attempt to send too. */
13712 if (hold_tcblock == 0) {
13714 * If there is activity recv'ing sacks no need to
13717 if (SCTP_TCB_TRYLOCK(stcb)) {
13718 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13722 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13724 } else if ((queue_only == 0) &&
13725 (stcb->asoc.peers_rwnd == 0) &&
13726 (stcb->asoc.total_flight == 0)) {
13727 /* We get to have a probe outstanding */
13728 if (hold_tcblock == 0) {
13730 SCTP_TCB_LOCK(stcb);
13732 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13733 } else if (some_on_control) {
13734 int num_out, reason, frag_point;
13736 /* Here we do control only */
13737 if (hold_tcblock == 0) {
13739 SCTP_TCB_LOCK(stcb);
13741 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13742 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13743 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13745 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13746 queue_only, stcb->asoc.peers_rwnd, un_sent,
13747 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13748 stcb->asoc.total_output_queue_size, error);
13753 if (local_soresv && stcb) {
13754 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13757 if (create_lock_applied) {
13758 SCTP_ASOC_CREATE_UNLOCK(inp);
13759 create_lock_applied = 0;
13761 if ((stcb) && hold_tcblock) {
13762 SCTP_TCB_UNLOCK(stcb);
13764 if (stcb && free_cnt_applied) {
13765 atomic_add_int(&stcb->asoc.refcnt, -1);
13769 if (mtx_owned(&stcb->tcb_mtx)) {
13770 panic("Leaving with tcb mtx owned?");
13772 if (mtx_owned(&stcb->tcb_send_mtx)) {
13773 panic("Leaving with tcb send mtx owned?");
13779 sctp_validate_no_locks(inp);
13781 printf("Warning - inp is NULL so cant validate locks\n");
13788 sctp_m_freem(control);
13795 * generate an AUTHentication chunk, if required
13798 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13799 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
13800 struct sctp_tcb *stcb, uint8_t chunk)
13802 struct mbuf *m_auth;
13803 struct sctp_auth_chunk *auth;
13806 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13810 /* sysctl disabled auth? */
13811 if (SCTP_BASE_SYSCTL(sctp_auth_disable))
13814 /* peer doesn't do auth... */
13815 if (!stcb->asoc.peer_supports_auth) {
13818 /* does the requested chunk require auth? */
13819 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13822 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
13823 if (m_auth == NULL) {
13827 /* reserve some space if this will be the first mbuf */
13829 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13830 /* fill in the AUTH chunk details */
13831 auth = mtod(m_auth, struct sctp_auth_chunk *);
13832 bzero(auth, sizeof(*auth));
13833 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13834 auth->ch.chunk_flags = 0;
13835 chunk_len = sizeof(*auth) +
13836 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13837 auth->ch.chunk_length = htons(chunk_len);
13838 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13839 /* key id and hmac digest will be computed and filled in upon send */
13841 /* save the offset where the auth was inserted into the chain */
13848 *offset += SCTP_BUF_LEN(cn);
13849 cn = SCTP_BUF_NEXT(cn);
13854 /* update length and return pointer to the auth chunk */
13855 SCTP_BUF_LEN(m_auth) = chunk_len;
13856 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13857 if (auth_ret != NULL)
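/*
 * Return non-zero when the given IPv6 source address is covered by an
 * ND prefix whose advertising routers include the route's gateway,
 * i.e. the chosen source address is consistent with the next hop.
 */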
13865 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
13867 struct nd_prefix *pfx = NULL;
13868 struct nd_pfxrouter *pfxrtr = NULL;
13869 struct sockaddr_in6 gw6;
13871 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13874 /* get prefix entry of address */
13875 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13876 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13878 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13879 &src6->sin6_addr, &pfx->ndpr_mask))
13882 /* no prefix entry in the prefix list */
13884 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13885 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13888 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13889 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13891 /* search for the installed gateway among the prefix entry's advertising routers */
13892 for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
13893 pfxrtr->pfr_next) {
13894 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13895 gw6.sin6_family = AF_INET6;
13896 gw6.sin6_len = sizeof(struct sockaddr_in6);
13897 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13898 sizeof(struct in6_addr));
13899 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13900 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13901 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13902 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13903 if (sctp_cmpaddr((struct sockaddr *)&gw6,
13904 ro->ro_rt->rt_gateway)) {
13905 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13909 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
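/*
 * Return non-zero when the IPv4 source address and the route's gateway
 * fall into the same subnet (masked with the source interface's
 * netmask), i.e. the gateway is directly reachable from that source.
 */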
13916 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
13919 struct sockaddr_in *sin, *mask;
13920 struct ifaddr *ifa;
13921 struct in_addr srcnetaddr, gwnetaddr;
13923 if (ro == NULL || ro->ro_rt == NULL ||
13924 sifa->address.sa.sa_family != AF_INET) {
13927 ifa = (struct ifaddr *)sifa->ifa;
13928 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13929 sin = (struct sockaddr_in *)&sifa->address.sin;
13930 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13931 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13932 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13933 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13935 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13936 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13937 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13938 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13939 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13940 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {