/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#define SCTP_MAX_GAPS_INARRAY 4

struct sack_track {
    uint8_t right_edge;     /* mergeable on the right edge */
    uint8_t left_edge;      /* mergeable on the left edge */
    uint8_t num_entries;
    uint8_t spare;
    struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};
struct sack_track sack_array[256] = {
    {0, 0, 0, 0,  /* 0x00 */
    {1, 0, 1, 0,  /* 0x01 */
    {0, 0, 1, 0,  /* 0x02 */
    {1, 0, 1, 0,  /* 0x03 */
    {0, 0, 1, 0,  /* 0x04 */
    {1, 0, 2, 0,  /* 0x05 */
    {0, 0, 1, 0,  /* 0x06 */
    {1, 0, 1, 0,  /* 0x07 */
    {0, 0, 1, 0,  /* 0x08 */
    {1, 0, 2, 0,  /* 0x09 */
    {0, 0, 2, 0,  /* 0x0a */
    {1, 0, 2, 0,  /* 0x0b */
    {0, 0, 1, 0,  /* 0x0c */
    {1, 0, 2, 0,  /* 0x0d */
    {0, 0, 1, 0,  /* 0x0e */
    {1, 0, 1, 0,  /* 0x0f */
    {0, 0, 1, 0,  /* 0x10 */
    {1, 0, 2, 0,  /* 0x11 */
    {0, 0, 2, 0,  /* 0x12 */
    {1, 0, 2, 0,  /* 0x13 */
    {0, 0, 2, 0,  /* 0x14 */
    {1, 0, 3, 0,  /* 0x15 */
    {0, 0, 2, 0,  /* 0x16 */
    {1, 0, 2, 0,  /* 0x17 */
    {0, 0, 1, 0,  /* 0x18 */
    {1, 0, 2, 0,  /* 0x19 */
    {0, 0, 2, 0,  /* 0x1a */
    {1, 0, 2, 0,  /* 0x1b */
    {0, 0, 1, 0,  /* 0x1c */
    {1, 0, 2, 0,  /* 0x1d */
    {0, 0, 1, 0,  /* 0x1e */
    {1, 0, 1, 0,  /* 0x1f */
    {0, 0, 1, 0,  /* 0x20 */
    {1, 0, 2, 0,  /* 0x21 */
    {0, 0, 2, 0,  /* 0x22 */
    {1, 0, 2, 0,  /* 0x23 */
    {0, 0, 2, 0,  /* 0x24 */
    {1, 0, 3, 0,  /* 0x25 */
    {0, 0, 2, 0,  /* 0x26 */
    {1, 0, 2, 0,  /* 0x27 */
    {0, 0, 2, 0,  /* 0x28 */
    {1, 0, 3, 0,  /* 0x29 */
    {0, 0, 3, 0,  /* 0x2a */
    {1, 0, 3, 0,  /* 0x2b */
    {0, 0, 2, 0,  /* 0x2c */
    {1, 0, 3, 0,  /* 0x2d */
    {0, 0, 2, 0,  /* 0x2e */
    {1, 0, 2, 0,  /* 0x2f */
    {0, 0, 1, 0,  /* 0x30 */
    {1, 0, 2, 0,  /* 0x31 */
    {0, 0, 2, 0,  /* 0x32 */
    {1, 0, 2, 0,  /* 0x33 */
    {0, 0, 2, 0,  /* 0x34 */
    {1, 0, 3, 0,  /* 0x35 */
    {0, 0, 2, 0,  /* 0x36 */
    {1, 0, 2, 0,  /* 0x37 */
    {0, 0, 1, 0,  /* 0x38 */
    {1, 0, 2, 0,  /* 0x39 */
    {0, 0, 2, 0,  /* 0x3a */
    {1, 0, 2, 0,  /* 0x3b */
    {0, 0, 1, 0,  /* 0x3c */
    {1, 0, 2, 0,  /* 0x3d */
    {0, 0, 1, 0,  /* 0x3e */
    {1, 0, 1, 0,  /* 0x3f */
    {0, 0, 1, 0,  /* 0x40 */
    {1, 0, 2, 0,  /* 0x41 */
    {0, 0, 2, 0,  /* 0x42 */
    {1, 0, 2, 0,  /* 0x43 */
    {0, 0, 2, 0,  /* 0x44 */
    {1, 0, 3, 0,  /* 0x45 */
    {0, 0, 2, 0,  /* 0x46 */
    {1, 0, 2, 0,  /* 0x47 */
    {0, 0, 2, 0,  /* 0x48 */
    {1, 0, 3, 0,  /* 0x49 */
    {0, 0, 3, 0,  /* 0x4a */
    {1, 0, 3, 0,  /* 0x4b */
    {0, 0, 2, 0,  /* 0x4c */
    {1, 0, 3, 0,  /* 0x4d */
    {0, 0, 2, 0,  /* 0x4e */
    {1, 0, 2, 0,  /* 0x4f */
    {0, 0, 2, 0,  /* 0x50 */
    {1, 0, 3, 0,  /* 0x51 */
    {0, 0, 3, 0,  /* 0x52 */
    {1, 0, 3, 0,  /* 0x53 */
    {0, 0, 3, 0,  /* 0x54 */
    {1, 0, 4, 0,  /* 0x55 */
    {0, 0, 3, 0,  /* 0x56 */
    {1, 0, 3, 0,  /* 0x57 */
    {0, 0, 2, 0,  /* 0x58 */
    {1, 0, 3, 0,  /* 0x59 */
    {0, 0, 3, 0,  /* 0x5a */
    {1, 0, 3, 0,  /* 0x5b */
    {0, 0, 2, 0,  /* 0x5c */
    {1, 0, 3, 0,  /* 0x5d */
    {0, 0, 2, 0,  /* 0x5e */
    {1, 0, 2, 0,  /* 0x5f */
    {0, 0, 1, 0,  /* 0x60 */
    {1, 0, 2, 0,  /* 0x61 */
    {0, 0, 2, 0,  /* 0x62 */
    {1, 0, 2, 0,  /* 0x63 */
    {0, 0, 2, 0,  /* 0x64 */
    {1, 0, 3, 0,  /* 0x65 */
    {0, 0, 2, 0,  /* 0x66 */
    {1, 0, 2, 0,  /* 0x67 */
    {0, 0, 2, 0,  /* 0x68 */
    {1, 0, 3, 0,  /* 0x69 */
    {0, 0, 3, 0,  /* 0x6a */
    {1, 0, 3, 0,  /* 0x6b */
    {0, 0, 2, 0,  /* 0x6c */
    {1, 0, 3, 0,  /* 0x6d */
    {0, 0, 2, 0,  /* 0x6e */
    {1, 0, 2, 0,  /* 0x6f */
    {0, 0, 1, 0,  /* 0x70 */
    {1, 0, 2, 0,  /* 0x71 */
    {0, 0, 2, 0,  /* 0x72 */
    {1, 0, 2, 0,  /* 0x73 */
    {0, 0, 2, 0,  /* 0x74 */
    {1, 0, 3, 0,  /* 0x75 */
    {0, 0, 2, 0,  /* 0x76 */
    {1, 0, 2, 0,  /* 0x77 */
    {0, 0, 1, 0,  /* 0x78 */
    {1, 0, 2, 0,  /* 0x79 */
    {0, 0, 2, 0,  /* 0x7a */
    {1, 0, 2, 0,  /* 0x7b */
    {0, 0, 1, 0,  /* 0x7c */
    {1, 0, 2, 0,  /* 0x7d */
    {0, 0, 1, 0,  /* 0x7e */
    {1, 0, 1, 0,  /* 0x7f */
    {0, 1, 1, 0,  /* 0x80 */
    {1, 1, 2, 0,  /* 0x81 */
    {0, 1, 2, 0,  /* 0x82 */
    {1, 1, 2, 0,  /* 0x83 */
    {0, 1, 2, 0,  /* 0x84 */
    {1, 1, 3, 0,  /* 0x85 */
    {0, 1, 2, 0,  /* 0x86 */
    {1, 1, 2, 0,  /* 0x87 */
    {0, 1, 2, 0,  /* 0x88 */
    {1, 1, 3, 0,  /* 0x89 */
    {0, 1, 3, 0,  /* 0x8a */
    {1, 1, 3, 0,  /* 0x8b */
    {0, 1, 2, 0,  /* 0x8c */
    {1, 1, 3, 0,  /* 0x8d */
    {0, 1, 2, 0,  /* 0x8e */
    {1, 1, 2, 0,  /* 0x8f */
    {0, 1, 2, 0,  /* 0x90 */
    {1, 1, 3, 0,  /* 0x91 */
    {0, 1, 3, 0,  /* 0x92 */
    {1, 1, 3, 0,  /* 0x93 */
    {0, 1, 3, 0,  /* 0x94 */
    {1, 1, 4, 0,  /* 0x95 */
    {0, 1, 3, 0,  /* 0x96 */
    {1, 1, 3, 0,  /* 0x97 */
    {0, 1, 2, 0,  /* 0x98 */
    {1, 1, 3, 0,  /* 0x99 */
    {0, 1, 3, 0,  /* 0x9a */
    {1, 1, 3, 0,  /* 0x9b */
    {0, 1, 2, 0,  /* 0x9c */
    {1, 1, 3, 0,  /* 0x9d */
    {0, 1, 2, 0,  /* 0x9e */
    {1, 1, 2, 0,  /* 0x9f */
    {0, 1, 2, 0,  /* 0xa0 */
    {1, 1, 3, 0,  /* 0xa1 */
    {0, 1, 3, 0,  /* 0xa2 */
    {1, 1, 3, 0,  /* 0xa3 */
    {0, 1, 3, 0,  /* 0xa4 */
    {1, 1, 4, 0,  /* 0xa5 */
    {0, 1, 3, 0,  /* 0xa6 */
    {1, 1, 3, 0,  /* 0xa7 */
    {0, 1, 3, 0,  /* 0xa8 */
    {1, 1, 4, 0,  /* 0xa9 */
    {0, 1, 4, 0,  /* 0xaa */
    {1, 1, 4, 0,  /* 0xab */
    {0, 1, 3, 0,  /* 0xac */
    {1, 1, 4, 0,  /* 0xad */
    {0, 1, 3, 0,  /* 0xae */
    {1, 1, 3, 0,  /* 0xaf */
    {0, 1, 2, 0,  /* 0xb0 */
    {1, 1, 3, 0,  /* 0xb1 */
    {0, 1, 3, 0,  /* 0xb2 */
    {1, 1, 3, 0,  /* 0xb3 */
    {0, 1, 3, 0,  /* 0xb4 */
    {1, 1, 4, 0,  /* 0xb5 */
    {0, 1, 3, 0,  /* 0xb6 */
    {1, 1, 3, 0,  /* 0xb7 */
    {0, 1, 2, 0,  /* 0xb8 */
    {1, 1, 3, 0,  /* 0xb9 */
    {0, 1, 3, 0,  /* 0xba */
    {1, 1, 3, 0,  /* 0xbb */
    {0, 1, 2, 0,  /* 0xbc */
    {1, 1, 3, 0,  /* 0xbd */
    {0, 1, 2, 0,  /* 0xbe */
    {1, 1, 2, 0,  /* 0xbf */
    {0, 1, 1, 0,  /* 0xc0 */
    {1, 1, 2, 0,  /* 0xc1 */
    {0, 1, 2, 0,  /* 0xc2 */
    {1, 1, 2, 0,  /* 0xc3 */
    {0, 1, 2, 0,  /* 0xc4 */
    {1, 1, 3, 0,  /* 0xc5 */
    {0, 1, 2, 0,  /* 0xc6 */
    {1, 1, 2, 0,  /* 0xc7 */
    {0, 1, 2, 0,  /* 0xc8 */
    {1, 1, 3, 0,  /* 0xc9 */
    {0, 1, 3, 0,  /* 0xca */
    {1, 1, 3, 0,  /* 0xcb */
    {0, 1, 2, 0,  /* 0xcc */
    {1, 1, 3, 0,  /* 0xcd */
    {0, 1, 2, 0,  /* 0xce */
    {1, 1, 2, 0,  /* 0xcf */
    {0, 1, 2, 0,  /* 0xd0 */
    {1, 1, 3, 0,  /* 0xd1 */
    {0, 1, 3, 0,  /* 0xd2 */
    {1, 1, 3, 0,  /* 0xd3 */
    {0, 1, 3, 0,  /* 0xd4 */
    {1, 1, 4, 0,  /* 0xd5 */
    {0, 1, 3, 0,  /* 0xd6 */
    {1, 1, 3, 0,  /* 0xd7 */
    {0, 1, 2, 0,  /* 0xd8 */
    {1, 1, 3, 0,  /* 0xd9 */
    {0, 1, 3, 0,  /* 0xda */
    {1, 1, 3, 0,  /* 0xdb */
    {0, 1, 2, 0,  /* 0xdc */
    {1, 1, 3, 0,  /* 0xdd */
    {0, 1, 2, 0,  /* 0xde */
    {1, 1, 2, 0,  /* 0xdf */
    {0, 1, 1, 0,  /* 0xe0 */
    {1, 1, 2, 0,  /* 0xe1 */
    {0, 1, 2, 0,  /* 0xe2 */
    {1, 1, 2, 0,  /* 0xe3 */
    {0, 1, 2, 0,  /* 0xe4 */
    {1, 1, 3, 0,  /* 0xe5 */
    {0, 1, 2, 0,  /* 0xe6 */
    {1, 1, 2, 0,  /* 0xe7 */
    {0, 1, 2, 0,  /* 0xe8 */
    {1, 1, 3, 0,  /* 0xe9 */
    {0, 1, 3, 0,  /* 0xea */
    {1, 1, 3, 0,  /* 0xeb */
    {0, 1, 2, 0,  /* 0xec */
    {1, 1, 3, 0,  /* 0xed */
    {0, 1, 2, 0,  /* 0xee */
    {1, 1, 2, 0,  /* 0xef */
    {0, 1, 1, 0,  /* 0xf0 */
    {1, 1, 2, 0,  /* 0xf1 */
    {0, 1, 2, 0,  /* 0xf2 */
    {1, 1, 2, 0,  /* 0xf3 */
    {0, 1, 2, 0,  /* 0xf4 */
    {1, 1, 3, 0,  /* 0xf5 */
    {0, 1, 2, 0,  /* 0xf6 */
    {1, 1, 2, 0,  /* 0xf7 */
    {0, 1, 1, 0,  /* 0xf8 */
    {1, 1, 2, 0,  /* 0xf9 */
    {0, 1, 2, 0,  /* 0xfa */
    {1, 1, 2, 0,  /* 0xfb */
    {0, 1, 1, 0,  /* 0xfc */
    {1, 1, 2, 0,  /* 0xfd */
    {0, 1, 1, 0,  /* 0xfe */
    {1, 1, 1, 0,  /* 0xff */
};
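
/*
 * Illustrative sketch, not part of the build: how a SACK builder can
 * consume sack_array.  Each byte of the received-TSN mapping array is
 * used directly as an index; the pre-computed gap offsets in the table
 * are relative to that byte, so the consumer shifts them by
 * 8 * byte_index to make them relative to the cumulative ack point.
 * The right_edge/left_edge flags say whether a run touches the byte
 * boundary so adjacent blocks can be merged; that merging is omitted
 * here.  The function name below is hypothetical, not one of the
 * helpers used later in this file.
 */
#if 0
static int
example_gaps_from_mapping(uint8_t *mapping, int num_bytes,
    struct sctp_gap_ack_block *out, int max_out)
{
    int byte_idx, entry, num_out = 0;

    for (byte_idx = 0; byte_idx < num_bytes; byte_idx++) {
        struct sack_track *t = &sack_array[mapping[byte_idx]];

        for (entry = 0; entry < t->num_entries; entry++) {
            if (num_out >= max_out)
                return (num_out);
            /* table offsets are relative to this byte */
            out[num_out].start = t->gaps[entry].start + (8 * byte_idx);
            out[num_out].end = t->gaps[entry].end + (8 * byte_idx);
            num_out++;
        }
    }
    return (num_out);
}
#endif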

static int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
    int ipv4_addr_legal,
    int ipv6_addr_legal,
    int loopback_scope,
    int ipv4_local_scope,
    int local_scope SCTP_UNUSED,    /* XXX */
    int site_scope,
    int do_update)
{
    if ((loopback_scope == 0) &&
        (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
        /* skip loopback if not in scope */
        return (0);
    }
    switch (ifa->address.sa.sa_family) {
    case AF_INET:
        if (ipv4_addr_legal) {
            struct sockaddr_in *sin;

            sin = (struct sockaddr_in *)&ifa->address.sin;
            if (sin->sin_addr.s_addr == 0) {
                /* not in scope, unspecified */
                return (0);
            }
            if ((ipv4_local_scope == 0) &&
                (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
                /* private address not in scope */
                return (0);
            }
        } else {
            return (0);
        }
        break;
    case AF_INET6:
        if (ipv6_addr_legal) {
            struct sockaddr_in6 *sin6;

            /*
             * Must update the flags, bummer, which means any
             * IFA locks must now be applied HERE <->
             */
            if (do_update) {
                sctp_gather_internal_ifa_flags(ifa);
            }
            if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
                return (0);
            }
            /* ok to use deprecated addresses? */
            sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
            if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
                /* skip unspecified addresses */
                return (0);
            }
            if ( /* (local_scope == 0) && */
                (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
                return (0);
            }
            if ((site_scope == 0) &&
                (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
                return (0);
            }
        } else {
            return (0);
        }
        break;
    default:
        return (0);
    }
    return (1);
}

static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
{
    struct sctp_paramhdr *parmh;
    struct mbuf *mret;
    int len;

    switch (ifa->address.sa.sa_family) {
    case AF_INET:
        len = sizeof(struct sctp_ipv4addr_param);
        break;
    case AF_INET6:
        len = sizeof(struct sctp_ipv6addr_param);
        break;
    default:
        return (m);
    }
    if (M_TRAILINGSPACE(m) >= len) {
        /* easy side we just drop it on the end */
        parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
        mret = m;
    } else {
        /* Need more space */
        mret = m;
        while (SCTP_BUF_NEXT(mret) != NULL) {
            mret = SCTP_BUF_NEXT(mret);
        }
        SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
        if (SCTP_BUF_NEXT(mret) == NULL) {
            /* We are hosed, can't add more addresses */
            return (m);
        }
        mret = SCTP_BUF_NEXT(mret);
        parmh = mtod(mret, struct sctp_paramhdr *);
    }
    /* now add the parameter */
    switch (ifa->address.sa.sa_family) {
    case AF_INET:
    {
        struct sctp_ipv4addr_param *ipv4p;
        struct sockaddr_in *sin;

        sin = (struct sockaddr_in *)&ifa->address.sin;
        ipv4p = (struct sctp_ipv4addr_param *)parmh;
        parmh->param_type = htons(SCTP_IPV4_ADDRESS);
        parmh->param_length = htons(len);
        ipv4p->addr = sin->sin_addr.s_addr;
        SCTP_BUF_LEN(mret) += len;
        break;
    }
    case AF_INET6:
    {
        struct sctp_ipv6addr_param *ipv6p;
        struct sockaddr_in6 *sin6;

        sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
        ipv6p = (struct sctp_ipv6addr_param *)parmh;
        parmh->param_type = htons(SCTP_IPV6_ADDRESS);
        parmh->param_length = htons(len);
        memcpy(ipv6p->addr, &sin6->sin6_addr,
            sizeof(ipv6p->addr));
        /* clear embedded scope in the address */
        in6_clearscope((struct in6_addr *)ipv6p->addr);
        SCTP_BUF_LEN(mret) += len;
        break;
    }
    default:
        return (m);
    }
    return (mret);
}
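
/*
 * For reference, the parameters appended above are plain RFC 4960 TLVs.
 * An IPv4 address parameter is 8 bytes on the wire:
 *
 *   0                   1                   2                   3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *  |    Type = 5 (IPv4 Address)    |          Length = 8           |
 *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *  |                         IPv4 Address                          |
 *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * The IPv6 variant is type 6, length 20.  Both type and length are sent
 * in network byte order, which is why the htons() calls above matter.
 */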

static struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to)
{
    struct sctp_vrf *vrf = NULL;
    int cnt, limit_out = 0, total_count;
    uint32_t vrf_id;

    vrf_id = inp->def_vrf_id;
    SCTP_IPI_ADDR_RLOCK();
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL) {
        SCTP_IPI_ADDR_RUNLOCK();
        return (m_at);
    }
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        struct sctp_ifa *sctp_ifap;
        struct sctp_ifn *sctp_ifnp;

        cnt = cnt_inits_to;
        if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
            limit_out = 1;
            cnt = SCTP_ADDRESS_LIMIT;
            goto skip_count;
        }
        LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
            if ((scope->loopback_scope == 0) &&
                SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
                /*
                 * Skip loopback devices if loopback_scope
                 * not set
                 */
                continue;
            }
            LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
                if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
                    continue;
                }
                if (sctp_is_address_in_scope(sctp_ifap,
                    scope->ipv4_addr_legal,
                    scope->ipv6_addr_legal,
                    scope->loopback_scope,
                    scope->ipv4_local_scope,
                    scope->local_scope,
                    scope->site_scope, 1) == 0) {
                    continue;
                }
                cnt++;
                if (cnt > SCTP_ADDRESS_LIMIT) {
                    break;
                }
            }
            if (cnt > SCTP_ADDRESS_LIMIT) {
                break;
            }
        }
skip_count:
        if (cnt > 1) {
            total_count = 0;
            LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
                cnt = 0;
                if ((scope->loopback_scope == 0) &&
                    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
                    /*
                     * Skip loopback devices if
                     * loopback_scope not set
                     */
                    continue;
                }
                LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
                    if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
                        continue;
                    }
                    if (sctp_is_address_in_scope(sctp_ifap,
                        scope->ipv4_addr_legal,
                        scope->ipv6_addr_legal,
                        scope->loopback_scope,
                        scope->ipv4_local_scope,
                        scope->local_scope,
                        scope->site_scope, 0) == 0) {
                        continue;
                    }
                    m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
                    cnt++;
                    total_count++;
                    if (limit_out && (cnt >= 2)) {
                        /* two from each address */
                        break;
                    }
                    if (total_count > SCTP_ADDRESS_LIMIT) {
                        /* No more addresses */
                        break;
                    }
                }
            }
        }
    } else {
        struct sctp_laddr *laddr;

        cnt = cnt_inits_to;
        /* First, how many? */
        LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
            if (laddr->ifa == NULL) {
                continue;
            }
            if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
                /*
                 * Address being deleted by the system, don't
                 * list.
                 */
                continue;
            if (laddr->action == SCTP_DEL_IP_ADDRESS) {
                /*
                 * Address being deleted on this ep, don't
                 * list.
                 */
                continue;
            }
            if (sctp_is_address_in_scope(laddr->ifa,
                scope->ipv4_addr_legal,
                scope->ipv6_addr_legal,
                scope->loopback_scope,
                scope->ipv4_local_scope,
                scope->local_scope,
                scope->site_scope, 1) == 0) {
                continue;
            }
            cnt++;
        }
        /*
         * To get through a NAT we only list addresses if we have
         * more than one. That way if you just bind a single address
         * we let the source of the init dictate our address.
         */
        if (cnt > 1) {
            LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
                cnt = 0;
                if (laddr->ifa == NULL) {
                    continue;
                }
                if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
                    continue;
                }
                if (sctp_is_address_in_scope(laddr->ifa,
                    scope->ipv4_addr_legal,
                    scope->ipv6_addr_legal,
                    scope->loopback_scope,
                    scope->ipv4_local_scope,
                    scope->local_scope,
                    scope->site_scope, 0) == 0) {
                    continue;
                }
                m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
                cnt++;
                if (cnt >= SCTP_ADDRESS_LIMIT) {
                    break;
                }
            }
        }
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (m_at);
}

static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
    uint8_t dest_is_global = 0;

    /* dest_is_priv is true if destination is a private address */
    /* dest_is_loop is true if destination is a loopback address */
    /*
     * Here we determine if it is a preferred address. A preferred address
     * means it is the same scope or higher scope than the destination.
     * L = loopback, P = private, G = global
     * -----------------------------------------
     *  src   |  dest  | result
     * -----------------------------------------
     *   L    |    L   | yes
     * -----------------------------------------
     *   P    |    L   | yes-v4 no-v6
     * -----------------------------------------
     *   G    |    L   | yes-v4 no-v6
     * -----------------------------------------
     *   L    |    P   | no
     * -----------------------------------------
     *   P    |    P   | yes
     * -----------------------------------------
     *   G    |    P   | no
     * -----------------------------------------
     *   L    |    G   | no
     * -----------------------------------------
     *   P    |    G   | no
     * -----------------------------------------
     *   G    |    G   | yes
     * -----------------------------------------
     */
    if (ifa->address.sa.sa_family != fam) {
        /* forget mis-matched family */
        return (NULL);
    }
    if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
        dest_is_global = 1;
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
    /* Ok the address may be ok */
    if (fam == AF_INET6) {
        /* ok to use deprecated addresses? no lets not! */
        if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
            SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
            return (NULL);
        }
        if (ifa->src_is_priv && !ifa->src_is_loop) {
            if (dest_is_loop) {
                SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
                return (NULL);
            }
        }
        if (ifa->src_is_glob) {
            if (dest_is_loop) {
                SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
                return (NULL);
            }
        }
    }
    /*
     * Now that we know what is what, implement our table. This could in
     * theory be done slicker (it used to be), but this is
     * straightforward and easier to validate :-)
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
        ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
        dest_is_loop, dest_is_priv, dest_is_global);
    if ((ifa->src_is_loop) && (dest_is_priv)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
        return (NULL);
    }
    if ((ifa->src_is_glob) && (dest_is_priv)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
        return (NULL);
    }
    if ((ifa->src_is_loop) && (dest_is_global)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
        return (NULL);
    }
    if ((ifa->src_is_priv) && (dest_is_global)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
        return (NULL);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
    /* it is a preferred address */
    return (ifa);
}
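
/*
 * Illustrative sketch, not built: the scope checks above amount to a
 * 3x3 truth table indexed by source and destination scope.  A
 * hypothetical table-driven reformulation (the v4-only exceptions for
 * loopback destinations noted in the comments still apply):
 */
#if 0
enum example_scope { SCOPE_LOOP = 0, SCOPE_PRIV = 1, SCOPE_GLOB = 2 };

/* preferred[src][dest]: same scope or higher scope than the destination */
static const uint8_t example_preferred[3][3] = {
    /* dest:    LOOP  PRIV  GLOB */
    /* LOOP */ {1,    0,    0},
    /* PRIV */ {1,    1,    0},  /* P->L is yes for v4 only */
    /* GLOB */ {1,    0,    1},  /* G->L is yes for v4 only */
};
#endif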

static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
    uint8_t dest_is_global = 0;

    /*
     * Here we determine if it is an acceptable address. An acceptable
     * address means it is the same scope or higher scope but we can
     * allow for NAT, which means it is ok to have a global dest and a
     * private src.
     *
     * L = loopback, P = private, G = global
     * -----------------------------------------
     *  src   |  dest  | result
     * -----------------------------------------
     *   L    |    L   | yes
     * -----------------------------------------
     *   P    |    L   | yes-v4 no-v6
     * -----------------------------------------
     *   G    |    L   | yes
     * -----------------------------------------
     *   L    |    P   | no
     * -----------------------------------------
     *   P    |    P   | yes
     * -----------------------------------------
     *   G    |    P   | yes - May not work
     * -----------------------------------------
     *   L    |    G   | no
     * -----------------------------------------
     *   P    |    G   | yes - May not work
     * -----------------------------------------
     *   G    |    G   | yes
     * -----------------------------------------
     */
    if (ifa->address.sa.sa_family != fam) {
        /* forget non matching family */
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
            ifa->address.sa.sa_family, fam);
        return (NULL);
    }
    /* Ok the address may be ok */
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
        dest_is_loop, dest_is_priv);
    if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
        dest_is_global = 1;
    }
    if (fam == AF_INET6) {
        /* ok to use deprecated addresses? */
        if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
            return (NULL);
        }
        if (ifa->src_is_priv) {
            /* Special case, linklocal to loop */
            if (dest_is_loop)
                return (NULL);
        }
    }
    /*
     * Now that we know what is what, implement our table. This could in
     * theory be done slicker (it used to be), but this is
     * straightforward and easier to validate :-)
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
        ifa->src_is_loop, dest_is_priv);
    if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
        return (NULL);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
        ifa->src_is_loop, dest_is_global);
    if ((ifa->src_is_loop == 1) && (dest_is_global)) {
        return (NULL);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
    /* it is an acceptable address */
    return (ifa);
}

int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
    struct sctp_laddr *laddr;

    if (stcb == NULL) {
        /* There are no restrictions, no TCB :-) */
        return (0);
    }
    LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
        if (laddr->ifa == NULL) {
            SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
                __FUNCTION__);
            continue;
        }
        if (laddr->ifa == ifa) {
            /* Yes it is on the list */
            return (1);
        }
    }
    return (0);
}

static int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
    struct sctp_laddr *laddr;

    LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
        if (laddr->ifa == NULL) {
            SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
                __FUNCTION__);
            continue;
        }
        if ((laddr->ifa == ifa) && laddr->action == 0)
            return (1);
    }
    return (0);
}

static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
    sctp_route_t *ro,
    uint32_t vrf_id,
    int non_asoc_addr_ok,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    sa_family_t fam)
{
    struct sctp_laddr *laddr, *starting_point;
    void *ifn;
    int resettotop = 0;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    struct sctp_vrf *vrf;
    uint32_t ifn_index;

    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    sctp_ifn = sctp_find_ifn(ifn, ifn_index);
    /*
     * first question, is the ifn we will emit on in our list? If so, we
     * want such an address. Note that we first look for a preferred
     * address.
     */
    if (sctp_ifn) {
        /* is a preferred one on the interface we route out? */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
                (non_asoc_addr_ok == 0))
                continue;
            sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
                dest_is_loop,
                dest_is_priv, fam);
            if (sifa == NULL)
                continue;
            if (sctp_is_addr_in_ep(inp, sifa)) {
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
    }
    /*
     * ok, now we need to find one on the list of the addresses. We
     * can't get one on the emitting interface, so let's find first a
     * preferred one. If not that, an acceptable one; otherwise we
     * return NULL.
     */
    starting_point = inp->next_addr_touse;
once_again:
    if (inp->next_addr_touse == NULL) {
        inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
        resettotop = 1;
    }
    for (laddr = inp->next_addr_touse; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (resettotop == 0) {
        inp->next_addr_touse = NULL;
        goto once_again;
    }
    inp->next_addr_touse = starting_point;
    resettotop = 0;
once_again_too:
    if (inp->next_addr_touse == NULL) {
        inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
        resettotop = 1;
    }
    /* ok, what about an acceptable address in the inp */
    for (laddr = inp->next_addr_touse; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (resettotop == 0) {
        inp->next_addr_touse = NULL;
        goto once_again_too;
    }
    /*
     * no address bound can be a source for the destination; we are in
     * trouble
     */
    return (NULL);
}
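
/*
 * Illustrative sketch, not built: the next_addr_touse / starting_point
 * dance above is a wrap-around cursor scan, so successive calls rotate
 * through the bound addresses instead of always picking the first one.
 * A hypothetical reduction of the idiom, with the acceptance predicate
 * left out:
 */
#if 0
static struct sctp_laddr *
example_rotate(struct sctp_inpcb *inp, struct sctp_laddr **cursor)
{
    struct sctp_laddr *laddr;
    int wrapped = 0;

    if (*cursor == NULL)
        *cursor = LIST_FIRST(&inp->sctp_addr_list);
again:
    for (laddr = *cursor; laddr; laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL)
            continue;       /* address has been removed */
        *cursor = laddr;    /* remember where to resume next time */
        return (laddr);
    }
    if (!wrapped) {
        /* hit the tail; wrap to the head exactly once */
        wrapped = 1;
        *cursor = LIST_FIRST(&inp->sctp_addr_list);
        goto again;
    }
    return (NULL);
}
#endif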

static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t *ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
    struct sctp_laddr *laddr, *starting_point;
    void *ifn;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    uint8_t start_at_beginning = 0;
    struct sctp_vrf *vrf;
    uint32_t ifn_index;

    /*
     * first question, is the ifn we will emit on in our list? If so, we
     * want that one.
     */
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    sctp_ifn = sctp_find_ifn(ifn, ifn_index);

    /*
     * first question, is the ifn we will emit on in our list? If so,
     * we want that one. First we look for a preferred. Second, we go
     * for an acceptable.
     */
    if (sctp_ifn) {
        /* first try for a preferred address on the ep */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
                continue;
            if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
                sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
                if (sifa == NULL)
                    continue;
                if (((non_asoc_addr_ok == 0) &&
                    (sctp_is_addr_restricted(stcb, sifa))) ||
                    (non_asoc_addr_ok &&
                    (sctp_is_addr_restricted(stcb, sifa)) &&
                    (!sctp_is_addr_pending(stcb, sifa)))) {
                    /* on the no-no list */
                    continue;
                }
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
        /* next try for an acceptable address on the ep */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
                continue;
            if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
                sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
                if (sifa == NULL)
                    continue;
                if (((non_asoc_addr_ok == 0) &&
                    (sctp_is_addr_restricted(stcb, sifa))) ||
                    (non_asoc_addr_ok &&
                    (sctp_is_addr_restricted(stcb, sifa)) &&
                    (!sctp_is_addr_pending(stcb, sifa)))) {
                    /* on the no-no list */
                    continue;
                }
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
    }
    /*
     * if we can't find one like that, we must look at all addresses
     * bound, picking first a preferred one, then an acceptable one.
     */
    starting_point = stcb->asoc.last_used_address;
sctp_from_the_top:
    if (stcb->asoc.last_used_address == NULL) {
        start_at_beginning = 1;
        stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
    }
    /* search beginning with the last used address */
    for (laddr = stcb->asoc.last_used_address; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        if (((non_asoc_addr_ok == 0) &&
            (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
            (sctp_is_addr_restricted(stcb, sifa)) &&
            (!sctp_is_addr_pending(stcb, sifa)))) {
            /* on the no-no list */
            continue;
        }
        stcb->asoc.last_used_address = laddr;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (start_at_beginning == 0) {
        stcb->asoc.last_used_address = NULL;
        goto sctp_from_the_top;
    }
    /* now try for any higher scope than the destination */
    stcb->asoc.last_used_address = starting_point;
    start_at_beginning = 0;
sctp_from_the_top2:
    if (stcb->asoc.last_used_address == NULL) {
        start_at_beginning = 1;
        stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
    }
    /* search beginning with the last used address */
    for (laddr = stcb->asoc.last_used_address; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        if (((non_asoc_addr_ok == 0) &&
            (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
            (sctp_is_addr_restricted(stcb, sifa)) &&
            (!sctp_is_addr_pending(stcb, sifa)))) {
            /* on the no-no list */
            continue;
        }
        stcb->asoc.last_used_address = laddr;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (start_at_beginning == 0) {
        stcb->asoc.last_used_address = NULL;
        goto sctp_from_the_top2;
    }
    return (NULL);
}

static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    int addr_wanted,
    sa_family_t fam,
    sctp_route_t *ro)
{
    struct sctp_ifa *ifa, *sifa;
    int num_eligible_addr = 0;
#ifdef INET6
    struct sockaddr_in6 sin6, lsa6;

    if (fam == AF_INET6) {
        memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
        (void)sa6_recoverscope(&sin6);
    }
#endif				/* INET6 */
    LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
        if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
            (non_asoc_addr_ok == 0))
            continue;
        sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
#ifdef INET6
        if (fam == AF_INET6 &&
            dest_is_loop &&
            sifa->src_is_loop && sifa->src_is_priv) {
            /*
             * don't allow fe80::1 to be a src on loop ::1, we
             * don't list it to the peer so we will get an
             * abort.
             */
            continue;
        }
        if (fam == AF_INET6 &&
            IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
            IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
            /*
             * link-local <-> link-local must belong to the same
             * scope.
             */
            memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
            (void)sa6_recoverscope(&lsa6);
            if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
                continue;
            }
        }
#endif				/* INET6 */
        /*
         * Check if the IPv6 address matches the next-hop. In the
         * mobile case, an old IPv6 address may not be deleted from
         * the interface. Then, the interface has previous and new
         * addresses. We should use the one corresponding to the
         * next-hop. (by micchie)
         */
#ifdef INET6
        if (stcb && fam == AF_INET6 &&
            sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
            if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
                == 0) {
                continue;
            }
        }
#endif
        /* Avoid topologically incorrect IPv4 address */
        if (stcb && fam == AF_INET &&
            sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
            if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
                continue;
            }
        }
        if (stcb) {
            if (sctp_is_address_in_scope(ifa,
                stcb->asoc.ipv4_addr_legal,
                stcb->asoc.ipv6_addr_legal,
                stcb->asoc.loopback_scope,
                stcb->asoc.ipv4_local_scope,
                stcb->asoc.local_scope,
                stcb->asoc.site_scope, 0) == 0) {
                continue;
            }
            if (((non_asoc_addr_ok == 0) &&
                (sctp_is_addr_restricted(stcb, sifa))) ||
                (non_asoc_addr_ok &&
                (sctp_is_addr_restricted(stcb, sifa)) &&
                (!sctp_is_addr_pending(stcb, sifa)))) {
                /*
                 * It is restricted for some reason...
                 * probably not yet added.
                 */
                continue;
            }
        }
        if (num_eligible_addr >= addr_wanted) {
            return (sifa);
        }
        num_eligible_addr++;
    }
    return (NULL);
}

static int
sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
    struct sctp_ifa *ifa, *sifa;
    int num_eligible_addr = 0;

    LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
        if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
            (non_asoc_addr_ok == 0)) {
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL) {
            continue;
        }
        if (stcb) {
            if (sctp_is_address_in_scope(ifa,
                stcb->asoc.ipv4_addr_legal,
                stcb->asoc.ipv6_addr_legal,
                stcb->asoc.loopback_scope,
                stcb->asoc.ipv4_local_scope,
                stcb->asoc.local_scope,
                stcb->asoc.site_scope, 0) == 0) {
                continue;
            }
            if (((non_asoc_addr_ok == 0) &&
                (sctp_is_addr_restricted(stcb, sifa))) ||
                (non_asoc_addr_ok &&
                (sctp_is_addr_restricted(stcb, sifa)) &&
                (!sctp_is_addr_pending(stcb, sifa)))) {
                /*
                 * It is restricted for some reason...
                 * probably not yet added.
                 */
                continue;
            }
        }
        num_eligible_addr++;
    }
    return (num_eligible_addr);
}

static struct sctp_ifa *
sctp_choose_boundall(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t *ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
    int cur_addr_num = 0, num_preferred = 0;
    void *ifn;
    struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    uint32_t ifn_index;
    struct sctp_vrf *vrf;
    int retried = 0;

    /*-
     * For boundall we can use any address in the association.
     * If non_asoc_addr_ok is set we can use any address (at least in
     * theory). So we look for preferred addresses first. If we find one,
     * we use it. Otherwise we next try to get an address on the
     * interface, which we should be able to do (unless non_asoc_addr_ok
     * is false and we are routed out that way). In these cases where we
     * can't use the address of the interface we go through all the
     * ifn's looking for an address we can use and fill that in. Punting
     * means we send back address 0, which will probably cause problems
     * actually since then IP will fill in the address of the route ifn,
     * which means we probably already rejected it... i.e. here comes an
     * abort :-<.
     */
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
    emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
    if (sctp_ifn == NULL) {
        /* ?? We don't have this guy ?? */
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
        goto bound_all_plan_b;
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
        ifn_index, sctp_ifn->ifn_name);

    if (net) {
        cur_addr_num = net->indx_of_eligible_next_to_use;
    }
    num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
        stcb,
        non_asoc_addr_ok,
        dest_is_loop,
        dest_is_priv, fam);
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
        num_preferred, sctp_ifn->ifn_name);
    if (num_preferred == 0) {
        /*
         * no eligible addresses, we must use some other interface
         * address if we can find one.
         */
        goto bound_all_plan_b;
    }
    /*
     * Ok we have num_eligible_addr set with how many we can use; this
     * may vary from call to call due to addresses being deprecated
     * etc.
     */
    if (cur_addr_num >= num_preferred) {
        cur_addr_num = 0;
    }
    /*
     * select the nth address from the list (where cur_addr_num is the
     * nth) and 0 is the first one, 1 is the second one etc...
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);

    sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
        dest_is_priv, cur_addr_num, fam, ro);

    /* if sctp_ifa is NULL something changed??, fall to plan b. */
    if (sctp_ifa) {
        atomic_add_int(&sctp_ifa->refcount, 1);
        if (net) {
            /* save off where the next one we will want */
            net->indx_of_eligible_next_to_use = cur_addr_num + 1;
        }
        return (sctp_ifa);
    }
    /*
     * plan_b: Look at all interfaces and find a preferred address. If
     * no preferred fall through to plan_c.
     */
bound_all_plan_b:
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
    LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
            sctp_ifn->ifn_name);
        if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
            /* wrong base scope */
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
            continue;
        }
        if ((sctp_ifn == looked_at) && looked_at) {
            /* already looked at this guy */
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
            continue;
        }
        num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
            dest_is_loop, dest_is_priv, fam);
        SCTPDBG(SCTP_DEBUG_OUTPUT2,
            "Found ifn:%p %d preferred source addresses\n",
            ifn, num_preferred);
        if (num_preferred == 0) {
            /* None on this interface. */
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
            continue;
        }
        SCTPDBG(SCTP_DEBUG_OUTPUT2,
            "num preferred:%d on interface:%p cur_addr_num:%d\n",
            num_preferred, sctp_ifn, cur_addr_num);

        /*
         * Ok we have num_eligible_addr set with how many we can
         * use; this may vary from call to call due to addresses
         * being deprecated etc.
         */
        if (cur_addr_num >= num_preferred) {
            cur_addr_num = 0;
        }
        sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
            dest_is_priv, cur_addr_num, fam, ro);
        if (sifa == NULL)
            continue;
        if (net) {
            net->indx_of_eligible_next_to_use = cur_addr_num + 1;
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
                cur_addr_num);
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
            SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
            SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
        }
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
again_with_private_addresses_allowed:
    /* plan_c: do we have an acceptable address on the emit interface */
    sifa = NULL;
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
    if (emit_ifn == NULL) {
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
        goto plan_d;
    }
    LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", sctp_ifa);
        if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
            (non_asoc_addr_ok == 0)) {
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
            continue;
        }
        sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL) {
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
            continue;
        }
        if (stcb) {
            if (sctp_is_address_in_scope(sifa,
                stcb->asoc.ipv4_addr_legal,
                stcb->asoc.ipv6_addr_legal,
                stcb->asoc.loopback_scope,
                stcb->asoc.ipv4_local_scope,
                stcb->asoc.local_scope,
                stcb->asoc.site_scope, 0) == 0) {
                SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
                sifa = NULL;
                continue;
            }
            if (((non_asoc_addr_ok == 0) &&
                (sctp_is_addr_restricted(stcb, sifa))) ||
                (non_asoc_addr_ok &&
                (sctp_is_addr_restricted(stcb, sifa)) &&
                (!sctp_is_addr_pending(stcb, sifa)))) {
                /*
                 * It is restricted for some reason...
                 * probably not yet added.
                 */
                SCTPDBG(SCTP_DEBUG_OUTPUT2, "It's restricted\n");
                sifa = NULL;
                continue;
            }
        } else {
            printf("Stcb is null - no print\n");
        }
        atomic_add_int(&sifa->refcount, 1);
        goto out;
    }
plan_d:
    /*
     * plan_d: We are in trouble. No preferred address on the emit
     * interface. And not even a preferred address on all interfaces. Go
     * out and see if we can find an acceptable address somewhere
     * amongst all interfaces.
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", looked_at);
    LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
        if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
            /* wrong base scope */
            continue;
        }
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
                (non_asoc_addr_ok == 0))
                continue;
            sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
                dest_is_loop,
                dest_is_priv, fam);
            if (sifa == NULL)
                continue;
            if (stcb) {
                if (sctp_is_address_in_scope(sifa,
                    stcb->asoc.ipv4_addr_legal,
                    stcb->asoc.ipv6_addr_legal,
                    stcb->asoc.loopback_scope,
                    stcb->asoc.ipv4_local_scope,
                    stcb->asoc.local_scope,
                    stcb->asoc.site_scope, 0) == 0) {
                    sifa = NULL;
                    continue;
                }
                if (((non_asoc_addr_ok == 0) &&
                    (sctp_is_addr_restricted(stcb, sifa))) ||
                    (non_asoc_addr_ok &&
                    (sctp_is_addr_restricted(stcb, sifa)) &&
                    (!sctp_is_addr_pending(stcb, sifa)))) {
                    /*
                     * It is restricted for some
                     * reason... probably not yet added.
                     */
                    sifa = NULL;
                    continue;
                }
            }
            goto out;
        }
    }
    if ((retried == 0) && (stcb->asoc.ipv4_local_scope == 0)) {
        stcb->asoc.ipv4_local_scope = 1;
        retried = 1;
        goto again_with_private_addresses_allowed;
    } else if (retried == 1) {
        stcb->asoc.ipv4_local_scope = 0;
    }
out:
    if (sifa) {
        if (retried == 1) {
            LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
                if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
                    /* wrong base scope */
                    continue;
                }
                LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
                    struct sctp_ifa *tmp_sifa;

                    if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
                        (non_asoc_addr_ok == 0))
                        continue;
                    tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
                        dest_is_loop,
                        dest_is_priv, fam);
                    if (tmp_sifa == NULL) {
                        continue;
                    }
                    if (tmp_sifa == sifa) {
                        continue;
                    }
                    if (sctp_is_address_in_scope(tmp_sifa,
                        stcb->asoc.ipv4_addr_legal,
                        stcb->asoc.ipv6_addr_legal,
                        stcb->asoc.loopback_scope,
                        stcb->asoc.ipv4_local_scope,
                        stcb->asoc.local_scope,
                        stcb->asoc.site_scope, 0) == 0) {
                        continue;
                    }
                    if (((non_asoc_addr_ok == 0) &&
                        (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
                        (non_asoc_addr_ok &&
                        (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
                        (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
                        continue;
                    }
                    if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
                        (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
                        sctp_add_local_addr_restricted(stcb, tmp_sifa);
                    }
                }
            }
        }
        atomic_add_int(&sifa->refcount, 1);
    }
    return (sifa);
}

/* tcb may be NULL */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t *ro,
    struct sctp_nets *net,
    int non_asoc_addr_ok, uint32_t vrf_id)
{
    struct sctp_ifa *answer;
    uint8_t dest_is_priv, dest_is_loop;
    sa_family_t fam;
    struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;

#ifdef INET6
    struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;

#endif

    /*-
     * Rules: - Find the route if needed, cache if I can. - Look at
     * interface address in route, Is it in the bound list. If so we
     * have the best source. - If not we must rotate amongst the
     * addresses.
     *
     * Do we need to pay attention to scope? We can have a private address
     * or a global address we are sourcing or sending to. So if we draw
     * a table:
     * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
     * For V4:
     * ------------------------------------------
     *     source   *   dest   *  result
     * -----------------------------------------
     * <a> Private  *  Global  *  NAT
     * -----------------------------------------
     * <b> Private  *  Private *  No problem
     * -----------------------------------------
     * <c> Global   *  Private *  Huh, How will this work?
     * -----------------------------------------
     * <d> Global   *  Global  *  No Problem
     *------------------------------------------
     * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
     * For V6:
     *------------------------------------------
     *     source     *   dest      *  result
     * -----------------------------------------
     * <a> Linklocal  *  Global     *
     * -----------------------------------------
     * <b> Linklocal  *  Linklocal  *  No problem
     * -----------------------------------------
     * <c> Global     *  Linklocal  *  Huh, How will this work?
     * -----------------------------------------
     * <d> Global     *  Global     *  No Problem
     *------------------------------------------
     * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
     *
     * And then we add to that what happens if there are multiple addresses
     * assigned to an interface. Remember the ifa on a ifn is a linked
     * list of addresses. So one interface can have more than one IP
     * address. What happens if we have both a private and a global
     * address? Do we then use context of destination to sort out which
     * one is best? And what about NATs: sending P->G may get you a NAT
     * translation, or should you select the G that's on the interface in
     * preference?
     *
     * Decisions:
     *
     * - count the number of addresses on the interface.
     * - if it is one, no problem except case <c>.
     *   For <a> we will assume a NAT out there.
     * - if there are more than one, then we need to worry about scope P
     *   or G. We should prefer G -> G and P -> P if possible.
     *   Then as a secondary fall back to mixed types G->P being a last
     *   ditch one.
     * - The above all works for bound all, but bound specific we need to
     *   use the same concept but instead only consider the bound
     *   addresses. If the bound set is NOT assigned to the interface then
     *   we must use rotation amongst the bound addresses.
     */
    if (ro->ro_rt == NULL) {
        /*
         * Need a route to cache.
         */
        SCTP_RTALLOC(ro, vrf_id);
    }
    if (ro->ro_rt == NULL) {
        return (NULL);
    }
    fam = ro->ro_dst.sa_family;
    dest_is_priv = dest_is_loop = 0;
    /* Setup our scopes for the destination */
    switch (fam) {
    case AF_INET:
        /* Scope based on outbound address */
        if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
            dest_is_loop = 1;
            if (net != NULL) {
                /* mark it as local */
                net->addr_is_local = 1;
            }
        } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
            dest_is_priv = 1;
        }
        break;
#ifdef INET6
    case AF_INET6:
        /* Scope based on outbound address */
        if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
            SCTP_ROUTE_IS_REAL_LOOP(ro)) {
            /*
             * If the address is a loopback address, which
             * consists of "::1" OR "fe80::1%lo0", we are
             * loopback scope. But we don't use dest_is_priv
             * (link local addresses).
             */
            dest_is_loop = 1;
            if (net != NULL) {
                /* mark it as local */
                net->addr_is_local = 1;
            }
        } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
            dest_is_priv = 1;
        }
        break;
#endif
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
    SCTP_IPI_ADDR_RLOCK();
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        /*
         * Bound all case
         */
        answer = sctp_choose_boundall(stcb, net, ro, vrf_id,
            dest_is_priv, dest_is_loop,
            non_asoc_addr_ok, fam);
        SCTP_IPI_ADDR_RUNLOCK();
        return (answer);
    }
    /*
     * Subset bound case
     */
    if (stcb) {
        answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
            vrf_id, dest_is_priv,
            dest_is_loop,
            non_asoc_addr_ok, fam);
    } else {
        answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
            non_asoc_addr_ok,
            dest_is_priv,
            dest_is_loop, fam);
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (answer);
}
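
/*
 * Illustrative sketch, not built: a typical caller holds a route for
 * the destination and asks for a source address, then drops its
 * reference when done.  The function name and parameter values here
 * are hypothetical.
 */
#if 0
static void
example_pick_src(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, sctp_route_t *ro, uint32_t vrf_id)
{
    struct sctp_ifa *src;

    src = sctp_source_address_selection(inp, stcb, ro, net,
        0 /* non_asoc_addr_ok */, vrf_id);
    if (src == NULL)
        return;             /* no usable source address */
    /* ... use src->address as the packet's source ... */
    sctp_free_ifa(src);     /* selection took a refcount */
}
#endif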

static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
{
    struct cmsghdr cmh;
    int tlen, at, found;
    struct sctp_sndinfo sndinfo;
    struct sctp_prinfo prinfo;
    struct sctp_authinfo authinfo;

    tlen = SCTP_BUF_LEN(control);
    at = 0;
    found = 0;
    /*
     * Independent of how many mbufs, find the c_type inside the control
     * structure and copy out the data.
     */
    while (at < tlen) {
        if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
            /* There is not enough room for one more. */
            return (found);
        }
        m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
        if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
            /* We don't have a complete CMSG header. */
            return (found);
        }
        if (((int)cmh.cmsg_len + at) > tlen) {
            /* We don't have the complete CMSG. */
            return (found);
        }
        if ((cmh.cmsg_level == IPPROTO_SCTP) &&
            ((c_type == cmh.cmsg_type) ||
            ((c_type == SCTP_SNDRCV) &&
            ((cmh.cmsg_type == SCTP_SNDINFO) ||
            (cmh.cmsg_type == SCTP_PRINFO) ||
            (cmh.cmsg_type == SCTP_AUTHINFO))))) {
            if (c_type == cmh.cmsg_type) {
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
                    return (found);
                }
                /* It is exactly what we want. Copy it out. */
                m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), cpsize, (caddr_t)data);
                return (1);
            } else {
                struct sctp_sndrcvinfo *sndrcvinfo;

                sndrcvinfo = (struct sctp_sndrcvinfo *)data;
                if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
                    return (found);
                }
                memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
                switch (cmh.cmsg_type) {
                case SCTP_SNDINFO:
                    if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_sndinfo)) {
                        return (found);
                    }
                    m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
                    sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
                    sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
                    sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
                    sndrcvinfo->sinfo_context = sndinfo.snd_context;
                    sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
                    break;
                case SCTP_PRINFO:
                    if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_prinfo)) {
                        return (found);
                    }
                    m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
                    sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
                    sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
                    break;
                case SCTP_AUTHINFO:
                    if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_authinfo)) {
                        return (found);
                    }
                    m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
                    sndrcvinfo->sinfo_keynumber_valid = 1;
                    sndrcvinfo->sinfo_keynumber = authinfo.auth_keyid;
                    break;
                default:
                    return (found);
                }
                found = 1;
            }
        }
        at += CMSG_ALIGN(cmh.cmsg_len);
    }
    return (found);
}
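
/*
 * For context, this is the user-space shape of what sctp_find_cmsg()
 * parses: ancillary data built with the CMSG macros and passed to
 * sendmsg(2), per RFC 6458.  Illustrative only, not part of the kernel
 * build.
 */
#if 0
    struct sctp_sndinfo snd;
    struct msghdr msg;
    struct cmsghdr *cmsg;
    char cbuf[CMSG_SPACE(sizeof(snd))];

    memset(&snd, 0, sizeof(snd));
    snd.snd_sid = 1;             /* stream id to send on */
    snd.snd_ppid = htonl(42);    /* carried opaquely to the peer */
    memset(cbuf, 0, sizeof(cbuf));
    memset(&msg, 0, sizeof(msg));
    msg.msg_control = cbuf;
    msg.msg_controllen = sizeof(cbuf);
    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = IPPROTO_SCTP;
    cmsg->cmsg_type = SCTP_SNDINFO;
    cmsg->cmsg_len = CMSG_LEN(sizeof(snd));
    memcpy(CMSG_DATA(cmsg), &snd, sizeof(snd));
    /* then set msg_iov/msg_iovlen and call sendmsg(fd, &msg, 0) */
#endif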

static int
sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
{
    struct cmsghdr cmh;
    int tlen, at;
    struct sctp_initmsg initmsg;
    struct sockaddr_in sin;

#ifdef INET6
    struct sockaddr_in6 sin6;

#endif

    tlen = SCTP_BUF_LEN(control);
    at = 0;
    while (at < tlen) {
        if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
            /* There is not enough room for one more. */
            *error = EINVAL;
            return (1);
        }
        m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
        if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
            /* We don't have a complete CMSG header. */
            *error = EINVAL;
            return (1);
        }
        if (((int)cmh.cmsg_len + at) > tlen) {
            /* We don't have the complete CMSG. */
            *error = EINVAL;
            return (1);
        }
        if (cmh.cmsg_level == IPPROTO_SCTP) {
            switch (cmh.cmsg_type) {
            case SCTP_INIT:
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct sctp_initmsg)) {
                    *error = EINVAL;
                    return (1);
                }
                m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
                if (initmsg.sinit_max_attempts)
                    stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
                if (initmsg.sinit_num_ostreams)
                    stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
                if (initmsg.sinit_max_instreams)
                    stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
                if (initmsg.sinit_max_init_timeo)
                    stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
                if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
                    struct sctp_stream_out *tmp_str;
                    unsigned int i;

                    /* Default is NOT correct */
                    SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
                        stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
                    SCTP_TCB_UNLOCK(stcb);
                    SCTP_MALLOC(tmp_str,
                        struct sctp_stream_out *,
                        (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
                        SCTP_M_STRMO);
                    SCTP_TCB_LOCK(stcb);
                    if (tmp_str != NULL) {
                        SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
                        stcb->asoc.strmout = tmp_str;
                        stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
                    } else {
                        stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
                    }
                    for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
                        stcb->asoc.strmout[i].next_sequence_sent = 0;
                        TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
                        stcb->asoc.strmout[i].stream_no = i;
                        stcb->asoc.strmout[i].last_msg_incomplete = 0;
                        stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
                    }
                }
                break;
            case SCTP_DSTADDRV4:
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in_addr)) {
                    *error = EINVAL;
                    return (1);
                }
                memset(&sin, 0, sizeof(struct sockaddr_in));
                sin.sin_family = AF_INET;
                sin.sin_len = sizeof(struct sockaddr_in);
                sin.sin_port = stcb->rport;
                m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
                if ((sin.sin_addr.s_addr == INADDR_ANY) ||
                    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
                    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
                    *error = EINVAL;
                    return (1);
                }
                if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
                    SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
                    *error = ENOBUFS;
                    return (1);
                }
                break;
#ifdef INET6
            case SCTP_DSTADDRV6:
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in6_addr)) {
                    *error = EINVAL;
                    return (1);
                }
                memset(&sin6, 0, sizeof(struct sockaddr_in6));
                sin6.sin6_family = AF_INET6;
                sin6.sin6_len = sizeof(struct sockaddr_in6);
                sin6.sin6_port = stcb->rport;
                m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
                if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
                    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
                    *error = EINVAL;
                    return (1);
                }
                if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
                    in6_sin6_2_sin(&sin, &sin6);
                    if ((sin.sin_addr.s_addr == INADDR_ANY) ||
                        (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
                        IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
                        *error = EINVAL;
                        return (1);
                    }
                    if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
                        SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
                        *error = ENOBUFS;
                        return (1);
                    }
                } else if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
                    SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
                    *error = ENOBUFS;
                    return (1);
                }
                break;
#endif
            default:
                break;
            }
        }
        at += CMSG_ALIGN(cmh.cmsg_len);
    }
    return (0);
}
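
/*
 * Illustrative user-space counterpart, not built: SCTP_DSTADDRV4 and
 * SCTP_DSTADDRV6 cmsgs carry bare in_addr / in6_addr payloads, so an
 * implicit connect can hand the stack several destination addresses in
 * one sendmsg(2) call.  The address below is a documentation example.
 */
#if 0
    struct in_addr dst;
    struct cmsghdr *cmsg;    /* placed in msg_control as shown earlier */

    inet_pton(AF_INET, "192.0.2.1", &dst);
    cmsg->cmsg_level = IPPROTO_SCTP;
    cmsg->cmsg_type = SCTP_DSTADDRV4;
    cmsg->cmsg_len = CMSG_LEN(sizeof(dst));
    memcpy(CMSG_DATA(cmsg), &dst, sizeof(dst));
#endif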

static struct sctp_tcb *
sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
    uint16_t port,
    struct mbuf *control,
    struct sctp_nets **net_p,
    int *error)
{
    struct cmsghdr cmh;
    int tlen, at;
    struct sctp_tcb *stcb;
    struct sockaddr *addr;
    struct sockaddr_in sin;

#ifdef INET6
    struct sockaddr_in6 sin6;

#endif

    tlen = SCTP_BUF_LEN(control);
    at = 0;
    while (at < tlen) {
        if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
            /* There is not enough room for one more. */
            *error = EINVAL;
            return (NULL);
        }
        m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
        if (cmh.cmsg_len < CMSG_ALIGN(sizeof(struct cmsghdr))) {
            /* We don't have a complete CMSG header. */
            *error = EINVAL;
            return (NULL);
        }
        if (((int)cmh.cmsg_len + at) > tlen) {
            /* We don't have the complete CMSG. */
            *error = EINVAL;
            return (NULL);
        }
        if (cmh.cmsg_level == IPPROTO_SCTP) {
            switch (cmh.cmsg_type) {
            case SCTP_DSTADDRV4:
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in_addr)) {
                    *error = EINVAL;
                    return (NULL);
                }
                memset(&sin, 0, sizeof(struct sockaddr_in));
                sin.sin_family = AF_INET;
                sin.sin_len = sizeof(struct sockaddr_in);
                sin.sin_port = port;
                m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
                addr = (struct sockaddr *)&sin;
                break;
#ifdef INET6
            case SCTP_DSTADDRV6:
                if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < sizeof(struct in6_addr)) {
                    *error = EINVAL;
                    return (NULL);
                }
                memset(&sin6, 0, sizeof(struct sockaddr_in6));
                sin6.sin6_family = AF_INET6;
                sin6.sin6_len = sizeof(struct sockaddr_in6);
                sin6.sin6_port = port;
                m_copydata(control, at + CMSG_ALIGN(sizeof(struct cmsghdr)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
                if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
                    in6_sin6_2_sin(&sin, &sin6);
                    addr = (struct sockaddr *)&sin;
                } else {
                    addr = (struct sockaddr *)&sin6;
                }
                break;
#endif
            default:
                addr = NULL;
                break;
            }
            if (addr) {
                stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
                if (stcb != NULL) {
                    return (stcb);
                }
            }
        }
        at += CMSG_ALIGN(cmh.cmsg_len);
    }
    return (NULL);
}

static struct mbuf *
sctp_add_cookie(struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
{
    struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
    struct sctp_state_cookie *stc;
    struct sctp_paramhdr *ph;
    uint8_t *foo;
    int sig_offset;
    uint16_t cookie_sz;

    mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
        sizeof(struct sctp_paramhdr)), 0,
        M_DONTWAIT, 1, MT_DATA);
    if (mret == NULL) {
        return (NULL);
    }
    copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
    if (copy_init == NULL) {
        sctp_m_freem(mret);
        return (NULL);
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
        struct mbuf *mat;

        mat = copy_init;
        while (mat) {
            if (SCTP_BUF_IS_EXTENDED(mat)) {
                sctp_log_mb(mat, SCTP_MBUF_ICOPY);
            }
            mat = SCTP_BUF_NEXT(mat);
        }
    }
#endif
    copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
        M_DONTWAIT);
    if (copy_initack == NULL) {
        sctp_m_freem(mret);
        sctp_m_freem(copy_init);
        return (NULL);
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
        struct mbuf *mat;

        mat = copy_initack;
        while (mat) {
            if (SCTP_BUF_IS_EXTENDED(mat)) {
                sctp_log_mb(mat, SCTP_MBUF_ICOPY);
            }
            mat = SCTP_BUF_NEXT(mat);
        }
    }
#endif
    /* easy side we just drop it on the end */
    ph = mtod(mret, struct sctp_paramhdr *);
    SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
        sizeof(struct sctp_paramhdr);
    stc = (struct sctp_state_cookie *)((caddr_t)ph +
        sizeof(struct sctp_paramhdr));
    ph->param_type = htons(SCTP_STATE_COOKIE);
    ph->param_length = 0;   /* fill in at the end */
    /* Fill in the stc cookie data */
    memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));

    /* tack the INIT and then the INIT-ACK onto the chain */
    cookie_sz = 0;
    for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
        cookie_sz += SCTP_BUF_LEN(m_at);
        if (SCTP_BUF_NEXT(m_at) == NULL) {
            SCTP_BUF_NEXT(m_at) = copy_init;
            break;
        }
    }
    for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
        cookie_sz += SCTP_BUF_LEN(m_at);
        if (SCTP_BUF_NEXT(m_at) == NULL) {
            SCTP_BUF_NEXT(m_at) = copy_initack;
            break;
        }
    }
    for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
        cookie_sz += SCTP_BUF_LEN(m_at);
        if (SCTP_BUF_NEXT(m_at) == NULL) {
            break;
        }
    }
    sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
    if (sig == NULL) {
        /* no space, so free the entire chain */
        sctp_m_freem(mret);
        return (NULL);
    }
    SCTP_BUF_LEN(sig) = 0;
    SCTP_BUF_NEXT(m_at) = sig;
    sig_offset = 0;
    foo = (uint8_t *)(mtod(sig, caddr_t)+sig_offset);
    memset(foo, 0, SCTP_SIGNATURE_SIZE);
    *signature = foo;
    SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
    cookie_sz += SCTP_SIGNATURE_SIZE;
    ph->param_length = htons(cookie_sz);
    return (mret);
}
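
/*
 * Resulting state-cookie layout, as assembled above:
 *
 *   +----------------------+  param header: type SCTP_STATE_COOKIE,
 *   | sctp_paramhdr    (4) |  length = cookie_sz, filled in last
 *   +----------------------+
 *   | sctp_state_cookie    |  copy of *stc_in
 *   +----------------------+
 *   | original INIT        |  mbuf chain copied from the peer
 *   +----------------------+
 *   | our INIT-ACK         |  mbuf chain copied from our reply
 *   +----------------------+
 *   | signature            |  SCTP_SIGNATURE_SIZE bytes, zeroed here;
 *   +----------------------+  the HMAC is filled in later through the
 *                             returned *signature pointer
 */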

static uint8_t
sctp_get_ect(struct sctp_tcb *stcb)
{
    if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) {
        return (SCTP_ECT0_BIT);
    } else {
        return (0);
    }
}

static void
sctp_handle_no_route(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int so_locked)
{
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");

    if (net) {
        SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
        if (net->dest_state & SCTP_ADDR_CONFIRMED) {
            if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
                SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
                sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
                    stcb,
                    SCTP_FAILED_THRESHOLD,
                    (void *)net,
                    so_locked);
                net->dest_state &= ~SCTP_ADDR_REACHABLE;
                net->dest_state &= ~SCTP_ADDR_PF;
            }
        }
        if (stcb) {
            if (net == stcb->asoc.primary_destination) {
                /* need a new primary */
                struct sctp_nets *alt;

                alt = sctp_find_alternate_net(stcb, net, 0);
                if (alt != net) {
                    if (stcb->asoc.alternate) {
                        sctp_free_remote_addr(stcb->asoc.alternate);
                    }
                    stcb->asoc.alternate = alt;
                    atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
                    if (net->ro._s_addr) {
                        sctp_free_ifa(net->ro._s_addr);
                        net->ro._s_addr = NULL;
                    }
                    net->src_addr_selected = 0;
                }
            }
        }
    }
}
3853 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3854 struct sctp_tcb *stcb, /* may be NULL */
3855 struct sctp_nets *net,
3856 struct sockaddr *to,
3858 uint32_t auth_offset,
3859 struct sctp_auth_chunk *auth,
3860 uint16_t auth_keyid,
3861 int nofragment_flag,
3868 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3869 int so_locked SCTP_UNUSED,
3873 union sctp_sockstore *over_addr,
3876 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3879 * Given an mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3880 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3881 * - fill in the HMAC digest of any AUTH chunk in the packet.
3882 * - calculate and fill in the SCTP checksum.
3883 * - prepend an IP address header.
3884 * - if boundall use INADDR_ANY.
3885 * - if boundspecific do source address selection.
3886 * - set the fragmentation option for IPv4.
3887 * - On return from IP output, check/adjust mtu size of output
3888 * interface and smallest_mtu size as well.
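 *
 * The resulting wire layout (the UDP header is present only when
 * encapsulating over UDP, i.e. port != 0):
 *
 *   [IPv4/IPv6 header][UDP header?][SCTP common header][chunk(s)]
 */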
3890 /* Will need ifdefs around this */
3893 struct sctphdr *sctphdr;
3897 sctp_route_t *ro = NULL;
3898 struct udphdr *udp = NULL;
3901 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3902 struct socket *so = NULL;
3906 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3907 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
3912 vrf_id = stcb->asoc.vrf_id;
3914 vrf_id = inp->def_vrf_id;
3917 /* fill in the HMAC digest for any AUTH chunk in the packet */
3918 if ((auth != NULL) && (stcb != NULL)) {
3919 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
3922 tos_value = net->dscp;
3924 tos_value = stcb->asoc.default_dscp;
3926 tos_value = inp->sctp_ep.default_dscp;
3929 switch (to->sa_family) {
3933 struct ip *ip = NULL;
3934 sctp_route_t iproute;
3937 len = sizeof(struct ip) + sizeof(struct sctphdr);
3939 len += sizeof(struct udphdr);
3941 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
3944 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3947 SCTP_ALIGN_TO_END(newm, len);
3948 SCTP_BUF_LEN(newm) = len;
3949 SCTP_BUF_NEXT(newm) = m;
3953 if (net->flowidset == 0) {
3954 panic("Flow ID not set");
3957 m->m_pkthdr.flowid = net->flowid;
3958 m->m_flags |= M_FLOWID;
3960 if ((init != NULL) && (init->m_flags & M_FLOWID)) {
3961 m->m_pkthdr.flowid = init->m_pkthdr.flowid;
3962 m->m_flags |= M_FLOWID;
3965 packet_length = sctp_calculate_len(m);
3966 ip = mtod(m, struct ip *);
3967 ip->ip_v = IPVERSION;
3968 ip->ip_hl = (sizeof(struct ip) >> 2);
3969 if (tos_value == 0) {
3971 * This means, in particular, that it is not set
3972 * at the SCTP layer. So use the value from the IP layer.
3975 tos_value = inp->ip_inp.inp.inp_ip_tos;
3979 tos_value |= sctp_get_ect(stcb);
3981 if ((nofragment_flag) && (port == 0)) {
3986 /* FreeBSD has a function for ip_id's */
3987 ip->ip_id = ip_newid();
3989 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
3990 ip->ip_len = packet_length;
3991 ip->ip_tos = tos_value;
3993 ip->ip_p = IPPROTO_UDP;
3995 ip->ip_p = IPPROTO_SCTP;
4000 memset(&iproute, 0, sizeof(iproute));
4001 memcpy(&ro->ro_dst, to, to->sa_len);
4003 ro = (sctp_route_t *) & net->ro;
4005 /* Now the address selection part */
4006 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4008 /* call the routine to select the src address */
4009 if (net && out_of_asoc_ok == 0) {
4010 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4011 sctp_free_ifa(net->ro._s_addr);
4012 net->ro._s_addr = NULL;
4013 net->src_addr_selected = 0;
4019 if (net->src_addr_selected == 0) {
4020 /* Cache the source address */
4021 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4024 net->src_addr_selected = 1;
4026 if (net->ro._s_addr == NULL) {
4027 /* No route to host */
4028 net->src_addr_selected = 0;
4029 sctp_handle_no_route(stcb, net, so_locked);
4030 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4032 return (EHOSTUNREACH);
4034 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4036 if (over_addr == NULL) {
4037 struct sctp_ifa *_lsrc;
4039 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4043 if (_lsrc == NULL) {
4044 sctp_handle_no_route(stcb, net, so_locked);
4045 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4047 return (EHOSTUNREACH);
4049 ip->ip_src = _lsrc->address.sin.sin_addr;
4050 sctp_free_ifa(_lsrc);
4052 ip->ip_src = over_addr->sin.sin_addr;
4053 SCTP_RTALLOC(ro, vrf_id);
4057 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4058 sctp_handle_no_route(stcb, net, so_locked);
4059 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4061 return (EHOSTUNREACH);
4063 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4064 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4065 udp->uh_dport = port;
4066 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
4067 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4068 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4070 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4073 sctphdr->src_port = src_port;
4074 sctphdr->dest_port = dest_port;
4075 sctphdr->v_tag = v_tag;
4076 sctphdr->checksum = 0;
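/*
 * For UDP encapsulation (port != 0) the source port above comes from
 * the sctp_udp_tunneling_port sysctl, the destination port is the one
 * handed to this routine, and uh_sum is seeded with the pseudo-header
 * sum; SCTP_ENABLE_UDP_CSUM() below completes the UDP checksum over
 * the finished chain.
 */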
4079 * If source address selection fails and we find no
4080 * route then the ip_output should fail as well with
4081 * a NO_ROUTE_TO_HOST type error. We probably should
4082 * catch that somewhere and abort the association
4083 * right away (assuming this is an INIT being sent).
4085 if (ro->ro_rt == NULL) {
4087 * src addr selection failed to find a route
4088 * (or valid source addr), so we can't get
4089 * there from here (yet)!
4091 sctp_handle_no_route(stcb, net, so_locked);
4092 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4094 return (EHOSTUNREACH);
4096 if (ro != &iproute) {
4097 memcpy(&iproute, ro, sizeof(*ro));
4099 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4100 (uint32_t) (ntohl(ip->ip_src.s_addr)));
4101 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4102 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
4103 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Route is %p through\n",
4106 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4107 /* failed to prepend data, give up */
4108 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4112 #ifdef SCTP_PACKET_LOGGING
4113 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4114 sctp_packet_log(m, packet_length);
4116 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4118 #if defined(SCTP_WITH_NO_CSUM)
4119 SCTP_STAT_INCR(sctps_sendnocrc);
4121 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4123 (stcb->asoc.loopback_scope))) {
4124 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4125 SCTP_STAT_INCR(sctps_sendswcrc);
4127 SCTP_STAT_INCR(sctps_sendnocrc);
4130 SCTP_ENABLE_UDP_CSUM(o_pak);
4132 #if defined(SCTP_WITH_NO_CSUM)
4133 SCTP_STAT_INCR(sctps_sendnocrc);
4135 m->m_pkthdr.csum_flags = CSUM_SCTP;
4136 m->m_pkthdr.csum_data = 0;
4137 SCTP_STAT_INCR(sctps_sendhwcrc);
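/*
 * Checksum strategy above: when tunneling over UDP (port != 0) the
 * CRC32c is always computed in software, since a NIC's SCTP offload
 * would not expect the extra UDP header; loopback traffic may skip
 * the checksum entirely via sctp_no_csum_on_loopback. Otherwise we
 * mark the packet CSUM_SCTP so the CRC is offloaded to the NIC, with
 * the IP output path falling back to software on interfaces that
 * cannot do it.
 */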
4140 /* send it out. table id is taken from stcb */
4141 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4142 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4143 so = SCTP_INP_SO(inp);
4144 SCTP_SOCKET_UNLOCK(so, 0);
4147 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4148 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4149 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4150 atomic_add_int(&stcb->asoc.refcnt, 1);
4151 SCTP_TCB_UNLOCK(stcb);
4152 SCTP_SOCKET_LOCK(so, 0);
4153 SCTP_TCB_LOCK(stcb);
4154 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4157 SCTP_STAT_INCR(sctps_sendpackets);
4158 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4160 SCTP_STAT_INCR(sctps_senderrors);
4162 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4164 /* free temporary routes */
4171 * PMTU check versus smallest asoc MTU goes here.
4174 if ((ro->ro_rt != NULL) &&
4175 (net->ro._s_addr)) {
4178 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4180 mtu -= sizeof(struct udphdr);
4182 if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
4183 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4186 } else if (ro->ro_rt == NULL) {
4187 /* route was freed */
4188 if (net->ro._s_addr &&
4189 net->src_addr_selected) {
4190 sctp_free_ifa(net->ro._s_addr);
4191 net->ro._s_addr = NULL;
4193 net->src_addr_selected = 0;
4202 uint32_t flowlabel, flowinfo;
4203 struct ip6_hdr *ip6h;
4204 struct route_in6 ip6route;
4206 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4208 struct sockaddr_in6 lsa6_storage;
4210 u_short prev_port = 0;
4214 flowlabel = net->flowlabel;
4216 flowlabel = stcb->asoc.default_flowlabel;
4218 flowlabel = inp->sctp_ep.default_flowlabel;
4220 if (flowlabel == 0) {
4222 * This means, in particular, that it is not set
4223 * at the SCTP layer. So use the value from the IP layer.
4226 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4228 flowlabel &= 0x000fffff;
4229 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
4231 len += sizeof(struct udphdr);
4233 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
4236 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4239 SCTP_ALIGN_TO_END(newm, len);
4240 SCTP_BUF_LEN(newm) = len;
4241 SCTP_BUF_NEXT(newm) = m;
4245 if (net->flowidset == 0) {
4246 panic("Flow ID not set");
4249 m->m_pkthdr.flowid = net->flowid;
4250 m->m_flags |= M_FLOWID;
4252 if ((init != NULL) && (init->m_flags & M_FLOWID)) {
4253 m->m_pkthdr.flowid = init->m_pkthdr.flowid;
4254 m->m_flags |= M_FLOWID;
4257 packet_length = sctp_calculate_len(m);
4259 ip6h = mtod(m, struct ip6_hdr *);
4260 /* protect *sin6 from overwrite */
4261 sin6 = (struct sockaddr_in6 *)to;
4265 /* KAME hack: embed scopeid */
4266 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4267 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4271 memset(&ip6route, 0, sizeof(ip6route));
4272 ro = (sctp_route_t *) & ip6route;
4273 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4275 ro = (sctp_route_t *) & net->ro;
4278 * We assume here that inp_flow is in host byte
4279 * order within the TCB!
4281 if (tos_value == 0) {
4283 * This means, in particular, that it is not set
4284 * at the SCTP layer. So use the value from the IP layer.
4287 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4291 tos_value |= sctp_get_ect(stcb);
4295 flowinfo |= tos_value;
4297 flowinfo |= flowlabel;
4298 ip6h->ip6_flow = htonl(flowinfo);
4300 ip6h->ip6_nxt = IPPROTO_UDP;
4302 ip6h->ip6_nxt = IPPROTO_SCTP;
4304 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
4305 ip6h->ip6_dst = sin6->sin6_addr;
4308 * Add SRC address selection here: we can only reuse
4309 * to a limited degree the kame src-addr-sel, since
4310 * we can try their selection, but it may not be bound.
4313 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
4314 lsa6_tmp.sin6_family = AF_INET6;
4315 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4317 if (net && out_of_asoc_ok == 0) {
4318 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4319 sctp_free_ifa(net->ro._s_addr);
4320 net->ro._s_addr = NULL;
4321 net->src_addr_selected = 0;
4327 if (net->src_addr_selected == 0) {
4328 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4329 /* KAME hack: embed scopeid */
4330 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4331 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4334 /* Cache the source address */
4335 net->ro._s_addr = sctp_source_address_selection(inp,
4341 (void)sa6_recoverscope(sin6);
4342 net->src_addr_selected = 1;
4344 if (net->ro._s_addr == NULL) {
4345 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4346 net->src_addr_selected = 0;
4347 sctp_handle_no_route(stcb, net, so_locked);
4348 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4350 return (EHOSTUNREACH);
4352 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4354 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4355 /* KAME hack: embed scopeid */
4356 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4357 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4360 if (over_addr == NULL) {
4361 struct sctp_ifa *_lsrc;
4363 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4367 if (_lsrc == NULL) {
4368 sctp_handle_no_route(stcb, net, so_locked);
4369 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4371 return (EHOSTUNREACH);
4373 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4374 sctp_free_ifa(_lsrc);
4376 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4377 SCTP_RTALLOC(ro, vrf_id);
4379 (void)sa6_recoverscope(sin6);
4381 lsa6->sin6_port = inp->sctp_lport;
4383 if (ro->ro_rt == NULL) {
4385 * src addr selection failed to find a route
4386 * (or valid source addr), so we can't get there from here (yet)!
4389 sctp_handle_no_route(stcb, net, so_locked);
4390 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4392 return (EHOSTUNREACH);
4395 * XXX: sa6 may not have a valid sin6_scope_id in
4396 * the non-SCOPEDROUTING case.
4398 bzero(&lsa6_storage, sizeof(lsa6_storage));
4399 lsa6_storage.sin6_family = AF_INET6;
4400 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4401 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4402 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4403 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4408 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4409 lsa6_storage.sin6_port = inp->sctp_lport;
4410 lsa6 = &lsa6_storage;
4411 ip6h->ip6_src = lsa6->sin6_addr;
4414 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4415 sctp_handle_no_route(stcb, net, so_locked);
4416 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4418 return (EHOSTUNREACH);
4420 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4421 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4422 udp->uh_dport = port;
4423 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4425 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4427 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4430 sctphdr->src_port = src_port;
4431 sctphdr->dest_port = dest_port;
4432 sctphdr->v_tag = v_tag;
4433 sctphdr->checksum = 0;
4436 * We set the hop limit now since there is a good
4437 * chance that our ro pointer is now filled
4439 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4440 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4443 /* Copy to be sure something bad is not happening */
4444 sin6->sin6_addr = ip6h->ip6_dst;
4445 lsa6->sin6_addr = ip6h->ip6_src;
4448 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4449 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4450 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4451 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4452 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4454 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4456 * preserve the port and scope for link local sends
4459 prev_scope = sin6->sin6_scope_id;
4460 prev_port = sin6->sin6_port;
4462 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4463 /* failed to prepend data, give up */
4465 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4468 #ifdef SCTP_PACKET_LOGGING
4469 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4470 sctp_packet_log(m, packet_length);
4472 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4474 #if defined(SCTP_WITH_NO_CSUM)
4475 SCTP_STAT_INCR(sctps_sendnocrc);
4477 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4479 (stcb->asoc.loopback_scope))) {
4480 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4481 SCTP_STAT_INCR(sctps_sendswcrc);
4483 SCTP_STAT_INCR(sctps_sendnocrc);
4486 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4487 udp->uh_sum = 0xffff;
4490 #if defined(SCTP_WITH_NO_CSUM)
4491 SCTP_STAT_INCR(sctps_sendnocrc);
4493 m->m_pkthdr.csum_flags = CSUM_SCTP;
4494 m->m_pkthdr.csum_data = 0;
4495 SCTP_STAT_INCR(sctps_sendhwcrc);
4498 /* send it out. table id is taken from stcb */
4499 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4500 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4501 so = SCTP_INP_SO(inp);
4502 SCTP_SOCKET_UNLOCK(so, 0);
4505 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4506 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4507 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4508 atomic_add_int(&stcb->asoc.refcnt, 1);
4509 SCTP_TCB_UNLOCK(stcb);
4510 SCTP_SOCKET_LOCK(so, 0);
4511 SCTP_TCB_LOCK(stcb);
4512 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4516 /* for link local this must be done */
4517 sin6->sin6_scope_id = prev_scope;
4518 sin6->sin6_port = prev_port;
4520 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4521 SCTP_STAT_INCR(sctps_sendpackets);
4522 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4524 SCTP_STAT_INCR(sctps_senderrors);
4527 /* Now, if we had a temp route, free it */
4533 * PMTU check versus smallest asoc MTU goes here.
4536 if (ro->ro_rt == NULL) {
4537 /* Route was freed */
4538 if (net->ro._s_addr &&
4539 net->src_addr_selected) {
4540 sctp_free_ifa(net->ro._s_addr);
4541 net->ro._s_addr = NULL;
4543 net->src_addr_selected = 0;
4545 if ((ro->ro_rt != NULL) &&
4546 (net->ro._s_addr)) {
4549 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4551 (stcb->asoc.smallest_mtu > mtu)) {
4552 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4555 net->mtu -= sizeof(struct udphdr);
4559 if (ND_IFINFO(ifp)->linkmtu &&
4560 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4561 sctp_mtu_size_reset(inp,
4563 ND_IFINFO(ifp)->linkmtu);
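/*
 * IPv6 PMTU upkeep, in short: use the MTU reported for the cached
 * route (less the UDP header when encapsulating), falling back to
 * the neighbor-discovery link MTU; either may shrink the
 * association's smallest_mtu via sctp_mtu_size_reset().
 */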
4571 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4572 ((struct sockaddr *)to)->sa_family);
4574 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4581 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4582 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4587 struct mbuf *m, *m_at, *mp_last;
4588 struct sctp_nets *net;
4589 struct sctp_init_chunk *init;
4590 struct sctp_supported_addr_param *sup_addr;
4591 struct sctp_adaptation_layer_indication *ali;
4592 struct sctp_ecn_supported_param *ecn;
4593 struct sctp_prsctp_supported_param *prsctp;
4594 struct sctp_supported_chunk_types_param *pr_supported;
4595 int cnt_inits_to = 0;
4600 /* INITs always go to the primary (and usually the ONLY) address */
4602 net = stcb->asoc.primary_destination;
4604 net = TAILQ_FIRST(&stcb->asoc.nets);
4609 /* we confirm any address we send an INIT to */
4610 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4611 (void)sctp_set_primary_addr(stcb, NULL, net);
4613 /* we confirm any address we send an INIT to */
4614 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4616 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4618 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
4620 * special hook: if we are sending to a link local address it will
4621 * not show up in our private address count.
4623 struct sockaddr_in6 *sin6l;
4625 sin6l = &net->ro._l_addr.sin6;
4626 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
4630 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4631 /* This case should not happen */
4632 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4635 /* start the INIT timer */
4636 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4638 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
4640 /* No memory, INIT timer will re-attempt. */
4641 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4644 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
4646 * assume peer supports asconf in order to be able to queue local
4647 * address changes while an INIT is in flight and before the assoc is established.
4650 stcb->asoc.peer_supports_asconf = 1;
4651 /* Now lets put the SCTP header in place */
4652 init = mtod(m, struct sctp_init_chunk *);
4653 /* now the chunk header */
4654 init->ch.chunk_type = SCTP_INITIATION;
4655 init->ch.chunk_flags = 0;
4656 /* fill in later from mbuf we build */
4657 init->ch.chunk_length = 0;
4658 /* place in my tag */
4659 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4660 /* set up some of the credits. */
4661 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4662 SCTP_MINIMAL_RWND));
4664 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4665 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4666 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4667 /* now the address restriction */
4668 /* XXX Should we take the address family of the socket into account? */
4669 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init +
4671 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4674 /* we support 2 types: IPv4/IPv6 */
4675 sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + 2 * sizeof(uint16_t));
4676 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4677 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
4679 /* we support 1 type: IPv6 */
4680 sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
4681 sup_addr->addr_type[0] = htons(SCTP_IPV6_ADDRESS);
4682 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4685 /* we support 1 type: IPv4 */
4686 sup_addr->ph.param_length = htons(sizeof(struct sctp_paramhdr) + sizeof(uint16_t));
4687 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4688 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4690 SCTP_BUF_LEN(m) += sizeof(struct sctp_supported_addr_param);
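/*
 * Worked example of the TLV built above when both families are legal
 * (all fields big-endian): type 0x000c (Supported Address Types),
 * length 8, addr_type[0] = 0x0005 (IPv4), addr_type[1] = 0x0006
 * (IPv6). In the single-family cases the second slot is merely the
 * required padding.
 */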
4691 /* adaptation layer indication parameter */
4692 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(struct sctp_supported_addr_param));
4693 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4694 ali->ph.param_length = htons(sizeof(*ali));
4695 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4696 SCTP_BUF_LEN(m) += sizeof(*ali);
4697 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
4699 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4700 /* Add NAT friendly parameter */
4701 struct sctp_paramhdr *ph;
4703 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4704 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4705 ph->param_length = htons(sizeof(struct sctp_paramhdr));
4706 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
4707 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph));
4709 /* now any cookie time extensions */
4710 if (stcb->asoc.cookie_preserve_req) {
4711 struct sctp_cookie_perserve_param *cookie_preserve;
4713 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
4714 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4715 cookie_preserve->ph.param_length = htons(
4716 sizeof(*cookie_preserve));
4717 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4718 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
4719 ecn = (struct sctp_ecn_supported_param *)(
4720 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
4721 stcb->asoc.cookie_preserve_req = 0;
4724 if (stcb->asoc.ecn_allowed == 1) {
4725 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
4726 ecn->ph.param_length = htons(sizeof(*ecn));
4727 SCTP_BUF_LEN(m) += sizeof(*ecn);
4728 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
4731 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
4733 /* And now tell the peer we do pr-sctp */
4734 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
4735 prsctp->ph.param_length = htons(sizeof(*prsctp));
4736 SCTP_BUF_LEN(m) += sizeof(*prsctp);
4738 /* And now tell the peer we do all the extensions */
4739 pr_supported = (struct sctp_supported_chunk_types_param *)
4740 ((caddr_t)prsctp + sizeof(*prsctp));
4741 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4743 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4744 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4745 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4746 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4747 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4748 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4749 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4751 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
4752 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4754 p_len = sizeof(*pr_supported) + num_ext;
4755 pr_supported->ph.param_length = htons(p_len);
4756 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
4757 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
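/*
 * SCTP_SIZE32() rounds a length up to a 4-byte boundary. E.g. with
 * all seven extension chunk types above included, p_len is the 4-byte
 * parameter header plus 7 chunk types = 11, carried as 12 on the
 * wire; the bzero() clears the single pad byte.
 */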
4760 /* add authentication parameters */
4761 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4762 struct sctp_auth_random *randp;
4763 struct sctp_auth_hmac_algo *hmacs;
4764 struct sctp_auth_chunk_list *chunks;
4766 /* attach RANDOM parameter, if available */
4767 if (stcb->asoc.authinfo.random != NULL) {
4768 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4769 p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
4770 /* random key already contains the header */
4771 bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
4772 /* zero out any padding required */
4773 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
4774 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4776 /* add HMAC_ALGO parameter */
4777 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4778 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
4779 (uint8_t *) hmacs->hmac_ids);
4781 p_len += sizeof(*hmacs);
4782 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4783 hmacs->ph.param_length = htons(p_len);
4784 /* zero out any padding required */
4785 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
4786 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4788 /* add CHUNKS parameter */
4789 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4790 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
4791 chunks->chunk_types);
4793 p_len += sizeof(*chunks);
4794 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4795 chunks->ph.param_length = htons(p_len);
4796 /* zero out any padding required */
4797 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
4798 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
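/*
 * These are the three RFC 4895 AUTH parameters: RANDOM (0x8002),
 * HMAC-ALGO (0x8004) and CHUNKS (0x8003); each is zero-padded to a
 * 4-byte boundary like every other parameter in the INIT.
 */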
4801 /* now the addresses */
4803 struct sctp_scoping scp;
4806 * To optimize this we could put the scoping stuff into a
4807 * structure and remove the individual uint8's from the
4808 * assoc structure. Then we could just pass in the address
4809 * within the stcb. But for now this is a quick hack to get
4810 * the address stuff teased apart.
4813 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
4814 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
4815 scp.loopback_scope = stcb->asoc.loopback_scope;
4816 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
4817 scp.local_scope = stcb->asoc.local_scope;
4818 scp.site_scope = stcb->asoc.site_scope;
4820 sctp_add_addresses_to_i_ia(inp, stcb, &scp, m, cnt_inits_to);
4823 /* calculate the size and update pkt header and chunk header */
4825 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4826 if (SCTP_BUF_NEXT(m_at) == NULL)
4828 p_len += SCTP_BUF_LEN(m_at);
4830 init->ch.chunk_length = htons(p_len);
4832 * We pass 0 here so as NOT to set IP_DF if it is IPv4; we ignore the
4833 * return value here since the timer will drive a retransmission.
4836 /* I don't expect this to execute but we will be safe here */
4838 if ((padval) && (mp_last)) {
4840 * The compiler worries that mp_last may not be set even
4841 * though I think it is impossible :-> however we add
4842 * mp_last here just in case.
4844 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
4846 /* Houston we have a problem, no space */
4851 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4852 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4853 (struct sockaddr *)&net->ro._l_addr,
4854 m, 0, NULL, 0, 0, 0, 0,
4855 inp->sctp_lport, stcb->rport, htonl(0),
4856 net->port, so_locked, NULL, NULL);
4857 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4858 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4859 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4863 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4864 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4867 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
4868 * set to the beginning of the parameters (i.e. iphlen +
4869 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4870 * end of the mbuf, verifying that all parameters are known.
4872 * For unknown parameters build and return an mbuf with
4873 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4874 * processing this chunk, stop and set *abort_processing to 1.
4876 * By having param_offset pre-set to where the parameters begin, it is
4877 * hoped that this routine may be reused in the future by new features.
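 *
 * Reminder (RFC 4960, Section 3.2.1): the two high-order bits of an
 * unrecognized parameter type select the action, which is what the
 * 0x4000 and 0x8000 tests in the default case below implement:
 *
 *   00 - stop processing and discard the chunk
 *   01 - stop processing, discard, and report (ptype & 0x4000)
 *   10 - skip this parameter and continue (ptype & 0x8000)
 *   11 - skip, continue, and report
 */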
4880 struct sctp_paramhdr *phdr, params;
4882 struct mbuf *mat, *op_err;
4883 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4884 int at, limit, pad_needed;
4885 uint16_t ptype, plen, padded_size;
4888 *abort_processing = 0;
4891 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4894 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4895 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
4896 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4897 ptype = ntohs(phdr->param_type);
4898 plen = ntohs(phdr->param_length);
4899 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4900 /* malformed parameter */
4901 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4904 limit -= SCTP_SIZE32(plen);
4906 * All parameters for all chunks that we know/understand are
4907 * listed here. We process them in other places and take the
4908 * appropriate stop actions per the upper bits. However, this
4909 * is the generic routine processors can call to get back an
4910 * operational error, to either incorporate (INIT-ACK) or send.
4912 padded_size = SCTP_SIZE32(plen);
4914 /* Params with variable size */
4915 case SCTP_HEARTBEAT_INFO:
4916 case SCTP_STATE_COOKIE:
4917 case SCTP_UNRECOG_PARAM:
4918 case SCTP_ERROR_CAUSE_IND:
4922 /* Params with variable size within a range */
4923 case SCTP_CHUNK_LIST:
4924 case SCTP_SUPPORTED_CHUNK_EXT:
4925 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4926 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4931 case SCTP_SUPPORTED_ADDRTYPE:
4932 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4933 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4939 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4940 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4945 case SCTP_SET_PRIM_ADDR:
4946 case SCTP_DEL_IP_ADDRESS:
4947 case SCTP_ADD_IP_ADDRESS:
4948 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4949 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4950 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4955 /* Params with a fixed size */
4956 case SCTP_IPV4_ADDRESS:
4957 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4958 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4963 case SCTP_IPV6_ADDRESS:
4964 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4965 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
4970 case SCTP_COOKIE_PRESERVE:
4971 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
4972 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
4977 case SCTP_HAS_NAT_SUPPORT:
4980 case SCTP_PRSCTP_SUPPORTED:
4982 if (padded_size != sizeof(struct sctp_paramhdr)) {
4983 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
4988 case SCTP_ECN_CAPABLE:
4989 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
4990 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
4995 case SCTP_ULP_ADAPTATION:
4996 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
4997 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5002 case SCTP_SUCCESS_REPORT:
5003 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5004 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5009 case SCTP_HOSTNAME_ADDRESS:
5011 /* We can NOT handle HOST NAME addresses!! */
5014 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5015 *abort_processing = 1;
5016 if (op_err == NULL) {
5017 /* Ok need to try to get an mbuf */
5019 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5021 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5024 l_len += sizeof(struct sctp_paramhdr);
5025 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5027 SCTP_BUF_LEN(op_err) = 0;
5029 * pre-reserve space for ip
5030 * and sctp header and chunk hdr
5034 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5036 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5038 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5039 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5043 /* If we have space */
5044 struct sctp_paramhdr s;
5047 uint32_t cpthis = 0;
5049 pad_needed = 4 - (err_at % 4);
5050 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5051 err_at += pad_needed;
5053 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5054 s.param_length = htons(sizeof(s) + plen);
5055 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5056 err_at += sizeof(s);
5057 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5059 sctp_m_freem(op_err);
5061 * we are out of memory but
5062 * we still need to have a
5063 * look at what to do (the
5064 * system is in trouble though).
5069 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5076 * we do not recognize the parameter; figure out what to do.
5079 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5080 if ((ptype & 0x4000) == 0x4000) {
5081 /* Report bit is set?? */
5082 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5083 if (op_err == NULL) {
5086 /* Ok need to try to get an mbuf */
5088 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5090 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5093 l_len += sizeof(struct sctp_paramhdr);
5094 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5096 SCTP_BUF_LEN(op_err) = 0;
5098 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5100 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5102 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5103 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5107 /* If we have space */
5108 struct sctp_paramhdr s;
5111 uint32_t cpthis = 0;
5113 pad_needed = 4 - (err_at % 4);
5114 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5115 err_at += pad_needed;
5117 s.param_type = htons(SCTP_UNRECOG_PARAM);
5118 s.param_length = htons(sizeof(s) + plen);
5119 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5120 err_at += sizeof(s);
5121 if (plen > sizeof(tempbuf)) {
5122 plen = sizeof(tempbuf);
5124 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
5126 sctp_m_freem(op_err);
5128 * we are out of memory but
5129 * we still need to have a
5130 * look at what to do (the
5131 * system is in trouble though).
5135 goto more_processing;
5137 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5142 if ((ptype & 0x8000) == 0x0000) {
5143 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5146 /* skip this parameter and continue processing */
5147 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5148 at += SCTP_SIZE32(plen);
5153 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5157 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5158 *abort_processing = 1;
5159 if ((op_err == NULL) && phdr) {
5163 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5165 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
5167 l_len += (2 * sizeof(struct sctp_paramhdr));
5168 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
5170 SCTP_BUF_LEN(op_err) = 0;
5172 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5174 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5176 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5177 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5180 if ((op_err) && phdr) {
5181 struct sctp_paramhdr s;
5184 uint32_t cpthis = 0;
5186 pad_needed = 4 - (err_at % 4);
5187 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5188 err_at += pad_needed;
5190 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5191 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5192 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5193 err_at += sizeof(s);
5194 /* Only copy back the p-hdr that caused the issue */
5195 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5201 sctp_are_there_new_addresses(struct sctp_association *asoc,
5202 struct mbuf *in_initpkt, int offset)
5205 * Given an INIT packet, look through the packet to verify that there
5206 * are NO new addresses. As we go through the parameters, add reports
5207 * of any un-understood parameters that require an error. Also, we
5208 * must return (1) to drop the packet if we see an un-understood
5209 * parameter that tells us to drop the chunk.
5211 struct sockaddr *sa_touse;
5212 struct sockaddr *sa;
5213 struct sctp_paramhdr *phdr, params;
5214 uint16_t ptype, plen;
5216 struct sctp_nets *net;
5220 struct sockaddr_in sin4, *sa4;
5224 struct sockaddr_in6 sin6, *sa6;
5225 struct ip6_hdr *ip6h;
5230 memset(&sin4, 0, sizeof(sin4));
5231 sin4.sin_family = AF_INET;
5232 sin4.sin_len = sizeof(sin4);
5235 memset(&sin6, 0, sizeof(sin6));
5236 sin6.sin6_family = AF_INET6;
5237 sin6.sin6_len = sizeof(sin6);
5240 /* First what about the src address of the pkt ? */
5241 iph = mtod(in_initpkt, struct ip *);
5242 switch (iph->ip_v) {
5245 /* source addr is IPv4 */
5246 sin4.sin_addr = iph->ip_src;
5247 sa_touse = (struct sockaddr *)&sin4;
5251 case IPV6_VERSION >> 4:
5252 /* source addr is IPv6 */
5253 ip6h = mtod(in_initpkt, struct ip6_hdr *);
5254 sin6.sin6_addr = ip6h->ip6_src;
5255 sa_touse = (struct sockaddr *)&sin6;
5263 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5264 sa = (struct sockaddr *)&net->ro._l_addr;
5265 if (sa->sa_family == sa_touse->sa_family) {
5267 if (sa->sa_family == AF_INET) {
5268 sa4 = (struct sockaddr_in *)sa;
5269 if (sa4->sin_addr.s_addr == sin4.sin_addr.s_addr) {
5276 if (sa->sa_family == AF_INET6) {
5277 sa6 = (struct sockaddr_in6 *)sa;
5278 if (SCTP6_ARE_ADDR_EQUAL(sa6, &sin6)) {
5287 /* New address added! No need to look further. */
5290 /* Ok so far; let's munge through the rest of the packet */
5291 offset += sizeof(struct sctp_init_chunk);
5292 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5295 ptype = ntohs(phdr->param_type);
5296 plen = ntohs(phdr->param_length);
5299 case SCTP_IPV4_ADDRESS:
5301 struct sctp_ipv4addr_param *p4, p4_buf;
5303 phdr = sctp_get_next_param(in_initpkt, offset,
5304 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5305 if (plen != sizeof(struct sctp_ipv4addr_param) ||
5309 p4 = (struct sctp_ipv4addr_param *)phdr;
5310 sin4.sin_addr.s_addr = p4->addr;
5311 sa_touse = (struct sockaddr *)&sin4;
5316 case SCTP_IPV6_ADDRESS:
5318 struct sctp_ipv6addr_param *p6, p6_buf;
5320 phdr = sctp_get_next_param(in_initpkt, offset,
5321 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5322 if (plen != sizeof(struct sctp_ipv6addr_param) ||
5326 p6 = (struct sctp_ipv6addr_param *)phdr;
5327 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5329 sa_touse = (struct sockaddr *)&sin6;
5338 /* ok, sa_touse points to one to check */
5340 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5341 sa = (struct sockaddr *)&net->ro._l_addr;
5342 if (sa->sa_family != sa_touse->sa_family) {
5346 if (sa->sa_family == AF_INET) {
5347 sa4 = (struct sockaddr_in *)sa;
5348 if (sa4->sin_addr.s_addr ==
5349 sin4.sin_addr.s_addr) {
5356 if (sa->sa_family == AF_INET6) {
5357 sa6 = (struct sockaddr_in6 *)sa;
5358 if (SCTP6_ARE_ADDR_EQUAL(
5367 /* New addr added! No need to look further */
5371 offset += SCTP_SIZE32(plen);
5372 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5378 * Given an mbuf chain that was sent to us containing an INIT, build an
5379 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
5380 * done a pullup to include the IPv6/IPv4 header, the SCTP header and the
5381 * initial part of the INIT message (i.e. the struct sctp_init_msg).
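 *
 * In outline: validate the INIT (no new addresses outside COOKIE-WAIT,
 * no fatal unrecognized parameters), fill in the state cookie (stc),
 * build the INIT-ACK chunk and its parameters, append any operational
 * error plus the signed cookie, and hand the finished chain to
 * sctp_lowlevel_chunk_output().
 */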
5384 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5385 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
5386 struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock)
5388 struct sctp_association *asoc;
5389 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
5390 struct sctp_init_ack_chunk *initack;
5391 struct sctp_adaptation_layer_indication *ali;
5392 struct sctp_ecn_supported_param *ecn;
5393 struct sctp_prsctp_supported_param *prsctp;
5394 struct sctp_supported_chunk_types_param *pr_supported;
5395 union sctp_sockstore store, store1, *over_addr;
5398 struct sockaddr_in *sin, *to_sin;
5402 struct sockaddr_in6 *sin6, *to_sin6;
5408 struct ip6_hdr *ip6;
5411 struct sockaddr *to;
5412 struct sctp_state_cookie stc;
5413 struct sctp_nets *net = NULL;
5414 uint8_t *signature = NULL;
5415 int cnt_inits_to = 0;
5416 uint16_t his_limit, i_want;
5417 int abort_flag, padval;
5420 int nat_friendly = 0;
5428 if ((asoc != NULL) &&
5429 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5430 (sctp_are_there_new_addresses(asoc, init_pkt, offset))) {
5431 /* new addresses, out of here in non-cookie-wait states */
5433 * Send an ABORT; we don't add the new-address error cause,
5434 * though we do set the T bit and copy in the 0 tag. This
5435 * looks no different than if no listener was present.
5437 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port);
5441 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5442 (offset + sizeof(struct sctp_init_chunk)),
5443 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5446 sctp_send_abort(init_pkt, iphlen, sh,
5447 init_chk->init.initiate_tag, op_err, vrf_id, port);
5450 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
5452 /* No memory, INIT timer will re-attempt. */
5454 sctp_m_freem(op_err);
5457 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5459 /* the time I built the cookie */
5460 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5462 /* populate any tie tags */
5464 /* unlock before tag selections */
5465 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5466 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5467 stc.cookie_life = asoc->cookie_life;
5468 net = asoc->primary_destination;
5470 stc.tie_tag_my_vtag = 0;
5471 stc.tie_tag_peer_vtag = 0;
5472 /* life I will award this cookie */
5473 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5476 /* copy in the ports for later check */
5477 stc.myport = sh->dest_port;
5478 stc.peerport = sh->src_port;
5481 * If we wanted to honor cookie life extensions, we would add to
5482 * stc.cookie_life. For now we should NOT honor any extensions.
5484 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5485 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5486 struct inpcb *in_inp;
5488 /* It's a V6 socket */
5489 in_inp = (struct inpcb *)inp;
5490 stc.ipv6_addr_legal = 1;
5491 /* Now look at the binding flag to see if V4 will be legal */
5492 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
5493 stc.ipv4_addr_legal = 1;
5495 /* V4 addresses are NOT legal on the association */
5496 stc.ipv4_addr_legal = 0;
5499 /* It's a V4 socket, not V6 */
5500 stc.ipv4_addr_legal = 1;
5501 stc.ipv6_addr_legal = 0;
5504 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5509 /* now for scope setup */
5510 memset((caddr_t)&store, 0, sizeof(store));
5511 memset((caddr_t)&store1, 0, sizeof(store1));
5514 to_sin = &store1.sin;
5518 to_sin6 = &store1.sin6;
5520 iph = mtod(init_pkt, struct ip *);
5521 /* establish the to_addr's */
5522 switch (iph->ip_v) {
5525 to_sin->sin_port = sh->dest_port;
5526 to_sin->sin_family = AF_INET;
5527 to_sin->sin_len = sizeof(struct sockaddr_in);
5528 to_sin->sin_addr = iph->ip_dst;
5532 case IPV6_VERSION >> 4:
5533 ip6 = mtod(init_pkt, struct ip6_hdr *);
5534 to_sin6->sin6_addr = ip6->ip6_dst;
5535 to_sin6->sin6_scope_id = 0;
5536 to_sin6->sin6_port = sh->dest_port;
5537 to_sin6->sin6_family = AF_INET6;
5538 to_sin6->sin6_len = sizeof(struct sockaddr_in6);
5547 to = (struct sockaddr *)&store;
5548 switch (iph->ip_v) {
5552 sin->sin_family = AF_INET;
5553 sin->sin_len = sizeof(struct sockaddr_in);
5554 sin->sin_port = sh->src_port;
5555 sin->sin_addr = iph->ip_src;
5556 /* lookup address */
5557 stc.address[0] = sin->sin_addr.s_addr;
5561 stc.addr_type = SCTP_IPV4_ADDRESS;
5562 /* local from address */
5563 stc.laddress[0] = to_sin->sin_addr.s_addr;
5564 stc.laddress[1] = 0;
5565 stc.laddress[2] = 0;
5566 stc.laddress[3] = 0;
5567 stc.laddr_type = SCTP_IPV4_ADDRESS;
5568 /* scope_id is only for v6 */
5570 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5571 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
5576 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5577 /* Must use the address in this case */
5578 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
5579 stc.loopback_scope = 1;
5582 stc.local_scope = 0;
5588 case IPV6_VERSION >> 4:
5590 ip6 = mtod(init_pkt, struct ip6_hdr *);
5591 sin6->sin6_family = AF_INET6;
5592 sin6->sin6_len = sizeof(struct sockaddr_in6);
5593 sin6->sin6_port = sh->src_port;
5594 sin6->sin6_addr = ip6->ip6_src;
5595 /* lookup address */
5596 memcpy(&stc.address, &sin6->sin6_addr,
5597 sizeof(struct in6_addr));
5598 sin6->sin6_scope_id = 0;
5599 stc.addr_type = SCTP_IPV6_ADDRESS;
5601 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
5603 * FIX ME: does this have scope from
5606 (void)sa6_recoverscope(sin6);
5607 stc.scope_id = sin6->sin6_scope_id;
5608 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5609 stc.loopback_scope = 1;
5610 stc.local_scope = 0;
5613 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
5615 * If the new destination is a
5616 * LINK_LOCAL we must have both
5617 * site and local scope in common. Don't
5618 * set local scope though, since we
5619 * must depend on the source to be
5620 * added implicitly. We cannot
5621 * assume, just because we share one
5622 * link, that all links are common.
5624 stc.local_scope = 0;
5628 * we start counting for the private
5629 * address stuff at 1, since the
5630 * link local we source from won't
5631 * show up in our scoped count.
5635 * pull out the scope_id from the incoming packet
5639 * FIX ME: does this have scope from
5642 (void)sa6_recoverscope(sin6);
5643 stc.scope_id = sin6->sin6_scope_id;
5644 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5645 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
5647 * If the new destination is
5648 * SITE_LOCAL then we must have site scope in common.
5653 memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr));
5654 stc.laddr_type = SCTP_IPV6_ADDRESS;
5664 /* set the scope per the existing tcb */
5667 struct sctp_nets *lnet;
5671 stc.loopback_scope = asoc->loopback_scope;
5672 stc.ipv4_scope = asoc->ipv4_local_scope;
5673 stc.site_scope = asoc->site_scope;
5674 stc.local_scope = asoc->local_scope;
5676 /* Why do we not consider IPv4 LL addresses? */
5677 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5678 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5679 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5681 * if we have a LL address, start counting at 1.
5689 /* use the net pointer */
5690 to = (struct sockaddr *)&net->ro._l_addr;
5691 switch (to->sa_family) {
5694 sin = (struct sockaddr_in *)to;
5695 stc.address[0] = sin->sin_addr.s_addr;
5699 stc.addr_type = SCTP_IPV4_ADDRESS;
5700 if (net->src_addr_selected == 0) {
5702 * strange case here, the INIT should have
5703 * done the selection.
5705 net->ro._s_addr = sctp_source_address_selection(inp,
5706 stcb, (sctp_route_t *) & net->ro,
5708 if (net->ro._s_addr == NULL)
5711 net->src_addr_selected = 1;
5714 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5715 stc.laddress[1] = 0;
5716 stc.laddress[2] = 0;
5717 stc.laddress[3] = 0;
5718 stc.laddr_type = SCTP_IPV4_ADDRESS;
5719 /* scope_id is only for v6 */
5725 sin6 = (struct sockaddr_in6 *)to;
5726 memcpy(&stc.address, &sin6->sin6_addr,
5727 sizeof(struct in6_addr));
5728 stc.addr_type = SCTP_IPV6_ADDRESS;
5729 stc.scope_id = sin6->sin6_scope_id;
5730 if (net->src_addr_selected == 0) {
5732 * strange case here, the INIT should have
5733 * done the selection.
5735 net->ro._s_addr = sctp_source_address_selection(inp,
5736 stcb, (sctp_route_t *) & net->ro,
5738 if (net->ro._s_addr == NULL)
5741 net->src_addr_selected = 1;
5743 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5744 sizeof(struct in6_addr));
5745 stc.laddr_type = SCTP_IPV6_ADDRESS;
5750 /* Now lets put the SCTP header in place */
5751 initack = mtod(m, struct sctp_init_ack_chunk *);
5752 /* Save it off for quick ref */
5753 stc.peers_vtag = init_chk->init.initiate_tag;
5755 memcpy(stc.identification, SCTP_VERSION_STRING,
5756 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5757 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5758 /* now the chunk header */
5759 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5760 initack->ch.chunk_flags = 0;
5761 /* fill in later from mbuf we build */
5762 initack->ch.chunk_length = 0;
5763 /* place in my tag */
5764 if ((asoc != NULL) &&
5765 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5766 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5767 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5768 /* re-use the v-tags and init-seq here */
5769 initack->init.initiate_tag = htonl(asoc->my_vtag);
5770 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5772 uint32_t vtag, itsn;
5774 if (hold_inp_lock) {
5775 SCTP_INP_INCR_REF(inp);
5776 SCTP_INP_RUNLOCK(inp);
5779 atomic_add_int(&asoc->refcnt, 1);
5780 SCTP_TCB_UNLOCK(stcb);
5782 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5783 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5785 * Got a duplicate vtag from a peer behind a
5786 * NAT; make sure we don't use it.
5790 initack->init.initiate_tag = htonl(vtag);
5791 /* get a TSN to use too */
5792 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5793 initack->init.initial_tsn = htonl(itsn);
5794 SCTP_TCB_LOCK(stcb);
5795 atomic_add_int(&asoc->refcnt, -1);
5797 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5798 initack->init.initiate_tag = htonl(vtag);
5799 /* get a TSN to use too */
5800 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5802 if (hold_inp_lock) {
5803 SCTP_INP_RLOCK(inp);
5804 SCTP_INP_DECR_REF(inp);
5807 /* save away my tag */
5808 stc.my_vtag = initack->init.initiate_tag;
5810 /* set up some of the credits. */
5811 so = inp->sctp_socket;
5813 /* memory problem */
5817 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5819 /* set what I want */
5820 his_limit = ntohs(init_chk->init.num_inbound_streams);
5821 /* choose what I want */
5823 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5824 i_want = asoc->streamoutcnt;
5826 i_want = inp->sctp_ep.pre_open_stream_count;
5829 i_want = inp->sctp_ep.pre_open_stream_count;
5831 if (his_limit < i_want) {
5832 /* I Want more :< */
5833 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5835 /* I can have what I want :> */
5836 initack->init.num_outbound_streams = htons(i_want);
5838 /* tell him his limit. */
5839 initack->init.num_inbound_streams =
5840 htons(inp->sctp_ep.max_open_streams_intome);
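/*
 * Net effect of the negotiation above: num_outbound_streams =
 * min(i_want, his_limit). E.g. if the peer's INIT advertised 10
 * inbound streams and we want 32 outbound, the INIT-ACK offers 10.
 */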
5842 /* adaptation layer indication parameter */
5843 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5844 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5845 ali->ph.param_length = htons(sizeof(*ali));
5846 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5847 SCTP_BUF_LEN(m) += sizeof(*ali);
5848 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5851 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) ||
5852 (inp->sctp_ecn_enable == 1)) {
5853 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5854 ecn->ph.param_length = htons(sizeof(*ecn));
5855 SCTP_BUF_LEN(m) += sizeof(*ecn);
5857 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5860 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5862 /* And now tell the peer we do pr-sctp */
5863 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5864 prsctp->ph.param_length = htons(sizeof(*prsctp));
5865 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5867 /* Add NAT friendly parameter */
5868 struct sctp_paramhdr *ph;
5870 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5871 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5872 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5873 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5875 /* And now tell the peer we do all the extensions */
5876 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5877 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5879 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5880 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5881 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5882 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5883 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5884 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5885 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5886 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5887 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5888 p_len = sizeof(*pr_supported) + num_ext;
5889 pr_supported->ph.param_length = htons(p_len);
5890 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5891 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5893 /* add authentication parameters */
5894 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5895 struct sctp_auth_random *randp;
5896 struct sctp_auth_hmac_algo *hmacs;
5897 struct sctp_auth_chunk_list *chunks;
5898 uint16_t random_len;
5900 /* generate and add RANDOM parameter */
5901 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5902 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5903 randp->ph.param_type = htons(SCTP_RANDOM);
5904 p_len = sizeof(*randp) + random_len;
5905 randp->ph.param_length = htons(p_len);
5906 SCTP_READ_RANDOM(randp->random_data, random_len);
5907 /* zero out any padding required */
5908 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5909 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5911 /* add HMAC_ALGO parameter */
5912 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5913 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5914 (uint8_t *) hmacs->hmac_ids);
5916 p_len += sizeof(*hmacs);
5917 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5918 hmacs->ph.param_length = htons(p_len);
5919 /* zero out any padding required */
5920 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5921 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5923 /* add CHUNKS parameter */
5924 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5925 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5926 chunks->chunk_types);
5928 p_len += sizeof(*chunks);
5929 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5930 chunks->ph.param_length = htons(p_len);
5931 /* zero out any padding required */
5932 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5933 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5937 /* now the addresses */
5939 struct sctp_scoping scp;
5942 * To optimize this we could put the scoping stuff into a
5943 * structure and remove the individual uint8's from the stc
5944 * structure. Then we could just pass in the address within
5945 * the stc, but for now this is a quick hack to get the
5946 * address stuff teased apart.
5948 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5949 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5950 scp.loopback_scope = stc.loopback_scope;
5951 scp.ipv4_local_scope = stc.ipv4_scope;
5952 scp.local_scope = stc.local_scope;
5953 scp.site_scope = stc.site_scope;
5954 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to);
5957 /* tack on the operational error if present */
5966 llen += SCTP_BUF_LEN(ol);
5967 ol = SCTP_BUF_NEXT(ol);
5970 /* must add a pad to the param */
5971 uint32_t cpthis = 0;
5974 padlen = 4 - (llen % 4);
5975 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
5977 while (SCTP_BUF_NEXT(m_at) != NULL) {
5978 m_at = SCTP_BUF_NEXT(m_at);
5980 SCTP_BUF_NEXT(m_at) = op_err;
5981 while (SCTP_BUF_NEXT(m_at) != NULL) {
5982 m_at = SCTP_BUF_NEXT(m_at);
5985 /* pre-calculate the size and update pkt header and chunk header */
5987 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5988 p_len += SCTP_BUF_LEN(m_tmp);
5989 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5990 /* m_tmp should now point to last one */
5995 /* Now we must build a cookie */
5996 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
5997 if (m_cookie == NULL) {
5998 /* memory problem */
6002 /* Now append the cookie to the end and update the space/size */
6003 SCTP_BUF_NEXT(m_tmp) = m_cookie;
6005 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6006 p_len += SCTP_BUF_LEN(m_tmp);
6007 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6008 /* m_tmp should now point to last one */
6014 * Place in the size, but we don't include the last pad (if any) in the chunk length.
6017 initack->ch.chunk_length = htons(p_len);
6020 * Time to sign the cookie; we don't sign over the cookie signature
6021 * itself, thus we set the trailer flag.
6023 (void)sctp_hmac_m(SCTP_HMAC,
6024 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6025 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6026 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
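/*
 * Illustrative sketch (not compiled; hmac() is a stand-in, not a real
 * API): signing with a trailing signature field means the digest is
 * computed over everything except the last SIGNATURE_SIZE bytes and
 * the result is written into that trailer, so the signature never
 * covers itself.  The verifier recomputes over the same span.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

#define SIG_LEN 20	/* hypothetical signature size */

extern void hmac(const uint8_t *key, size_t keylen,
    const uint8_t *data, size_t datalen, uint8_t *digest);

static void
sign_trailer(const uint8_t *key, size_t keylen, uint8_t *buf, size_t buflen)
{
	/* digest all but the trailer, deposit the digest in the trailer */
	hmac(key, keylen, buf, buflen - SIG_LEN, buf + buflen - SIG_LEN);
}
#endif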
6028 * We pass 0 here so as NOT to set IP_DF if it's IPv4; we ignore the return
6029 * value here since the timer will drive a retransmission.
6032 if ((padval) && (mp_last)) {
6033 /* see my previous comments on mp_last */
6034 if (sctp_add_pad_tombuf(mp_last, (4 - padval))) {
6035 /* Houston we have a problem, no space */
6040 if (stc.loopback_scope) {
6041 over_addr = &store1;
6046 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6048 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6049 port, SCTP_SO_NOT_LOCKED, over_addr, init_pkt);
6050 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6055 sctp_prune_prsctp(struct sctp_tcb *stcb,
6056 struct sctp_association *asoc,
6057 struct sctp_sndrcvinfo *srcv,
6061 struct sctp_tmit_chunk *chk, *nchk;
6063 SCTP_TCB_LOCK_ASSERT(stcb);
6064 if ((asoc->peer_supports_prsctp) &&
6065 (asoc->sent_queue_cnt_removeable > 0)) {
6066 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6068 * Look for chunks marked with the PR_SCTP flag AND
6069 * the buffer space flag. If the one being sent is
6070 * of equal or greater priority, then purge the old one
6071 * and free some space.
6073 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6075 * This one is PR-SCTP AND buffer space limited.
6078 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6080 * Lower numbers equate to higher
6081 * priority, so if the one we are
6082 * looking at has a larger or equal
6083 * priority we want to drop the data
6084 * and NOT retransmit it.
6088 * We release the book_size
6089 * if the mbuf is here
6094 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6095 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
6097 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
6098 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6101 freed_spc += ret_spc;
6102 if (freed_spc >= dataout) {
6105 } /* if chunk was present */
6106 } /* if of sufficient priority */
6107 } /* if chunk has enabled */
6108 } /* tailqforeach */
6110 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6111 /* Here we must move to the sent queue and mark */
6112 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6113 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6116 * We release the book_size
6117 * if the mbuf is here
6121 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6122 SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
6125 freed_spc += ret_spc;
6126 if (freed_spc >= dataout) {
6129 } /* end if chk->data */
6130 } /* end if right class */
6131 } /* end if chk pr-sctp */
6132 } /* tailqforeachsafe (chk) */
6133 } /* if enabled in asoc */
6137 sctp_get_frag_point(struct sctp_tcb *stcb,
6138 struct sctp_association *asoc)
6143 * For endpoints that have both v6 and v4 addresses we must reserve
6144 * room for the IPv6 header; for those that are only dealing with v4
6145 * we use a larger frag point.
6147 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6148 ovh = SCTP_MED_OVERHEAD;
6150 ovh = SCTP_MED_V4_OVERHEAD;
6153 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6154 siz = asoc->smallest_mtu - ovh;
6156 siz = (stcb->asoc.sctp_frag_point - ovh);
6158 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6160 /* A data chunk MUST fit in a cluster */
6161 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6164 /* adjust for an AUTH chunk if DATA requires auth */
6165 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6166 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6169 /* make it an even word boundary please */
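/*
 * Illustrative sketch (not compiled; hypothetical numbers): the frag
 * point is min(configured frag point, smallest path MTU) minus the
 * IP/SCTP overhead and any AUTH chunk cost, masked down to a 4-byte
 * boundary as above.
 */
#if 0
#include <stdint.h>

static uint32_t
frag_point_example(uint32_t smallest_mtu, uint32_t frag_point,
    uint32_t overhead, uint32_t auth_len)
{
	uint32_t siz;

	siz = ((frag_point > smallest_mtu) ? smallest_mtu : frag_point) - overhead;
	siz -= auth_len;		/* only when DATA must be authenticated */
	return (siz & ~((uint32_t)3));	/* even word boundary */
}
#endif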
6176 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6180 * We assume that the user wants PR_SCTP_TTL if the user provides a
6181 * positive lifetime but does not specify any PR_SCTP policy. This
6182 * is a BAD assumption and causes problems at least with the
6183 * U-Vancouver MPI folks. I will change this to be no policy means NO PR-SCTP.
6186 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6187 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6192 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6193 case CHUNK_FLAGS_PR_SCTP_BUF:
6195 * Time to live is a priority stored in tv_sec when doing
6196 * the buffer drop thing.
6198 sp->ts.tv_sec = sp->timetolive;
6201 case CHUNK_FLAGS_PR_SCTP_TTL:
6205 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6206 tv.tv_sec = sp->timetolive / 1000;
6207 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6209 * TODO sctp_constants.h needs alternative time
6210 * macros when _KERNEL is undefined.
6212 timevaladd(&sp->ts, &tv);
6215 case CHUNK_FLAGS_PR_SCTP_RTX:
6217 * Time to live is the number of retransmissions stored in tv_sec.
6220 sp->ts.tv_sec = sp->timetolive;
6224 SCTPDBG(SCTP_DEBUG_USRREQ1,
6225 "Unknown PR_SCTP policy %u.\n",
6226 PR_SCTP_POLICY(sp->sinfo_flags));
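/*
 * Illustrative sketch (not compiled): for PR_SCTP_TTL the lifetime is
 * given in milliseconds and converted to a timeval that is added to
 * "now", exactly as above; for the BUF and RTX policies the raw value
 * is parked in ts.tv_sec and reinterpreted as a priority or a
 * retransmission limit instead of a time.
 */
#if 0
#include <stdint.h>
#include <sys/time.h>

static void
ttl_to_timeval(uint32_t timetolive_ms, struct timeval *tv)
{
	/* e.g. 1500 ms -> { tv_sec = 1, tv_usec = 500000 } */
	tv->tv_sec = timetolive_ms / 1000;
	tv->tv_usec = (timetolive_ms * 1000) % 1000000;
}
#endif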
6232 sctp_msg_append(struct sctp_tcb *stcb,
6233 struct sctp_nets *net,
6235 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6239 struct sctp_stream_queue_pending *sp = NULL;
6240 struct sctp_stream_out *strm;
6243 * Given an mbuf chain, put it into the association send queue and
6244 * place it on the wheel
6246 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6247 /* Invalid stream number */
6248 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6252 if ((stcb->asoc.stream_locked) &&
6253 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6254 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6258 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6259 /* Now can we send this? */
6260 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6261 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6262 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6263 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6264 /* got data while shutting down */
6265 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6269 sctp_alloc_a_strmoq(stcb, sp);
6271 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6275 sp->sinfo_flags = srcv->sinfo_flags;
6276 sp->timetolive = srcv->sinfo_timetolive;
6277 sp->ppid = srcv->sinfo_ppid;
6278 sp->context = srcv->sinfo_context;
6280 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6282 atomic_add_int(&sp->net->ref_count, 1);
6286 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6287 sp->stream = srcv->sinfo_stream;
6288 sp->msg_is_complete = 1;
6289 sp->sender_all_done = 1;
6292 sp->tail_mbuf = NULL;
6293 sctp_set_prsctp_policy(sp);
6295 * We could in theory (for sendall) stuff the length in, but we would
6296 * still have to hunt through the chain since we need to set up the tail_mbuf.
6300 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6301 if (SCTP_BUF_NEXT(at) == NULL)
6303 sp->length += SCTP_BUF_LEN(at);
6305 if (srcv->sinfo_keynumber_valid) {
6306 sp->auth_keyid = srcv->sinfo_keynumber;
6308 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6310 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6311 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6312 sp->holds_key_ref = 1;
6314 if (hold_stcb_lock == 0) {
6315 SCTP_TCB_SEND_LOCK(stcb);
6317 sctp_snd_sb_alloc(stcb, sp->length);
6318 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6319 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6320 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
6321 sp->strseq = strm->next_sequence_sent;
6322 strm->next_sequence_sent++;
6324 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6326 if (hold_stcb_lock == 0) {
6327 SCTP_TCB_SEND_UNLOCK(stcb);
6337 static struct mbuf *
6338 sctp_copy_mbufchain(struct mbuf *clonechain,
6339 struct mbuf *outchain,
6340 struct mbuf **endofchain,
6343 uint8_t copy_by_ref)
6346 struct mbuf *appendchain;
6350 if (endofchain == NULL) {
6354 sctp_m_freem(outchain);
6357 if (can_take_mbuf) {
6358 appendchain = clonechain;
6361 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6363 /* It's not in a cluster */
6364 if (*endofchain == NULL) {
6365 /* let's get an mbuf cluster */
6366 if (outchain == NULL) {
6367 /* This is the general case */
6369 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6370 if (outchain == NULL) {
6373 SCTP_BUF_LEN(outchain) = 0;
6374 *endofchain = outchain;
6375 /* get the prepend space */
6376 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6379 * We really should not get a NULL
6385 if (SCTP_BUF_NEXT(m) == NULL) {
6389 m = SCTP_BUF_NEXT(m);
6392 if (*endofchain == NULL) {
6394 * huh, TSNH XXX maybe we
6397 sctp_m_freem(outchain);
6401 /* get the new end of length */
6402 len = M_TRAILINGSPACE(*endofchain);
6404 /* how much is left at the end? */
6405 len = M_TRAILINGSPACE(*endofchain);
6407 /* Find the end of the data, for appending */
6408 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6410 /* Now let's copy it out */
6411 if (len >= sizeofcpy) {
6412 /* It all fits, copy it in */
6413 m_copydata(clonechain, 0, sizeofcpy, cp);
6414 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6416 /* fill up the end of the chain */
6418 m_copydata(clonechain, 0, len, cp);
6419 SCTP_BUF_LEN((*endofchain)) += len;
6420 /* now we need another one */
6423 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6428 SCTP_BUF_NEXT((*endofchain)) = m;
6430 cp = mtod((*endofchain), caddr_t);
6431 m_copydata(clonechain, len, sizeofcpy, cp);
6432 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6436 /* copy the old-fashioned way */
6437 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
6438 #ifdef SCTP_MBUF_LOGGING
6439 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6444 if (SCTP_BUF_IS_EXTENDED(mat)) {
6445 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6447 mat = SCTP_BUF_NEXT(mat);
6453 if (appendchain == NULL) {
6456 sctp_m_freem(outchain);
6460 /* tack on to the end */
6461 if (*endofchain != NULL) {
6462 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6466 if (SCTP_BUF_NEXT(m) == NULL) {
6467 SCTP_BUF_NEXT(m) = appendchain;
6470 m = SCTP_BUF_NEXT(m);
6474 * save off the end and update the end-chain position
6478 if (SCTP_BUF_NEXT(m) == NULL) {
6482 m = SCTP_BUF_NEXT(m);
6486 /* save off the end and update the end-chain position */
6489 if (SCTP_BUF_NEXT(m) == NULL) {
6493 m = SCTP_BUF_NEXT(m);
6495 return (appendchain);
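/*
 * Illustrative sketch (not compiled; MLEN/MHLEN values are
 * hypothetical): the copy-versus-reference decision above keys off
 * whether the payload fits in the small-mbuf budget,
 * (sctp_mbuf_threshold_count - 1) * MLEN + MHLEN.  Below that, a flat
 * copy into the tail of the existing chain is cheaper; above it,
 * SCTP_M_COPYM shares the clusters by reference.
 */
#if 0
#define MLEN_EX		256
#define MHLEN_EX	200

static int
should_flat_copy(int sizeofcpy, int threshold_count)
{
	/* e.g. threshold_count = 5 -> budget = 4 * 256 + 200 = 1224 bytes,
	 * so a 1000-byte chunk is copied and a 1400-byte chunk is referenced */
	return (sizeofcpy <= ((threshold_count - 1) * MLEN_EX + MHLEN_EX));
}
#endif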
6500 sctp_med_chunk_output(struct sctp_inpcb *inp,
6501 struct sctp_tcb *stcb,
6502 struct sctp_association *asoc,
6505 int control_only, int from_where,
6506 struct timeval *now, int *now_filled, int frag_point, int so_locked
6507 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6513 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6514 uint32_t val SCTP_UNUSED)
6516 struct sctp_copy_all *ca;
6519 int added_control = 0;
6520 int un_sent, do_chunk_output = 1;
6521 struct sctp_association *asoc;
6522 struct sctp_nets *net;
6524 ca = (struct sctp_copy_all *)ptr;
6525 if (ca->m == NULL) {
6528 if (ca->inp != inp) {
6532 if ((ca->m) && ca->sndlen) {
6533 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
6535 /* can't copy so we are done */
6539 #ifdef SCTP_MBUF_LOGGING
6540 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6545 if (SCTP_BUF_IS_EXTENDED(mat)) {
6546 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6548 mat = SCTP_BUF_NEXT(mat);
6555 SCTP_TCB_LOCK_ASSERT(stcb);
6556 if (stcb->asoc.alternate) {
6557 net = stcb->asoc.alternate;
6559 net = stcb->asoc.primary_destination;
6561 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6562 /* Abort this assoc with m as the user defined reason */
6564 struct sctp_paramhdr *ph;
6566 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
6568 ph = mtod(m, struct sctp_paramhdr *);
6569 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6570 ph->param_length = htons(ca->sndlen);
6573 * We add one here to keep the assoc from
6574 * disappearing on us.
6576 atomic_add_int(&stcb->asoc.refcnt, 1);
6577 sctp_abort_an_association(inp, stcb,
6578 SCTP_RESPONSE_TO_USER_REQ,
6579 m, SCTP_SO_NOT_LOCKED);
6581 * sctp_abort_an_association calls sctp_free_asoc();
6582 * free_asoc will NOT free the association since we
6583 * incremented the refcnt. We do this to prevent
6584 * it being freed and things getting tricky, since we
6585 * could end up (from free_asoc) calling inpcb_free,
6586 * which would get a recursive lock call to the
6587 * iterator lock. As a consequence of that the
6588 * stcb will be returned to us un-locked; since
6589 * free_asoc returns with either no TCB or the TCB
6590 * unlocked, we must relock in order to unlock in the
6591 * iterator timer :-0
6593 SCTP_TCB_LOCK(stcb);
6594 atomic_add_int(&stcb->asoc.refcnt, -1);
6595 goto no_chunk_output;
6599 ret = sctp_msg_append(stcb, net, m,
6603 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6604 /* shutdown this assoc */
6607 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
6609 if (TAILQ_EMPTY(&asoc->send_queue) &&
6610 TAILQ_EMPTY(&asoc->sent_queue) &&
6612 if (asoc->locked_on_sending) {
6616 * there is nothing queued to send, so I'm
6619 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6620 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6621 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6623 * only send SHUTDOWN the first time through
6626 sctp_send_shutdown(stcb, net);
6627 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6628 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6630 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6631 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6632 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6634 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6635 asoc->primary_destination);
6637 do_chunk_output = 0;
6641 * we still have (or just got) data to send,
6642 * so set SHUTDOWN_PENDING
6645 * XXX sockets draft says that SCTP_EOF
6646 * should be sent with no data. Currently,
6647 * we will allow user data to be sent first
6648 * and move to SHUTDOWN-PENDING.
6650 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6651 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6652 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6653 if (asoc->locked_on_sending) {
6655 * Locked to send out the data.
6658 struct sctp_stream_queue_pending *sp;
6660 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6662 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6663 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6666 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6667 if (TAILQ_EMPTY(&asoc->send_queue) &&
6668 TAILQ_EMPTY(&asoc->sent_queue) &&
6669 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6671 atomic_add_int(&stcb->asoc.refcnt, 1);
6672 sctp_abort_an_association(stcb->sctp_ep, stcb,
6673 SCTP_RESPONSE_TO_USER_REQ,
6674 NULL, SCTP_SO_NOT_LOCKED);
6675 atomic_add_int(&stcb->asoc.refcnt, -1);
6676 goto no_chunk_output;
6678 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6679 asoc->primary_destination);
6685 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6686 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6688 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6689 (stcb->asoc.total_flight > 0) &&
6690 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
6692 do_chunk_output = 0;
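/*
 * Illustrative sketch (not compiled; hypothetical names): the check
 * above is SCTP's Nagle rule - with NODELAY off, data already in
 * flight, and less than one MTU's worth of unsent data queued, hold
 * the small send and wait for a SACK so more data can be bundled.
 */
#if 0
#include <stdint.h>

static int
nagle_says_wait(int nodelay, uint32_t total_flight, int un_sent,
    uint32_t smallest_mtu, uint32_t min_overhead)
{
	return (!nodelay &&
	    (total_flight > 0) &&
	    (un_sent < (int)(smallest_mtu - min_overhead)));
}
#endif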
6694 if (do_chunk_output)
6695 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6696 else if (added_control) {
6697 int num_out = 0, reason = 0, now_filled = 0;
6701 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6702 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6703 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6714 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6716 struct sctp_copy_all *ca;
6718 ca = (struct sctp_copy_all *)ptr;
6720 * Do a notify here? Kacheong suggests that the notify be done at
6721 * the send time.. so you would push up a notification if any send
6722 * failed. Don't know if this is feasible since the only failures we
6723 * have are "memory" related and if you cannot get an mbuf to send
6724 * the data you surely can't get an mbuf to send up to notify the
6725 * user you can't send the data :->
6728 /* now free everything */
6729 sctp_m_freem(ca->m);
6730 SCTP_FREE(ca, SCTP_M_COPYAL);
6734 #define MC_ALIGN(m, len) do { \
6735 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
} while (0)
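/*
 * Worked example (hypothetical values): MC_ALIGN reserves leading
 * space so the data sits long-aligned as close to the end of the
 * cluster as possible.  With MCLBYTES = 2048, len = 100 and
 * sizeof(long) = 8: resv = (2048 - 100) & ~7 = 1944, so the 100 bytes
 * occupy offsets 1944..2043, leaving 4 bytes of alignment slack at
 * the tail.
 */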
6740 static struct mbuf *
6741 sctp_copy_out_all(struct uio *uio, int len)
6743 struct mbuf *ret, *at;
6744 int left, willcpy, cancpy, error;
6746 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
6752 SCTP_BUF_LEN(ret) = 0;
6753 /* save space for the data chunk header */
6754 cancpy = M_TRAILINGSPACE(ret);
6755 willcpy = min(cancpy, left);
6758 /* Align data to the end */
6759 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6765 SCTP_BUF_LEN(at) = willcpy;
6766 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6769 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
6770 if (SCTP_BUF_NEXT(at) == NULL) {
6773 at = SCTP_BUF_NEXT(at);
6774 SCTP_BUF_LEN(at) = 0;
6775 cancpy = M_TRAILINGSPACE(at);
6776 willcpy = min(cancpy, left);
6783 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6784 struct sctp_sndrcvinfo *srcv)
6787 struct sctp_copy_all *ca;
6789 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6793 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6796 memset(ca, 0, sizeof(struct sctp_copy_all));
6800 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6803 * take off the sendall flag, it would be bad if we failed to do this.
6806 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6807 /* get length and mbuf chain */
6809 ca->sndlen = uio->uio_resid;
6810 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6811 if (ca->m == NULL) {
6812 SCTP_FREE(ca, SCTP_M_COPYAL);
6813 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6817 /* Gather the length of the send */
6823 ca->sndlen += SCTP_BUF_LEN(m);
6824 m = SCTP_BUF_NEXT(m);
6828 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6829 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6830 SCTP_ASOC_ANY_STATE,
6832 sctp_sendall_completes, inp, 1);
6834 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6835 SCTP_FREE(ca, SCTP_M_COPYAL);
6836 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6844 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6846 struct sctp_tmit_chunk *chk, *nchk;
6848 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6849 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6850 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6852 sctp_m_freem(chk->data);
6855 asoc->ctrl_queue_cnt--;
6856 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6862 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6864 struct sctp_association *asoc;
6865 struct sctp_tmit_chunk *chk, *nchk;
6866 struct sctp_asconf_chunk *acp;
6869 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6870 /* find SCTP_ASCONF chunk in queue */
6871 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6873 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6874 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6879 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6881 sctp_m_freem(chk->data);
6884 asoc->ctrl_queue_cnt--;
6885 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6892 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6893 struct sctp_association *asoc,
6894 struct sctp_tmit_chunk **data_list,
6896 struct sctp_nets *net)
6899 struct sctp_tmit_chunk *tp1;
6901 for (i = 0; i < bundle_at; i++) {
6902 /* off of the send queue */
6903 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6904 asoc->send_queue_cnt--;
6907 * Any chunk NOT 0 you zap the time; chunk 0 gets
6908 * zapped or set based on if an RTO measurement is needed.
6911 data_list[i]->do_rtt = 0;
6914 data_list[i]->sent_rcv_time = net->last_sent_time;
6915 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6916 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6917 if (data_list[i]->whoTo == NULL) {
6918 data_list[i]->whoTo = net;
6919 atomic_add_int(&net->ref_count, 1);
6921 /* on to the sent queue */
6922 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6923 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6924 struct sctp_tmit_chunk *tpp;
6926 /* need to move back */
6928 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6930 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6934 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
6937 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6939 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6944 /* This does not lower until the cum-ack passes it */
6945 asoc->sent_queue_cnt++;
6946 if ((asoc->peers_rwnd <= 0) &&
6947 (asoc->total_flight == 0) &&
6949 /* Mark the chunk as being a window probe */
6950 SCTP_STAT_INCR(sctps_windowprobed);
6952 #ifdef SCTP_AUDITING_ENABLED
6953 sctp_audit_log(0xC2, 3);
6955 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6956 data_list[i]->snd_count = 1;
6957 data_list[i]->rec.data.chunk_was_revoked = 0;
6958 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6959 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6960 data_list[i]->whoTo->flight_size,
6961 data_list[i]->book_size,
6962 (uintptr_t) data_list[i]->whoTo,
6963 data_list[i]->rec.data.TSN_seq);
6965 sctp_flight_size_increase(data_list[i]);
6966 sctp_total_flight_increase(stcb, data_list[i]);
6967 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6968 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6969 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6971 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6972 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6973 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6974 /* SWS sender side engages */
6975 asoc->peers_rwnd = 0;
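/*
 * Illustrative sketch (not compiled; hypothetical names): this is
 * sender-side silly window avoidance - once the peer's advertised
 * window drops below the sctp_sws_sender threshold, treat it as zero
 * so we stop dribbling tiny packets and wait for the window to
 * reopen.
 */
#if 0
#include <stdint.h>

static uint32_t
sws_clamp(uint32_t peers_rwnd, uint32_t sws_sender)
{
	/* e.g. an rwnd of 200 with a threshold of 1420 is treated as 0 */
	return ((peers_rwnd < sws_sender) ? 0 : peers_rwnd);
}
#endif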
6978 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
6979 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
6984 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
6985 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6990 struct sctp_tmit_chunk *chk, *nchk;
6992 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6993 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6994 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
6995 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6996 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6997 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
6998 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6999 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7000 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7001 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7002 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7003 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7004 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7005 /* Stray chunks must be cleaned up */
7007 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7009 sctp_m_freem(chk->data);
7012 asoc->ctrl_queue_cnt--;
7013 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
7014 asoc->fwd_tsn_cnt--;
7015 sctp_free_a_chunk(stcb, chk, so_locked);
7016 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7017 /* special handling, we must look into the param */
7018 if (chk != asoc->str_reset) {
7019 goto clean_up_anyway;
7027 sctp_can_we_split_this(struct sctp_tcb *stcb,
7029 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
7032 * Make a decision on if I should split a msg into multiple parts.
7033 * This is only asked of incomplete messages.
7037 * If we are doing EEOR we need to always send it if it's the
7038 * entire thing, since it might be all the guy is putting in the hopper.
7041 if (goal_mtu >= length) {
7043 * If we have data outstanding,
7044 * we get another chance when the sack
7045 * arrives to transmit - wait for more data
7047 if (stcb->asoc.total_flight == 0) {
7049 * If nothing is in flight, we zero the
7057 /* You can fill the rest */
7062 * For those strange folk that make the send buffer
7063 * smaller than our fragmentation point, we can't
7064 * get a full msg in so we have to allow splitting.
7066 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7069 if ((length <= goal_mtu) ||
7070 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7071 /* Sub-optimal residual; don't split in non-eeor mode. */
7075 * If we reach here, length is larger than the goal_mtu. Do we wish
7076 * to split it for the sake of packing packets together?
7078 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7079 /* It's ok to split it */
7080 return (min(goal_mtu, frag_point));
7082 /* Nope, can't split */
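/*
 * Worked example (hypothetical numbers): with length = 3000,
 * goal_mtu = 1200, frag_point = 1400 and sctp_min_split_point below
 * 1200, the message exceeds the goal, the residual (1800) is large
 * enough, and the split-point test passes, so we return
 * min(goal_mtu, frag_point) = 1200 bytes to move on this pass.
 */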
7088 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7089 struct sctp_stream_out *strq,
7091 uint32_t frag_point,
7097 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7102 /* Move from the stream to the send_queue keeping track of the total */
7103 struct sctp_association *asoc;
7104 struct sctp_stream_queue_pending *sp;
7105 struct sctp_tmit_chunk *chk;
7106 struct sctp_data_chunk *dchkh;
7107 uint32_t to_move, length;
7108 uint8_t rcv_flags = 0;
7110 uint8_t send_lock_up = 0;
7112 SCTP_TCB_LOCK_ASSERT(stcb);
7115 /* sa_ignore FREED_MEMORY */
7116 sp = TAILQ_FIRST(&strq->outqueue);
7119 if (send_lock_up == 0) {
7120 SCTP_TCB_SEND_LOCK(stcb);
7123 sp = TAILQ_FIRST(&strq->outqueue);
7127 if (strq->last_msg_incomplete) {
7128 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7130 strq->last_msg_incomplete);
7131 strq->last_msg_incomplete = 0;
7135 SCTP_TCB_SEND_UNLOCK(stcb);
7140 if ((sp->msg_is_complete) && (sp->length == 0)) {
7141 if (sp->sender_all_done) {
7143 * We are doing deferred cleanup. Last time through
7144 * when we took all the data the sender_all_done was not set.
7147 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7148 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7149 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7150 sp->sender_all_done,
7152 sp->msg_is_complete,
7156 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7157 SCTP_TCB_SEND_LOCK(stcb);
7160 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7161 TAILQ_REMOVE(&strq->outqueue, sp, next);
7162 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7164 sctp_free_remote_addr(sp->net);
7168 sctp_m_freem(sp->data);
7171 sctp_free_a_strmoq(stcb, sp, so_locked);
7172 /* we can't be locked to it */
7174 stcb->asoc.locked_on_sending = NULL;
7176 SCTP_TCB_SEND_UNLOCK(stcb);
7179 /* back to get the next msg */
7183 * sender just finished this but still holds a reference.
7192 /* is there some to get */
7193 if (sp->length == 0) {
7199 } else if (sp->discard_rest) {
7200 if (send_lock_up == 0) {
7201 SCTP_TCB_SEND_LOCK(stcb);
7204 /* Whack down the size */
7205 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7206 if ((stcb->sctp_socket != NULL) &&
7207 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7208 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7209 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7212 sctp_m_freem(sp->data);
7214 sp->tail_mbuf = NULL;
7224 some_taken = sp->some_taken;
7225 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
7226 sp->msg_is_complete = 1;
7229 length = sp->length;
7230 if (sp->msg_is_complete) {
7231 /* The message is complete */
7232 to_move = min(length, frag_point);
7233 if (to_move == length) {
7234 /* All of it fits in the MTU */
7235 if (sp->some_taken) {
7236 rcv_flags |= SCTP_DATA_LAST_FRAG;
7237 sp->put_last_out = 1;
7239 rcv_flags |= SCTP_DATA_NOT_FRAG;
7240 sp->put_last_out = 1;
7243 /* Not all of it fits, we fragment */
7244 if (sp->some_taken == 0) {
7245 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7250 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
7253 * We use a snapshot of length in case it
7254 * is expanding during the compare.
7259 if (to_move >= llen) {
7261 if (send_lock_up == 0) {
7263 * We are taking all of an incomplete msg
7264 * thus we need a send lock.
7266 SCTP_TCB_SEND_LOCK(stcb);
7268 if (sp->msg_is_complete) {
7270 * the sender finished the
7277 if (sp->some_taken == 0) {
7278 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7282 /* Nothing to take. */
7283 if (sp->some_taken) {
7292 /* If we reach here, we can copy out a chunk */
7293 sctp_alloc_a_chunk(stcb, chk);
7295 /* No chunk memory */
7301 * Setup for unordered if needed by looking at the user sent info
7304 if (sp->sinfo_flags & SCTP_UNORDERED) {
7305 rcv_flags |= SCTP_DATA_UNORDERED;
7307 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
7308 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
7309 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7311 /* clear out the chunk before setting up */
7312 memset(chk, 0, sizeof(*chk));
7313 chk->rec.data.rcv_flags = rcv_flags;
7315 if (to_move >= length) {
7316 /* we think we can steal the whole thing */
7317 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7318 SCTP_TCB_SEND_LOCK(stcb);
7321 if (to_move < sp->length) {
7322 /* bail, it changed */
7325 chk->data = sp->data;
7326 chk->last_mbuf = sp->tail_mbuf;
7327 /* register the stealing */
7328 sp->data = sp->tail_mbuf = NULL;
7333 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
7334 chk->last_mbuf = NULL;
7335 if (chk->data == NULL) {
7336 sp->some_taken = some_taken;
7337 sctp_free_a_chunk(stcb, chk, so_locked);
7342 #ifdef SCTP_MBUF_LOGGING
7343 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7348 if (SCTP_BUF_IS_EXTENDED(mat)) {
7349 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
7351 mat = SCTP_BUF_NEXT(mat);
7355 /* Pull off the data */
7356 m_adj(sp->data, to_move);
7357 /* Now let's work our way down and compact it */
7359 while (m && (SCTP_BUF_LEN(m) == 0)) {
7360 sp->data = SCTP_BUF_NEXT(m);
7361 SCTP_BUF_NEXT(m) = NULL;
7362 if (sp->tail_mbuf == m) {
7364 * Freeing tail? TSNH since
7365 * we supposedly were taking less
7366 * than the sp->length.
7369 panic("Huh, freeing tail? - TSNH");
7371 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7372 sp->tail_mbuf = sp->data = NULL;
7381 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7382 chk->copy_by_ref = 1;
7384 chk->copy_by_ref = 0;
7387 * get last_mbuf and counts of mbuf usage. This is ugly but hopefully
7388 * it's only one mbuf.
7390 if (chk->last_mbuf == NULL) {
7391 chk->last_mbuf = chk->data;
7392 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7393 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7396 if (to_move > length) {
7397 /*- This should not happen either
7398 * since we always lower to_move to the size
7399 * of sp->length if it's larger.
7402 panic("Huh, how can to_move be larger?");
7404 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7408 atomic_subtract_int(&sp->length, to_move);
7410 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7411 /* Not enough room for a chunk header, get some */
7414 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
7417 * we're in trouble here. _PREPEND below will free
7418 * all the data if there is no leading space, so we
7419 * must put the data back and restore.
7421 if (send_lock_up == 0) {
7422 SCTP_TCB_SEND_LOCK(stcb);
7425 if (chk->data == NULL) {
7426 /* unsteal the data */
7427 sp->data = chk->data;
7428 sp->tail_mbuf = chk->last_mbuf;
7432 /* reassemble the data */
7434 sp->data = chk->data;
7435 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7437 sp->some_taken = some_taken;
7438 atomic_add_int(&sp->length, to_move);
7441 sctp_free_a_chunk(stcb, chk, so_locked);
7445 SCTP_BUF_LEN(m) = 0;
7446 SCTP_BUF_NEXT(m) = chk->data;
7448 M_ALIGN(chk->data, 4);
7451 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
7452 if (chk->data == NULL) {
7453 /* HELP, TSNH since we assured it would not above? */
7455 panic("prepend fails HELP?");
7457 SCTP_PRINTF("prepend fails HELP?\n");
7458 sctp_free_a_chunk(stcb, chk, so_locked);
7464 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7465 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7466 chk->book_size_scale = 0;
7467 chk->sent = SCTP_DATAGRAM_UNSENT;
7470 chk->asoc = &stcb->asoc;
7471 chk->pad_inplace = 0;
7472 chk->no_fr_allowed = 0;
7473 chk->rec.data.stream_seq = sp->strseq;
7474 chk->rec.data.stream_number = sp->stream;
7475 chk->rec.data.payloadtype = sp->ppid;
7476 chk->rec.data.context = sp->context;
7477 chk->rec.data.doing_fast_retransmit = 0;
7479 chk->rec.data.timetodrop = sp->ts;
7480 chk->flags = sp->act_flags;
7483 chk->whoTo = sp->net;
7484 atomic_add_int(&chk->whoTo->ref_count, 1);
7488 if (sp->holds_key_ref) {
7489 chk->auth_keyid = sp->auth_keyid;
7490 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7491 chk->holds_key_ref = 1;
7493 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7494 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7495 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7496 (uintptr_t) stcb, sp->length,
7497 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7498 chk->rec.data.TSN_seq);
7500 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7502 * Put the rest of the things in place now. Size was done earlier in
7503 * previous loop prior to padding.
7506 #ifdef SCTP_ASOCLOG_OF_TSNS
7507 SCTP_TCB_LOCK_ASSERT(stcb);
7508 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7509 asoc->tsn_out_at = 0;
7510 asoc->tsn_out_wrapped = 1;
7512 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7513 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7514 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7515 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7516 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7517 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7518 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7519 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7523 dchkh->ch.chunk_type = SCTP_DATA;
7524 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7525 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7526 dchkh->dp.stream_id = htons(strq->stream_no);
7527 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7528 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7529 dchkh->ch.chunk_length = htons(chk->send_size);
7530 /* Now advance the chk->send_size by the actual pad needed. */
7531 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7536 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7537 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7538 chk->pad_inplace = 1;
7540 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7541 /* pad added an mbuf */
7542 chk->last_mbuf = lm;
7544 chk->send_size += pads;
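/*
 * Worked example (hypothetical sizes): a DATA chunk's book_size is the
 * 16-byte chunk header plus the payload; chunk_length carries that
 * unpadded value while the wire representation is rounded up to a
 * multiple of 4.  With to_move = 13: book_size = 13 + 16 = 29,
 * SCTP_SIZE32(29) = 32, so pads = 3 zero bytes are appended and
 * send_size becomes 32.
 */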
7546 /* We only re-set the policy if it is on */
7547 if (sp->pr_sctp_on) {
7548 sctp_set_prsctp_policy(sp);
7549 asoc->pr_sctp_cnt++;
7550 chk->pr_sctp_on = 1;
7552 chk->pr_sctp_on = 0;
7554 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7555 /* All done; pull and kill the message */
7556 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7557 if (sp->put_last_out == 0) {
7558 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7559 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7560 sp->sender_all_done,
7562 sp->msg_is_complete,
7566 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7567 SCTP_TCB_SEND_LOCK(stcb);
7570 TAILQ_REMOVE(&strq->outqueue, sp, next);
7571 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7573 sctp_free_remote_addr(sp->net);
7577 sctp_m_freem(sp->data);
7580 sctp_free_a_strmoq(stcb, sp, so_locked);
7582 /* we can't be locked to it */
7584 stcb->asoc.locked_on_sending = NULL;
7586 /* more to go, we are locked */
7589 asoc->chunks_on_out_queue++;
7590 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7591 asoc->send_queue_cnt++;
7594 SCTP_TCB_SEND_UNLOCK(stcb);
7601 sctp_fill_outqueue(struct sctp_tcb *stcb,
7602 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7603 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7608 struct sctp_association *asoc;
7609 struct sctp_stream_out *strq;
7610 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7613 SCTP_TCB_LOCK_ASSERT(stcb);
7616 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
7617 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7619 /* ?? not sure what else to do */
7620 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7623 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7625 /* Need an allowance for the data chunk header too */
7626 goal_mtu -= sizeof(struct sctp_data_chunk);
7628 /* must make even word boundary */
7629 goal_mtu &= 0xfffffffc;
7630 if (asoc->locked_on_sending) {
7631 /* We are stuck on one stream until the message completes. */
7632 strq = asoc->locked_on_sending;
7635 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7638 while ((goal_mtu > 0) && strq) {
7641 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked,
7642 &giveup, eeor_mode, &bail, so_locked);
7644 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
7647 asoc->locked_on_sending = strq;
7648 if ((moved_how_much == 0) || (giveup) || bail)
7649 /* no more to move for now */
7652 asoc->locked_on_sending = NULL;
7653 if ((giveup) || bail) {
7656 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7661 total_moved += moved_how_much;
7662 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7663 goal_mtu &= 0xfffffffc;
7668 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7670 if (total_moved == 0) {
7671 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7672 (net == stcb->asoc.primary_destination)) {
7673 /* ran dry for the primary network */
7674 SCTP_STAT_INCR(sctps_primary_randry);
7675 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7676 /* ran dry with CMT on */
7677 SCTP_STAT_INCR(sctps_cmt_randry);
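/*
 * Worked example (hypothetical numbers): the fill loop above budgets
 * one MTU per pass.  Each message moved costs its length plus a DATA
 * chunk header, and the remainder is re-masked to a 4-byte boundary:
 * starting from goal_mtu = 1400, moving 600 leaves
 * 1400 - (600 + 16) = 784; moving 500 more leaves
 * 784 - (500 + 16) = 268, and so on until the budget is exhausted or
 * the streams run dry.
 */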
7683 sctp_fix_ecn_echo(struct sctp_association *asoc)
7685 struct sctp_tmit_chunk *chk;
7687 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7688 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7689 chk->sent = SCTP_DATAGRAM_UNSENT;
7695 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7697 struct sctp_association *asoc;
7698 struct sctp_tmit_chunk *chk;
7699 struct sctp_stream_queue_pending *sp;
7706 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7707 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7708 if (sp->net == net) {
7709 sctp_free_remote_addr(sp->net);
7714 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7715 if (chk->whoTo == net) {
7716 sctp_free_remote_addr(chk->whoTo);
7723 sctp_med_chunk_output(struct sctp_inpcb *inp,
7724 struct sctp_tcb *stcb,
7725 struct sctp_association *asoc,
7728 int control_only, int from_where,
7729 struct timeval *now, int *now_filled, int frag_point, int so_locked
7730 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7736 * Ok, this is the generic chunk service queue. We must do the
7737 * following: - Service the stream queue that is next, moving any
7738 * message (note I must get a complete message, i.e. FIRST/MIDDLE and
7739 * LAST, to the out queue in one pass) and assigning TSNs - Check to
7740 * see if the cwnd/rwnd allows any output; if so, we go ahead and
7741 * formulate and send the low level chunks, making sure to combine
7742 * any control in the control chunk queue also.
7744 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7745 struct mbuf *outchain, *endoutchain;
7746 struct sctp_tmit_chunk *chk, *nchk;
7748 /* temp arrays for unlinking */
7749 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7750 int no_fragmentflg, error;
7751 unsigned int max_rwnd_per_dest, max_send_per_dest;
7752 int one_chunk, hbflag, skip_data_for_this_net;
7753 int asconf, cookie, no_out_cnt;
7754 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7755 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7757 uint32_t auth_offset = 0;
7758 struct sctp_auth_chunk *auth = NULL;
7759 uint16_t auth_keyid;
7760 int override_ok = 1;
7761 int skip_fill_up = 0;
7762 int data_auth_reqd = 0;
7765 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7771 auth_keyid = stcb->asoc.authinfo.active_keyid;
7773 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7774 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7775 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7780 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7782 * First let's prime the pump. For each destination, if there is room
7783 * in the flight size, attempt to pull an MTU's worth out of the
7784 * stream queues into the general send_queue.
7786 #ifdef SCTP_AUDITING_ENABLED
7787 sctp_audit_log(0xC2, 2);
7789 SCTP_TCB_LOCK_ASSERT(stcb);
7791 if ((control_only) || (asoc->stream_reset_outstanding))
7796 /* Nothing possible to send? */
7797 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7798 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7799 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7800 TAILQ_EMPTY(&asoc->send_queue) &&
7801 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
7806 if (asoc->peers_rwnd == 0) {
7807 /* No room in peer's rwnd */
7809 if (asoc->total_flight > 0) {
7810 /* we are allowed one chunk in flight */
7814 if (stcb->asoc.ecn_echo_cnt_onq) {
7815 /* Record where a sack goes, if any */
7816 if (no_data_chunks &&
7817 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7818 /* Nothing but ECNe to send - we don't do that */
7819 goto nothing_to_send;
7821 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7822 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7823 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7824 sack_goes_to = chk->whoTo;
7829 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7830 if (stcb->sctp_socket)
7831 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7833 max_send_per_dest = 0;
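/*
 * Worked example (hypothetical numbers): with CMT, the peer's window
 * plus the current flight is split evenly across destinations, and
 * the send buffer limit likewise, so one path cannot monopolize
 * either budget.  With peers_rwnd = 20000, total_flight = 4000 and
 * numnets = 2: max_rwnd_per_dest = 24000 / 2 = 12000; with a 65536
 * byte send buffer: max_send_per_dest = 65536 / 2 = 32768.
 */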
7834 if (no_data_chunks == 0) {
7835 /* How many non-directed chunks are there? */
7836 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7837 if (chk->whoTo == NULL) {
7839 * We already have non-directed chunks on
7840 * the queue, no need to do a fill-up.
7848 if ((no_data_chunks == 0) &&
7849 (skip_fill_up == 0) &&
7850 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7851 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7853 * This for loop we are in takes in each net; if
7854 * it's got space in cwnd and has data sent to it
7855 * (when CMT is off) then it calls
7856 * sctp_fill_outqueue for the net. This gets data on
7857 * the send queue for that network.
7859 * In sctp_fill_outqueue TSNs are assigned and data is
7860 * copied out of the stream buffers. Note mostly
7861 * copy by reference (we hope).
7863 net->window_probe = 0;
7864 if ((net != stcb->asoc.alternate) &&
7865 ((net->dest_state & SCTP_ADDR_PF) ||
7866 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7867 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7868 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7869 sctp_log_cwnd(stcb, net, 1,
7870 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7874 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7875 (net->flight_size == 0)) {
7876 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7878 if (net->flight_size >= net->cwnd) {
7879 /* skip this network, no room - can't fill */
7880 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7881 sctp_log_cwnd(stcb, net, 3,
7882 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7886 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7887 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7889 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7891 /* memory alloc failure */
7897 /* now service each destination and send out what we can for it */
7898 /* Nothing to send? */
7899 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7900 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7901 TAILQ_EMPTY(&asoc->send_queue)) {
7905 if (asoc->sctp_cmt_on_off > 0) {
7906 /* get the last start point */
7907 start_at = asoc->last_net_cmt_send_started;
7908 if (start_at == NULL) {
7909 /* NULL, so go to the beginning */
7910 start_at = TAILQ_FIRST(&asoc->nets);
7912 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7913 if (start_at == NULL) {
7914 start_at = TAILQ_FIRST(&asoc->nets);
7917 asoc->last_net_cmt_send_started = start_at;
7919 start_at = TAILQ_FIRST(&asoc->nets);
7921 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7922 if (chk->whoTo == NULL) {
7923 if (asoc->alternate) {
7924 chk->whoTo = asoc->alternate;
7926 chk->whoTo = asoc->primary_destination;
7928 atomic_add_int(&chk->whoTo->ref_count, 1);
7931 old_start_at = NULL;
7932 again_one_more_time:
7933 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7934 /* how much can we send? */
7935 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7936 if (old_start_at && (old_start_at == net)) {
7937 /* through list completely. */
7941 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7942 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7943 (net->flight_size >= net->cwnd)) {
7945 * Nothing on control or asconf and flight is full,
7946 * we can skip even in the CMT case.
7951 endoutchain = outchain = NULL;
7954 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7955 skip_data_for_this_net = 1;
7957 skip_data_for_this_net = 0;
7959 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7961 * if we have a route and an ifp, check to see if we
7962 * have room to send to this guy
7966 ifp = net->ro.ro_rt->rt_ifp;
7967 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7968 SCTP_STAT_INCR(sctps_ifnomemqueued);
7969 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7970 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7975 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7978 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7983 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7993 if (mtu > asoc->peers_rwnd) {
7994 if (asoc->total_flight > 0) {
7995 /* We have a packet in flight somewhere */
7996 r_mtu = asoc->peers_rwnd;
7998 /* We are always allowed to send one MTU out */
8005 /************************/
8006 /* ASCONF transmission */
8007 /************************/
8008 /* Now first let's go through the asconf queue */
8009 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8010 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8013 if (chk->whoTo == NULL) {
8014 if (asoc->alternate == NULL) {
8015 if (asoc->primary_destination != net) {
8019 if (asoc->alternate != net) {
8024 if (chk->whoTo != net) {
8028 if (chk->data == NULL) {
8031 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8032 chk->sent != SCTP_DATAGRAM_RESEND) {
8036 * if no AUTH is yet included and this chunk
8037 * requires it, make sure to account for it. We
8038 * don't apply the size until the AUTH chunk is
8039 * actually added below in case there is no room for
8040 * this chunk. NOTE: we overload the use of "omtu"
8043 if ((auth == NULL) &&
8044 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8045 stcb->asoc.peer_auth_chunks)) {
8046 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8049 /* Here we do NOT factor the r_mtu */
8050 if ((chk->send_size < (int)(mtu - omtu)) ||
8051 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8053 * We probably should glom the mbuf chain
8054 * from the chk->data for control but the
8055 * problem is it becomes yet one more level
8056 * of tracking to do if for some reason
8057 * output fails. Then I have got to
8058 * reconstruct the merged control chain.. el
8059 * yucko.. for now we take the easy way and do the copy.
8063 * Add an AUTH chunk, if chunk requires it
8064 * save the offset into the chain for AUTH
8066 if ((auth == NULL) &&
8067 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8068 stcb->asoc.peer_auth_chunks))) {
8069 outchain = sctp_add_auth_chunk(outchain,
8074 chk->rec.chunk_id.id);
8075 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8077 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8078 (int)chk->rec.chunk_id.can_take_data,
8079 chk->send_size, chk->copy_by_ref);
8080 if (outchain == NULL) {
8082 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8085 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8086 /* update our MTU size */
8087 if (mtu > (chk->send_size + omtu))
8088 mtu -= (chk->send_size + omtu);
8091 to_out += (chk->send_size + omtu);
8092 /* Do clear IP_DF ? */
8093 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8096 if (chk->rec.chunk_id.can_take_data)
8099 * set hb flag since we can use these for
8105 * should sysctl this: don't bundle data
8106 * with ASCONF since it requires AUTH
8109 chk->sent = SCTP_DATAGRAM_SENT;
8110 if (chk->whoTo == NULL) {
8112 atomic_add_int(&net->ref_count, 1);
8117 * Ok we are out of room but we can
8118 * output without affecting the
8119 * flight size since this little guy
8120 * is a control only packet.
8122 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8124 * do NOT clear the asconf flag as
8125 * it is used to do appropriate
8126 * source address selection.
8128 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8129 (struct sockaddr *)&net->ro._l_addr,
8130 outchain, auth_offset, auth,
8131 stcb->asoc.authinfo.active_keyid,
8132 no_fragmentflg, 0, asconf,
8133 inp->sctp_lport, stcb->rport,
8134 htonl(stcb->asoc.peer_vtag),
8135 net->port, so_locked, NULL, NULL))) {
8136 if (error == ENOBUFS) {
8137 asoc->ifp_had_enobuf = 1;
8138 SCTP_STAT_INCR(sctps_lowlevelerr);
8140 if (from_where == 0) {
8141 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8143 if (*now_filled == 0) {
8144 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8146 *now = net->last_sent_time;
8148 net->last_sent_time = *now;
8151 /* error, could not output */
8152 if (error == EHOSTUNREACH) {
8158 sctp_move_chunks_from_net(stcb, net);
8163 asoc->ifp_had_enobuf = 0;
8164 if (*now_filled == 0) {
8165 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8167 *now = net->last_sent_time;
8169 net->last_sent_time = *now;
8173 * increase the number we sent, if a
8174 * cookie is sent we don't tell them
8177 outchain = endoutchain = NULL;
8181 *num_out += ctl_cnt;
8182 /* recalc a clean slate and setup */
8183 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
8184 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
8186 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
8193 /************************/
8194 /* Control transmission */
8195 /************************/
8196 /* Now first let's go through the control queue */
8197 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8198 if ((sack_goes_to) &&
8199 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8200 (chk->whoTo != sack_goes_to)) {
8202 * if we have a sack in queue, and we are
8203 * looking at an ecn echo that is NOT queued
8204 * to where the sack is going..
8206 if (chk->whoTo == net) {
8208 * Don't transmit it to where it's
8209 * going (current net)
8212 } else if (sack_goes_to == net) {
8214 * But do transmit it to this
8217 goto skip_net_check;
8220 if (chk->whoTo == NULL) {
8221 if (asoc->alternate == NULL) {
8222 if (asoc->primary_destination != net) {
8226 if (asoc->alternate != net) {
8231 if (chk->whoTo != net) {
8236 if (chk->data == NULL) {
8239 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8241 * It must be unsent. Cookies and ASCONF's
8242 * hang around, but their timers will fire
8243 * when they are marked for resend.
8248 * if no AUTH is yet included and this chunk
8249 * requires it, make sure to account for it. We
8250 * don't apply the size until the AUTH chunk is
8251 * actually added below in case there is no room for
8252 * this chunk. NOTE: we overload the use of "omtu"
8255 if ((auth == NULL) &&
8256 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8257 stcb->asoc.peer_auth_chunks)) {
8258 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8261 /* Here we do NOT factor the r_mtu */
8262 if ((chk->send_size <= (int)(mtu - omtu)) ||
8263 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8265 * We probably should glom the mbuf chain
8266 * from the chk->data for control but the
8267 * problem is it becomes yet one more level
8268 * of tracking to do if for some reason
8269 * output fails. Then I have got to
8270 * reconstruct the merged control chain.. el
8271 * yucko.. for now we take the easy way and do the copy.
8275 * Add an AUTH chunk, if chunk requires it
8276 * save the offset into the chain for AUTH
8278 if ((auth == NULL) &&
8279 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8280 stcb->asoc.peer_auth_chunks))) {
8281 outchain = sctp_add_auth_chunk(outchain,
8286 chk->rec.chunk_id.id);
8287 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8289 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8290 (int)chk->rec.chunk_id.can_take_data,
8291 chk->send_size, chk->copy_by_ref);
8292 if (outchain == NULL) {
8294 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8297 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8298 /* update our MTU size */
8299 if (mtu > (chk->send_size + omtu))
8300 mtu -= (chk->send_size + omtu);
8303 to_out += (chk->send_size + omtu);
8304 /* Do clear IP_DF ? */
8305 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8308 if (chk->rec.chunk_id.can_take_data)
8310 /* Mark things to be removed, if needed */
8311 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8312 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8313 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8314 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8315 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8316 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8317 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8318 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8319 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8320 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8321 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8322 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8325 /* remove these chunks at the end */
8326 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8327 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8328 /* turn off the timer */
8329 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8330 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8331 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8337 * Other chunks, since they have
8338 * timers running (i.e. COOKIE) we
8339 * just "trust" that it gets sent or
8343 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8346 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8348 * Increment ecne send count
8349 * here; this means we may be
8350 * over-zealous in our
8351 * counting if the send
8352 * fails, but it's the best
8353 * place to do it (we used
8354 * to do it in the queue of
8355 * the chunk, but that did
8356 * not tell how many times
8359 SCTP_STAT_INCR(sctps_sendecne);
8361 chk->sent = SCTP_DATAGRAM_SENT;
8362 if (chk->whoTo == NULL) {
8364 atomic_add_int(&net->ref_count, 1);
8370 * Ok we are out of room but we can
8371 * output without affecting the
8372 * flight size since this little guy
8373 * is a control only packet.
8376 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8378 * do NOT clear the asconf
8379 * flag as it is used to do
8380 * appropriate source
8381 * address selection.
8385 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8388 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8389 (struct sockaddr *)&net->ro._l_addr,
8392 stcb->asoc.authinfo.active_keyid,
8393 no_fragmentflg, 0, asconf,
8394 inp->sctp_lport, stcb->rport,
8395 htonl(stcb->asoc.peer_vtag),
8396 net->port, so_locked, NULL, NULL))) {
8397 if (error == ENOBUFS) {
8398 asoc->ifp_had_enobuf = 1;
8399 SCTP_STAT_INCR(sctps_lowlevelerr);
8401 if (from_where == 0) {
8402 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8404 /* error, could not output */
8406 if (*now_filled == 0) {
8407 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8409 *now = net->last_sent_time;
8411 net->last_sent_time = *now;
8415 if (error == EHOSTUNREACH) {
8421 sctp_move_chunks_from_net(stcb, net);
8426 asoc->ifp_had_enobuf = 0;
8427 /* Only HB or ASCONF advances time */
8429 if (*now_filled == 0) {
8430 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8432 *now = net->last_sent_time;
8434 net->last_sent_time = *now;
8439 * increase the number we sent, if a
8440 * cookie is sent we don't tell them
8443 outchain = endoutchain = NULL;
8447 *num_out += ctl_cnt;
8448 /* recalc a clean slate and setup */
8449 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
8450 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
8452 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
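/*
 * Worked example (illustrative): with net->mtu = 1500, a V6-bound
 * endpoint reserves SCTP_MIN_OVERHEAD (IPv6 plus common SCTP header,
 * 40 + 12 = 52 bytes), leaving mtu = 1448 for chunks; a v4-only
 * endpoint reserves SCTP_MIN_V4_OVERHEAD (20 + 12 = 32), leaving 1468.
 */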
8459 /* JRI: if dest is in PF state, do not send data to it */
8460 if ((asoc->sctp_cmt_on_off > 0) &&
8461 (net != stcb->asoc.alternate) &&
8462 (net->dest_state & SCTP_ADDR_PF)) {
8465 if (net->flight_size >= net->cwnd) {
8468 if ((asoc->sctp_cmt_on_off > 0) &&
8469 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8470 (net->flight_size > max_rwnd_per_dest)) {
8474 * We need a specific accounting for the usage of the send
8475 * buffer. We also need to check the number of messages per
8476 * net. For now, this is better than nothing and it disabled
8479 if ((asoc->sctp_cmt_on_off > 0) &&
8480 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8481 (max_send_per_dest > 0) &&
8482 (net->flight_size > max_send_per_dest)) {
8485 /*********************/
8486 /* Data transmission */
8487 /*********************/
8489 * if AUTH for DATA is required and no AUTH has been added
8490 * yet, account for this in the mtu now... if no data can be
8491 * bundled, this adjustment won't matter anyway since the
8492 * packet will be going out...
8494 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8495 stcb->asoc.peer_auth_chunks);
8496 if (data_auth_reqd && (auth == NULL)) {
8497 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
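/*
 * Example (assuming the peer_hmac_id selects HMAC-SHA1): the AUTH
 * chunk costs roughly 28 bytes (4-byte chunk header, 2-byte shared
 * key id, 2-byte HMAC id, 20-byte digest), so a 1448-byte mtu shrinks
 * to about 1420 before any DATA is bundled.
 */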
8499 /* now let's add any data within the MTU constraints */
8500 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8502 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8503 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8509 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8510 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8520 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8521 (skip_data_for_this_net == 0)) ||
8523 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8524 if (no_data_chunks) {
8525 /* let only control go out */
8529 if (net->flight_size >= net->cwnd) {
8530 /* skip this net, no room for data */
8534 if ((chk->whoTo != NULL) &&
8535 (chk->whoTo != net)) {
8536 /* Don't send the chunk on this net */
8539 if (asoc->sctp_cmt_on_off == 0) {
8540 if ((asoc->alternate) &&
8541 (asoc->alternate != net) &&
8542 (chk->whoTo == NULL)) {
8544 } else if ((net != asoc->primary_destination) &&
8545 (asoc->alternate == NULL) &&
8546 (chk->whoTo == NULL)) {
8550 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8552 * strange, we have a chunk that is
8553 * too big for its destination and
8554 * yet no fragment ok flag.
8555 * Something went wrong when the
8556 * PMTU changed...we did not mark
8557 * this chunk for some reason?? I
8558 * will fix it here by letting IP
8559 * fragment it for now and printing
8560 * a warning. This really should not
8563 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8564 chk->send_size, mtu);
8565 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8567 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8568 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8569 struct sctp_data_chunk *dchkh;
8571 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8572 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8574 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8575 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8576 /* ok we will add this one */
8579 * Add an AUTH chunk, if chunk
8580 * requires it, save the offset into
8581 * the chain for AUTH
8583 if (data_auth_reqd) {
8585 outchain = sctp_add_auth_chunk(outchain,
8591 auth_keyid = chk->auth_keyid;
8593 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8594 } else if (override_ok) {
8599 auth_keyid = chk->auth_keyid;
8601 } else if (auth_keyid != chk->auth_keyid) {
8609 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8610 chk->send_size, chk->copy_by_ref);
8611 if (outchain == NULL) {
8612 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8613 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8614 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8617 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8620 /* update our MTU size */
8621 /* Do clear IP_DF ? */
8622 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8625 /* unsigned subtraction of mtu */
8626 if (mtu > chk->send_size)
8627 mtu -= chk->send_size;
8630 /* unsigned subtraction of r_mtu */
8631 if (r_mtu > chk->send_size)
8632 r_mtu -= chk->send_size;
8636 to_out += chk->send_size;
8637 if ((to_out > mx_mtu) && no_fragmentflg) {
8639 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8641 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8645 chk->window_probe = 0;
8646 data_list[bundle_at++] = chk;
8647 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8650 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8651 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8652 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8654 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8656 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8657 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8667 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8669 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8670 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8671 data_list[0]->window_probe = 1;
8672 net->window_probe = 1;
8678 * Must be sent in order of the
8679 * TSN's (on a network)
8683 } /* for (chunk gather loop for this net) */
8684 } /* if asoc.state OPEN */
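/*
 * Example pass through the gather loop above (illustrative): with
 * mtu = r_mtu = 1448, three 400-byte chunks fit (mtu drops to 1048,
 * 648, then 248), a fourth 400-byte chunk fails the size test unless
 * CHUNK_FLAGS_FRAGMENT_OK is set, and the loop also breaks out once
 * bundle_at reaches SCTP_MAX_DATA_BUNDLING.
 */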
8686 /* Is there something to send for this destination? */
8688 /* We may need to start a control timer or two */
8690 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8693 * do NOT clear the asconf flag as it is
8694 * used to do appropriate source address
8699 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8702 /* must start a send timer if data is being sent */
8703 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8705 * no timer running on this destination
8708 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8710 /* Now send it, if there is anything to send :> */
8711 if ((error = sctp_lowlevel_chunk_output(inp,
8714 (struct sockaddr *)&net->ro._l_addr,
8722 inp->sctp_lport, stcb->rport,
8723 htonl(stcb->asoc.peer_vtag),
8724 net->port, so_locked, NULL, NULL))) {
8725 /* error, we could not output */
8726 if (error == ENOBUFS) {
8727 SCTP_STAT_INCR(sctps_lowlevelerr);
8728 asoc->ifp_had_enobuf = 1;
8730 if (from_where == 0) {
8731 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8733 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8735 if (*now_filled == 0) {
8736 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8738 *now = net->last_sent_time;
8740 net->last_sent_time = *now;
8744 if (error == EHOSTUNREACH) {
8746 * Destination went unreachable
8749 sctp_move_chunks_from_net(stcb, net);
8753 * I add this line to be paranoid. As far as
8754 * I can tell the continue takes us back to
8755 * the top of the for, but just to make sure
8756 * I will reset these again here.
8758 ctl_cnt = bundle_at = 0;
8759 continue; /* This takes us back to the
8760 * for() for the nets. */
8762 asoc->ifp_had_enobuf = 0;
8767 if (bundle_at || hbflag) {
8768 /* For data/asconf and hb set time */
8769 if (*now_filled == 0) {
8770 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8772 *now = net->last_sent_time;
8774 net->last_sent_time = *now;
8778 *num_out += (ctl_cnt + bundle_at);
8781 /* setup for a RTO measurement */
8782 tsns_sent = data_list[0]->rec.data.TSN_seq;
8783 /* fill time if not already filled */
8784 if (*now_filled == 0) {
8785 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8787 *now = asoc->time_last_sent;
8789 asoc->time_last_sent = *now;
8791 if (net->rto_needed) {
8792 data_list[0]->do_rtt = 1;
8793 net->rto_needed = 0;
8795 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8796 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8802 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8803 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8806 if (old_start_at == NULL) {
8807 old_start_at = start_at;
8808 start_at = TAILQ_FIRST(&asoc->nets);
8810 goto again_one_more_time;
8813 * At the end there should be no NON timed chunks hanging on this
8816 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8817 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8819 if ((*num_out == 0) && (*reason_code == 0)) {
8824 sctp_clean_up_ctl(stcb, asoc, so_locked);
8829 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8832 * Prepend an OPERATIONAL_ERROR chunk header and put on the end of
8833 * the control chunk queue.
8835 struct sctp_chunkhdr *hdr;
8836 struct sctp_tmit_chunk *chk;
8839 SCTP_TCB_LOCK_ASSERT(stcb);
8840 sctp_alloc_a_chunk(stcb, chk);
8843 sctp_m_freem(op_err);
8846 chk->copy_by_ref = 0;
8847 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
8848 if (op_err == NULL) {
8849 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
8854 while (mat != NULL) {
8855 chk->send_size += SCTP_BUF_LEN(mat);
8856 mat = SCTP_BUF_NEXT(mat);
8858 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8859 chk->rec.chunk_id.can_take_data = 1;
8860 chk->sent = SCTP_DATAGRAM_UNSENT;
8863 chk->asoc = &stcb->asoc;
8866 hdr = mtod(op_err, struct sctp_chunkhdr *);
8867 hdr->chunk_type = SCTP_OPERATION_ERROR;
8868 hdr->chunk_flags = 0;
8869 hdr->chunk_length = htons(chk->send_size);
8870 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8873 chk->asoc->ctrl_queue_cnt++;
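/*
 * Usage sketch (illustrative only; the cause layout assumes struct
 * sctp_error_cause from sctp_header.h): a caller builds the error
 * cause in an mbuf and this routine prepends the chunk header:
 *
 *	struct mbuf *op_err;
 *	struct sctp_error_cause *cause;
 *
 *	op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_cause),
 *	    0, M_DONTWAIT, 1, MT_DATA);
 *	if (op_err != NULL) {
 *		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_cause);
 *		cause = mtod(op_err, struct sctp_error_cause *);
 *		cause->code = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
 *		cause->length = htons(sizeof(struct sctp_error_cause));
 *		sctp_queue_op_err(stcb, op_err);
 *	}
 */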
8877 sctp_send_cookie_echo(struct mbuf *m,
8879 struct sctp_tcb *stcb,
8880 struct sctp_nets *net)
8883 * pull out the cookie and put it at the front of the control chunk
8887 struct mbuf *cookie;
8888 struct sctp_paramhdr parm, *phdr;
8889 struct sctp_chunkhdr *hdr;
8890 struct sctp_tmit_chunk *chk;
8891 uint16_t ptype, plen;
8893 /* First find the cookie in the param area */
8895 at = offset + sizeof(struct sctp_init_chunk);
8897 SCTP_TCB_LOCK_ASSERT(stcb);
8899 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8903 ptype = ntohs(phdr->param_type);
8904 plen = ntohs(phdr->param_length);
8905 if (ptype == SCTP_STATE_COOKIE) {
8908 /* found the cookie */
8909 if ((pad = (plen % 4))) {
8912 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
8913 if (cookie == NULL) {
8917 #ifdef SCTP_MBUF_LOGGING
8918 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8923 if (SCTP_BUF_IS_EXTENDED(mat)) {
8924 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8926 mat = SCTP_BUF_NEXT(mat);
8932 at += SCTP_SIZE32(plen);
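/*
 * Example: SCTP_SIZE32() rounds a parameter length up to the next
 * 4-byte boundary, so a 123-byte parameter advances the scan point
 * "at" by 124 bytes, while a 124-byte parameter advances it by
 * exactly 124.
 */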
8934 if (cookie == NULL) {
8935 /* Did not find the cookie */
8938 /* ok, we got the cookie; let's change it into a cookie echo chunk */
8940 /* first the change from param to cookie */
8941 hdr = mtod(cookie, struct sctp_chunkhdr *);
8942 hdr->chunk_type = SCTP_COOKIE_ECHO;
8943 hdr->chunk_flags = 0;
8944 /* get the chunk stuff now and place it in the FRONT of the queue */
8945 sctp_alloc_a_chunk(stcb, chk);
8948 sctp_m_freem(cookie);
8951 chk->copy_by_ref = 0;
8952 chk->send_size = plen;
8953 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8954 chk->rec.chunk_id.can_take_data = 0;
8955 chk->sent = SCTP_DATAGRAM_UNSENT;
8957 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8958 chk->asoc = &stcb->asoc;
8961 atomic_add_int(&chk->whoTo->ref_count, 1);
8962 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
8963 chk->asoc->ctrl_queue_cnt++;
8968 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
8972 struct sctp_nets *net)
8975 * take a HB request and make it into a HB ack and send it.
8977 struct mbuf *outchain;
8978 struct sctp_chunkhdr *chdr;
8979 struct sctp_tmit_chunk *chk;
8983 /* must have a net pointer */
8986 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
8987 if (outchain == NULL) {
8988 /* gak out of memory */
8991 #ifdef SCTP_MBUF_LOGGING
8992 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8997 if (SCTP_BUF_IS_EXTENDED(mat)) {
8998 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9000 mat = SCTP_BUF_NEXT(mat);
9004 chdr = mtod(outchain, struct sctp_chunkhdr *);
9005 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9006 chdr->chunk_flags = 0;
9007 if (chk_length % 4) {
9009 uint32_t cpthis = 0;
9012 padlen = 4 - (chk_length % 4);
9013 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
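/*
 * Example: a heartbeat of chk_length = 22 gives padlen = 4 - (22 % 4)
 * = 2, so two zero bytes from cpthis are appended to round the HB-ACK
 * up to a 4-byte boundary.
 */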
9015 sctp_alloc_a_chunk(stcb, chk);
9018 sctp_m_freem(outchain);
9021 chk->copy_by_ref = 0;
9022 chk->send_size = chk_length;
9023 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9024 chk->rec.chunk_id.can_take_data = 1;
9025 chk->sent = SCTP_DATAGRAM_UNSENT;
9028 chk->asoc = &stcb->asoc;
9029 chk->data = outchain;
9031 atomic_add_int(&chk->whoTo->ref_count, 1);
9032 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9033 chk->asoc->ctrl_queue_cnt++;
9037 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9039 /* formulate and queue a cookie-ack back to sender */
9040 struct mbuf *cookie_ack;
9041 struct sctp_chunkhdr *hdr;
9042 struct sctp_tmit_chunk *chk;
9045 SCTP_TCB_LOCK_ASSERT(stcb);
9047 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
9048 if (cookie_ack == NULL) {
9052 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9053 sctp_alloc_a_chunk(stcb, chk);
9056 sctp_m_freem(cookie_ack);
9059 chk->copy_by_ref = 0;
9060 chk->send_size = sizeof(struct sctp_chunkhdr);
9061 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9062 chk->rec.chunk_id.can_take_data = 1;
9063 chk->sent = SCTP_DATAGRAM_UNSENT;
9066 chk->asoc = &stcb->asoc;
9067 chk->data = cookie_ack;
9068 if (chk->asoc->last_control_chunk_from != NULL) {
9069 chk->whoTo = chk->asoc->last_control_chunk_from;
9070 atomic_add_int(&chk->whoTo->ref_count, 1);
9074 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9075 hdr->chunk_type = SCTP_COOKIE_ACK;
9076 hdr->chunk_flags = 0;
9077 hdr->chunk_length = htons(chk->send_size);
9078 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9079 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9080 chk->asoc->ctrl_queue_cnt++;
9086 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9088 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9089 struct mbuf *m_shutdown_ack;
9090 struct sctp_shutdown_ack_chunk *ack_cp;
9091 struct sctp_tmit_chunk *chk;
9093 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9094 if (m_shutdown_ack == NULL) {
9098 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9099 sctp_alloc_a_chunk(stcb, chk);
9102 sctp_m_freem(m_shutdown_ack);
9105 chk->copy_by_ref = 0;
9106 chk->send_size = sizeof(struct sctp_chunkhdr);
9107 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9108 chk->rec.chunk_id.can_take_data = 1;
9109 chk->sent = SCTP_DATAGRAM_UNSENT;
9112 chk->asoc = &stcb->asoc;
9113 chk->data = m_shutdown_ack;
9116 atomic_add_int(&chk->whoTo->ref_count, 1);
9118 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9119 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9120 ack_cp->ch.chunk_flags = 0;
9121 ack_cp->ch.chunk_length = htons(chk->send_size);
9122 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9123 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9124 chk->asoc->ctrl_queue_cnt++;
9129 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9131 /* formulate and queue a SHUTDOWN to the sender */
9132 struct mbuf *m_shutdown;
9133 struct sctp_shutdown_chunk *shutdown_cp;
9134 struct sctp_tmit_chunk *chk;
9136 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
9137 if (m_shutdown == NULL) {
9141 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9142 sctp_alloc_a_chunk(stcb, chk);
9145 sctp_m_freem(m_shutdown);
9148 chk->copy_by_ref = 0;
9149 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9150 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9151 chk->rec.chunk_id.can_take_data = 1;
9152 chk->sent = SCTP_DATAGRAM_UNSENT;
9155 chk->asoc = &stcb->asoc;
9156 chk->data = m_shutdown;
9159 atomic_add_int(&chk->whoTo->ref_count, 1);
9161 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9162 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9163 shutdown_cp->ch.chunk_flags = 0;
9164 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9165 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9166 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9167 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9168 chk->asoc->ctrl_queue_cnt++;
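/*
 * The SHUTDOWN queued above is a fixed 8 bytes on the wire: a 4-byte
 * chunk header (type SCTP_SHUTDOWN, flags 0, length 8) followed by
 * the 4-byte cumulative_tsn_ack copied from asoc.cumulative_tsn.
 */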
9173 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9176 * formulate and queue an ASCONF to the peer. ASCONF parameters
9177 * should be queued on the assoc queue.
9179 struct sctp_tmit_chunk *chk;
9180 struct mbuf *m_asconf;
9183 SCTP_TCB_LOCK_ASSERT(stcb);
9185 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9186 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9187 /* can't send a new one if there is one in flight already */
9190 /* compose an ASCONF chunk, maximum length is PMTU */
9191 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9192 if (m_asconf == NULL) {
9195 sctp_alloc_a_chunk(stcb, chk);
9198 sctp_m_freem(m_asconf);
9201 chk->copy_by_ref = 0;
9202 chk->data = m_asconf;
9203 chk->send_size = len;
9204 chk->rec.chunk_id.id = SCTP_ASCONF;
9205 chk->rec.chunk_id.can_take_data = 0;
9206 chk->sent = SCTP_DATAGRAM_UNSENT;
9208 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9209 chk->asoc = &stcb->asoc;
9212 atomic_add_int(&chk->whoTo->ref_count, 1);
9214 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9215 chk->asoc->ctrl_queue_cnt++;
9220 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9223 * formulate and queue an asconf-ack back to the sender. The asconf-ack
9224 * must be stored in the tcb.
9226 struct sctp_tmit_chunk *chk;
9227 struct sctp_asconf_ack *ack, *latest_ack;
9229 struct sctp_nets *net = NULL;
9231 SCTP_TCB_LOCK_ASSERT(stcb);
9232 /* Get the latest ASCONF-ACK */
9233 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9234 if (latest_ack == NULL) {
9237 if (latest_ack->last_sent_to != NULL &&
9238 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9239 /* we're doing a retransmission */
9240 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9243 if (stcb->asoc.last_control_chunk_from == NULL) {
9244 if (stcb->asoc.alternate) {
9245 net = stcb->asoc.alternate;
9247 net = stcb->asoc.primary_destination;
9250 net = stcb->asoc.last_control_chunk_from;
9255 if (stcb->asoc.last_control_chunk_from == NULL) {
9256 if (stcb->asoc.alternate) {
9257 net = stcb->asoc.alternate;
9259 net = stcb->asoc.primary_destination;
9262 net = stcb->asoc.last_control_chunk_from;
9265 latest_ack->last_sent_to = net;
9267 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9268 if (ack->data == NULL) {
9271 /* copy the asconf_ack */
9272 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
9273 if (m_ack == NULL) {
9274 /* couldn't copy it */
9277 #ifdef SCTP_MBUF_LOGGING
9278 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9283 if (SCTP_BUF_IS_EXTENDED(mat)) {
9284 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
9286 mat = SCTP_BUF_NEXT(mat);
9291 sctp_alloc_a_chunk(stcb, chk);
9295 sctp_m_freem(m_ack);
9298 chk->copy_by_ref = 0;
9302 atomic_add_int(&chk->whoTo->ref_count, 1);
9307 chk->send_size = ack->len;
9308 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9309 chk->rec.chunk_id.can_take_data = 1;
9310 chk->sent = SCTP_DATAGRAM_UNSENT;
9312 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
9313 chk->asoc = &stcb->asoc;
9315 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9316 chk->asoc->ctrl_queue_cnt++;
9323 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9324 struct sctp_tcb *stcb,
9325 struct sctp_association *asoc,
9326 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9327 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9333 * send out one MTU of retransmission. If fast_retransmit is
9334 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9335 * rwnd. For a Cookie or Asconf in the control chunk queue we
9336 * retransmit them by themselves.
9338 * For data chunks we will pick out the lowest TSN's in the sent_queue
9339 * marked for resend and bundle them all together (up to the MTU of the
9340 * destination). The address to send to should have been
9341 * selected/changed where the retransmission was marked (i.e. in FR
9342 * or t3-timeout routines).
9344 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9345 struct sctp_tmit_chunk *chk, *fwd;
9346 struct mbuf *m, *endofchain;
9347 struct sctp_nets *net = NULL;
9348 uint32_t tsns_sent = 0;
9349 int no_fragmentflg, bundle_at, cnt_thru;
9351 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9352 struct sctp_auth_chunk *auth = NULL;
9353 uint32_t auth_offset = 0;
9354 uint16_t auth_keyid;
9355 int override_ok = 1;
9356 int data_auth_reqd = 0;
9359 SCTP_TCB_LOCK_ASSERT(stcb);
9360 tmr_started = ctl_cnt = bundle_at = error = 0;
9365 endofchain = m = NULL;
9366 auth_keyid = stcb->asoc.authinfo.active_keyid;
9367 #ifdef SCTP_AUDITING_ENABLED
9368 sctp_audit_log(0xC3, 1);
9370 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9371 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9372 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9373 asoc->sent_queue_retran_cnt);
9374 asoc->sent_queue_cnt = 0;
9375 asoc->sent_queue_cnt_removeable = 0;
9376 /* send back 0/0 so we enter normal transmission */
9380 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9381 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9382 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9383 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9384 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9387 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9388 if (chk != asoc->str_reset) {
9390 * not eligible for retran if it's
9397 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9401 * Add an AUTH chunk, if chunk requires it, save the
9402 * offset into the chain for AUTH
9404 if ((auth == NULL) &&
9405 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9406 stcb->asoc.peer_auth_chunks))) {
9407 m = sctp_add_auth_chunk(m, &endofchain,
9408 &auth, &auth_offset,
9410 chk->rec.chunk_id.id);
9411 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9413 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9419 /* do we have control chunks to retransmit? */
9421 /* Start a timer no matter if we succeed or fail */
9422 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9423 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9424 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9425 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9426 chk->snd_count++; /* update our count */
9427 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9428 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9429 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9430 no_fragmentflg, 0, 0,
9431 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9432 chk->whoTo->port, so_locked, NULL, NULL))) {
9433 SCTP_STAT_INCR(sctps_lowlevelerr);
9440 * We don't want to mark the net->sent time here since
9441 * we use this for HB and retrans cannot measure RTT
9443 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9445 chk->sent = SCTP_DATAGRAM_SENT;
9446 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9450 /* Clean up the fwd-tsn list */
9451 sctp_clean_up_ctl(stcb, asoc, so_locked);
9456 * Ok, it is just data retransmission we need to do or that and a
9457 * fwd-tsn with it all.
9459 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9460 return (SCTP_RETRAN_DONE);
9462 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9463 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9464 /* not yet open, resend the cookie and that is it */
9467 #ifdef SCTP_AUDITING_ENABLED
9468 sctp_auditing(20, inp, stcb, NULL);
9470 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9471 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9472 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9473 /* No, not sent to this net or not ready for rtx */
9476 if (chk->data == NULL) {
9477 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9478 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9481 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9482 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9483 /* Gak, we have exceeded max unlucky retran, abort! */
9484 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9486 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9487 atomic_add_int(&stcb->asoc.refcnt, 1);
9488 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
9489 SCTP_TCB_LOCK(stcb);
9490 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9491 return (SCTP_RETRAN_EXIT);
9493 /* pick up the net */
9495 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
9496 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
9498 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9501 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9502 /* No room in peers rwnd */
9505 tsn = asoc->last_acked_seq + 1;
9506 if (tsn == chk->rec.data.TSN_seq) {
9508 * we make a special exception for this
9509 * case. The peer has no rwnd but is missing
9510 * the lowest chunk.. which is probably what
9511 * is holding up the rwnd.
9513 goto one_chunk_around;
9518 if (asoc->peers_rwnd < mtu) {
9520 if ((asoc->peers_rwnd == 0) &&
9521 (asoc->total_flight == 0)) {
9522 chk->window_probe = 1;
9523 chk->whoTo->window_probe = 1;
9526 #ifdef SCTP_AUDITING_ENABLED
9527 sctp_audit_log(0xC3, 2);
9531 net->fast_retran_ip = 0;
9532 if (chk->rec.data.doing_fast_retransmit == 0) {
9534 * if no FR in progress skip destinations that have
9535 * flight_size > cwnd.
9537 if (net->flight_size >= net->cwnd) {
9542 * Mark the destination net to have FR recovery
9546 net->fast_retran_ip = 1;
9550 * if no AUTH is yet included and this chunk requires it,
9551 * make sure to account for it. We don't apply the size
9552 * until the AUTH chunk is actually added below in case
9553 * there is no room for this chunk.
9555 if (data_auth_reqd && (auth == NULL)) {
9556 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9560 if ((chk->send_size <= (mtu - dmtu)) ||
9561 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9562 /* ok we will add this one */
9563 if (data_auth_reqd) {
9565 m = sctp_add_auth_chunk(m,
9571 auth_keyid = chk->auth_keyid;
9573 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9574 } else if (override_ok) {
9575 auth_keyid = chk->auth_keyid;
9577 } else if (chk->auth_keyid != auth_keyid) {
9578 /* different keyid, so done bundling */
9582 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9584 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9587 /* Do clear IP_DF ? */
9588 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9591 /* update our MTU size */
9592 if (mtu > (chk->send_size + dmtu))
9593 mtu -= (chk->send_size + dmtu);
9596 data_list[bundle_at++] = chk;
9597 if (one_chunk && (asoc->total_flight <= 0)) {
9598 SCTP_STAT_INCR(sctps_windowprobed);
9601 if (one_chunk == 0) {
9603 * now are there any more forward from chk to pick
9606 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9607 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9608 /* Nope, not for retran */
9611 if (fwd->whoTo != net) {
9612 /* Nope, not the net in question */
9615 if (data_auth_reqd && (auth == NULL)) {
9616 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9619 if (fwd->send_size <= (mtu - dmtu)) {
9620 if (data_auth_reqd) {
9622 m = sctp_add_auth_chunk(m,
9628 auth_keyid = fwd->auth_keyid;
9630 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9631 } else if (override_ok) {
9632 auth_keyid = fwd->auth_keyid;
9634 } else if (fwd->auth_keyid != auth_keyid) {
9642 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9644 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9647 /* Do clear IP_DF ? */
9648 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9651 /* update our MTU size */
9652 if (mtu > (fwd->send_size + dmtu))
9653 mtu -= (fwd->send_size + dmtu);
9656 data_list[bundle_at++] = fwd;
9657 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9661 /* can't fit so we are done */
9666 /* Is there something to send for this destination? */
9669 * No matter if we fail or succeed we should start a
9670 * timer. A failure is like a lost IP packet :-)
9672 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9674 * no timer running on this destination
9677 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9680 /* Now let's send it, if there is anything to send :> */
9681 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9682 (struct sockaddr *)&net->ro._l_addr, m,
9683 auth_offset, auth, auth_keyid,
9684 no_fragmentflg, 0, 0,
9685 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9686 net->port, so_locked, NULL, NULL))) {
9687 /* error, we could not output */
9688 SCTP_STAT_INCR(sctps_lowlevelerr);
9696 * We don't want to mark the net->sent time here
9697 * since we use this for HB and retrans cannot
9700 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9702 /* For auto-close */
9704 if (*now_filled == 0) {
9705 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9706 *now = asoc->time_last_sent;
9709 asoc->time_last_sent = *now;
9711 *cnt_out += bundle_at;
9712 #ifdef SCTP_AUDITING_ENABLED
9713 sctp_audit_log(0xC4, bundle_at);
9716 tsns_sent = data_list[0]->rec.data.TSN_seq;
9718 for (i = 0; i < bundle_at; i++) {
9719 SCTP_STAT_INCR(sctps_sendretransdata);
9720 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9722 * When we have revoked data, and we
9723 * retransmit it, then we clear the revoked
9724 * flag since this flag dictates if we
9725 * subtracted from the fs
9727 if (data_list[i]->rec.data.chunk_was_revoked) {
9728 /* Deflate the cwnd */
9729 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9730 data_list[i]->rec.data.chunk_was_revoked = 0;
9732 data_list[i]->snd_count++;
9733 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9734 /* record the time */
9735 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9736 if (data_list[i]->book_size_scale) {
9738 * need to double the book size on
9741 data_list[i]->book_size_scale = 0;
9743 * Since we double the booksize, we
9744 * must also double the output queue
9745 * size, since this gets shrunk when
9746 * we free by this amount.
9748 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9749 data_list[i]->book_size *= 2;
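/*
 * Example: a chunk booked at 512 bytes with book_size_scale set has
 * 512 credited to total_output_queue_size here and its book_size
 * doubled to 1024, so the 1024-byte debit at free time balances out.
 */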
9753 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9754 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9755 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9757 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9758 (uint32_t) (data_list[i]->send_size +
9759 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
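/*
 * Example (assuming sctp_peer_chunk_oh is configured at 256): a
 * 1200-byte retransmission charges 1200 + 256 = 1456 bytes against
 * peers_rwnd, modeling the per-chunk slack the peer is assumed to
 * reserve on its side.
 */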
9761 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9762 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9763 data_list[i]->whoTo->flight_size,
9764 data_list[i]->book_size,
9765 (uintptr_t) data_list[i]->whoTo,
9766 data_list[i]->rec.data.TSN_seq);
9768 sctp_flight_size_increase(data_list[i]);
9769 sctp_total_flight_increase(stcb, data_list[i]);
9770 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9771 /* SWS sender side engages */
9772 asoc->peers_rwnd = 0;
9775 (data_list[i]->rec.data.doing_fast_retransmit)) {
9776 SCTP_STAT_INCR(sctps_sendfastretrans);
9777 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9778 (tmr_started == 0)) {
9780 * ok we just fast-retrans'd
9781 * the lowest TSN, i.e. the
9782 * first on the list. In
9783 * this case we want to give
9784 * some more time to get a
9785 * SACK back without a
9788 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9789 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9790 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9794 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9795 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9797 #ifdef SCTP_AUDITING_ENABLED
9798 sctp_auditing(21, inp, stcb, NULL);
9804 if (asoc->sent_queue_retran_cnt <= 0) {
9805 /* all done we have no more to retran */
9806 asoc->sent_queue_retran_cnt = 0;
9810 /* No more room in rwnd */
9813 /* stop the for loop here. we sent out a packet */
9821 sctp_timer_validation(struct sctp_inpcb *inp,
9822 struct sctp_tcb *stcb,
9823 struct sctp_association *asoc,
9826 struct sctp_nets *net;
9828 /* Validate that a timer is running somewhere */
9829 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9830 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9831 /* Here is a timer */
9835 SCTP_TCB_LOCK_ASSERT(stcb);
9836 /* Gak, we did not have a timer somewhere */
9837 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9838 if (asoc->alternate) {
9839 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9841 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9847 sctp_chunk_output(struct sctp_inpcb *inp,
9848 struct sctp_tcb *stcb,
9851 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9857 * Ok this is the generic chunk service queue. We must do the
9859 * - See if there are retransmits pending, if so we must
9861 * - Service the stream queue that is next, moving any
9862 * message (note I must get a complete message i.e.
9863 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9865 * - Check to see if the cwnd/rwnd allows any output, if so we
9866 * go ahead and formulate and send the low level chunks. Making sure
9867 * to combine any control in the control chunk queue also.
9869 struct sctp_association *asoc;
9870 struct sctp_nets *net;
9871 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
9872 unsigned int burst_cnt = 0;
9876 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9879 unsigned int tot_frs = 0;
9882 /* The Nagle algorithm is only applied when handling a send call. */
9883 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9884 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9892 SCTP_TCB_LOCK_ASSERT(stcb);
9894 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9896 if ((un_sent <= 0) &&
9897 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9898 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9899 (asoc->sent_queue_retran_cnt == 0)) {
9900 /* Nothing to do unless there is something left to be sent */
9904 * Do we have something to send, data or control AND a sack timer
9905 * running, if so piggy-back the sack.
9907 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9908 sctp_send_sack(stcb, so_locked);
9909 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
9911 while (asoc->sent_queue_retran_cnt) {
9913 * Ok, it is retransmission time only, we send out only ONE
9914 * packet with a single call off to the retran code.
9916 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9918 * Special hook for handling cookies discarded
9919 * by peer that carried data. Send cookie-ack only
9920 * and then the next call will get the retran's.
9922 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9924 &now, &now_filled, frag_point, so_locked);
9926 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9927 /* if it's not from a HB then do it */
9929 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9935 * it's from any other place, we don't allow retran
9936 * output (only control)
9941 /* Can't send anymore */
9943 * now let's push out control by calling med-level
9944 * output once. This assures that we WILL send HB's
9947 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9949 &now, &now_filled, frag_point, so_locked);
9950 #ifdef SCTP_AUDITING_ENABLED
9951 sctp_auditing(8, inp, stcb, NULL);
9953 (void)sctp_timer_validation(inp, stcb, asoc, ret);
9958 * The count was off.. retran is not happening so do
9959 * the normal retransmission.
9961 #ifdef SCTP_AUDITING_ENABLED
9962 sctp_auditing(9, inp, stcb, NULL);
9964 if (ret == SCTP_RETRAN_EXIT) {
9969 if (from_where == SCTP_OUTPUT_FROM_T3) {
9970 /* Only one transmission allowed out of a timeout */
9971 #ifdef SCTP_AUDITING_ENABLED
9972 sctp_auditing(10, inp, stcb, NULL);
9974 /* Push out any control */
9975 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
9976 &now, &now_filled, frag_point, so_locked);
9979 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
9980 /* Hit FR burst limit */
9983 if ((num_out == 0) && (ret == 0)) {
9984 /* No more retrans to send */
9988 #ifdef SCTP_AUDITING_ENABLED
9989 sctp_auditing(12, inp, stcb, NULL);
9991 /* Check for bad destinations, if they exist move chunks around. */
9992 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9993 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
9995 * if possible move things off of this address; we
9996 * still may send below due to the dormant state, but
9997 * we try to find an alternate address to send to,
9998 * and if we have one we move all queued data on the
9999 * out wheel to this alternate address.
10001 if (net->ref_count > 1)
10002 sctp_move_chunks_from_net(stcb, net);
10005 * if ((asoc->sat_network) || (net->addr_is_local))
10006 * { burst_limit = asoc->max_burst *
10007 * SCTP_SAT_NETWORK_BURST_INCR; }
10009 if (asoc->max_burst > 0) {
10010 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10011 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10013 * JRS - Use the congestion
10014 * control given in the
10015 * congestion control module
10017 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10018 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10019 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10021 SCTP_STAT_INCR(sctps_maxburstqueued);
10023 net->fast_retran_ip = 0;
10025 if (net->flight_size == 0) {
10027 * Should be decaying the
10039 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10040 &reason_code, 0, from_where,
10041 &now, &now_filled, frag_point, so_locked);
10043 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10044 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10045 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10047 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10048 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10049 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10053 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10055 tot_out += num_out;
10057 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10058 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10059 if (num_out == 0) {
10060 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10065 * When the Nagle algorithm is used, look at how
10066 * much is unsent, then if it's smaller than an MTU
10067 * and we have data in flight we stop, except if we
10068 * are handling a fragmented user message.
10070 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10071 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
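/*
 * Worked example of the Nagle test below (illustrative): with
 * smallest_mtu = 1500, fewer than 1448 (1500 - SCTP_MIN_OVERHEAD)
 * unsent bytes while data is still in flight defers further output,
 * except when a fragmented user message is in progress
 * (locked_on_sending set) without SCTP_PCB_FLAGS_EXPLICIT_EOR.
 */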
10072 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10073 (stcb->asoc.total_flight > 0) &&
10074 ((stcb->asoc.locked_on_sending == NULL) ||
10075 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
10079 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10080 TAILQ_EMPTY(&asoc->send_queue) &&
10081 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
10082 /* Nothing left to send */
10085 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10086 /* Nothing left to send */
10089 } while (num_out &&
10090 ((asoc->max_burst == 0) ||
10091 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10092 (burst_cnt < asoc->max_burst)));
10094 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10095 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10096 SCTP_STAT_INCR(sctps_maxburstqueued);
10097 asoc->burst_limit_applied = 1;
10098 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10099 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10102 asoc->burst_limit_applied = 0;
10105 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10106 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10108 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10112 * Now we need to clean up the control chunk chain if an ECNE is on
10113 * it. It must be marked as UNSENT again so the next call will continue
10114 * to send it until such time as we get a CWR to remove it.
10116 if (stcb->asoc.ecn_echo_cnt_onq)
10117 sctp_fix_ecn_echo(asoc);
10124 struct sctp_inpcb *inp,
10126 struct sockaddr *addr,
10127 struct mbuf *control,
10132 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10135 if (inp->sctp_socket == NULL) {
10136 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10139 return (sctp_sosend(inp->sctp_socket,
10141 (struct uio *)NULL,
10149 send_forward_tsn(struct sctp_tcb *stcb,
10150 struct sctp_association *asoc)
10152 struct sctp_tmit_chunk *chk;
10153 struct sctp_forward_tsn_chunk *fwdtsn;
10154 uint32_t advance_peer_ack_point;
10156 SCTP_TCB_LOCK_ASSERT(stcb);
10157 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10158 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10159 /* mark it as unsent */
10160 chk->sent = SCTP_DATAGRAM_UNSENT;
10161 chk->snd_count = 0;
10162 /* Do we correct its output location? */
10164 sctp_free_remote_addr(chk->whoTo);
10167 goto sctp_fill_in_rest;
10170 /* Ok if we reach here we must build one */
10171 sctp_alloc_a_chunk(stcb, chk);
10175 asoc->fwd_tsn_cnt++;
10176 chk->copy_by_ref = 0;
10177 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10178 chk->rec.chunk_id.can_take_data = 0;
10181 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
10182 if (chk->data == NULL) {
10183 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10186 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10187 chk->sent = SCTP_DATAGRAM_UNSENT;
10188 chk->snd_count = 0;
10189 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10190 asoc->ctrl_queue_cnt++;
10193 * Here we go through and fill out the part that deals with
10194 * stream/seq of the ones we skip.
10196 SCTP_BUF_LEN(chk->data) = 0;
10198 struct sctp_tmit_chunk *at, *tp1, *last;
10199 struct sctp_strseq *strseq;
10200 unsigned int cnt_of_space, i, ovh;
10201 unsigned int space_needed;
10202 unsigned int cnt_of_skipped = 0;
10204 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10205 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
10206 /* no more to look at */
10209 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10210 /* We don't report these */
10215 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10216 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10218 cnt_of_space = M_TRAILINGSPACE(chk->data);
10220 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10221 ovh = SCTP_MIN_OVERHEAD;
10223 ovh = SCTP_MIN_V4_OVERHEAD;
10225 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10226 /* trim to a mtu size */
10227 cnt_of_space = asoc->smallest_mtu - ovh;
10229 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10230 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10231 0xff, 0, cnt_of_skipped,
10232 asoc->advanced_peer_ack_point);
10235 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10236 if (cnt_of_space < space_needed) {
10238 * ok we must trim down the chunk by lowering the
10239 * advance peer ack point.
10241 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10242 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10243 0xff, 0xff, cnt_of_space,
10246 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10247 cnt_of_skipped /= sizeof(struct sctp_strseq);
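/*
 * Example: with cnt_of_space = 1448 usable bytes, the math above
 * allows (1448 - 8) / 4 = 360 strseq entries, the FORWARD-TSN header
 * being 8 bytes and each sctp_strseq packing a 2-byte stream number
 * and a 2-byte stream sequence.
 */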
10249 * Go through and find the TSN that will be the one
10252 at = TAILQ_FIRST(&asoc->sent_queue);
10254 for (i = 0; i < cnt_of_skipped; i++) {
10255 tp1 = TAILQ_NEXT(at, sctp_next);
10262 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10263 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10264 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
10265 asoc->advanced_peer_ack_point);
10269 * last now points to last one I can report, update
10273 advance_peer_ack_point = last->rec.data.TSN_seq;
10274 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10275 cnt_of_skipped * sizeof(struct sctp_strseq);
10277 chk->send_size = space_needed;
10278 /* Setup the chunk */
10279 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10280 fwdtsn->ch.chunk_length = htons(chk->send_size);
10281 fwdtsn->ch.chunk_flags = 0;
10282 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10283 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10284 SCTP_BUF_LEN(chk->data) = chk->send_size;
10287 * Move pointer to after the fwdtsn and transfer to the
10290 strseq = (struct sctp_strseq *)fwdtsn;
10292 * Now populate the strseq list. This is done blindly
10293 * without pulling out duplicate stream info. This is
10294 * inefficient but won't harm the process since the peer will
10295 * look at these in sequence and will thus release anything.
10296 * It could mean we exceed the PMTU and chop off some that
10297 * we could have included.. but this is unlikely (aka 1432/4
10298 * would mean 300+ stream seq's would have to be reported in
10299 * one FWD-TSN). With a bit of work we can later FIX this to
10300 * optimize and pull out duplicates.. but it does add more
10301 * overhead. So for now... not!
10303 at = TAILQ_FIRST(&asoc->sent_queue);
10304 for (i = 0; i < cnt_of_skipped; i++) {
10305 tp1 = TAILQ_NEXT(at, sctp_next);
10308 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10309 /* We don't report these */
10314 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
10315 at->rec.data.fwd_tsn_cnt = 0;
10317 strseq->stream = ntohs(at->rec.data.stream_number);
10318 strseq->sequence = ntohs(at->rec.data.stream_seq);
10327 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10328 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10334 * Queue up a SACK or NR-SACK in the control queue.
10335 * We must first check to see if a SACK or NR-SACK is
10336 * somehow on the control queue.
10337 * If so, we will take and remove the old one.
10339 struct sctp_association *asoc;
10340 struct sctp_tmit_chunk *chk, *a_chk;
10341 struct sctp_sack_chunk *sack;
10342 struct sctp_nr_sack_chunk *nr_sack;
10343 struct sctp_gap_ack_block *gap_descriptor;
10344 struct sack_track *selector;
10349 int limit_reached = 0;
10350 unsigned int i, siz, j;
10351 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10354 uint32_t highest_tsn;
10359 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
10360 (stcb->asoc.peer_supports_nr_sack == 1)) {
10361 type = SCTP_NR_SELECTIVE_ACK;
10363 type = SCTP_SELECTIVE_ACK;
10366 asoc = &stcb->asoc;
10367 SCTP_TCB_LOCK_ASSERT(stcb);
10368 if (asoc->last_data_chunk_from == NULL) {
10369 /* Hmm we never received anything */
10372 sctp_slide_mapping_arrays(stcb);
10373 sctp_set_rwnd(stcb, asoc);
10374 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10375 if (chk->rec.chunk_id.id == type) {
10376 /* Hmm, found a sack already on queue, remove it */
10377 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10378 asoc->ctrl_queue_cnt--;
10381 sctp_m_freem(a_chk->data);
10382 a_chk->data = NULL;
10384 if (a_chk->whoTo) {
10385 sctp_free_remote_addr(a_chk->whoTo);
10386 a_chk->whoTo = NULL;
10391 if (a_chk == NULL) {
10392 sctp_alloc_a_chunk(stcb, a_chk);
10393 if (a_chk == NULL) {
10394 /* No memory so we drop the idea, and set a timer */
10395 if (stcb->asoc.delayed_ack) {
10396 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10397 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10398 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10399 stcb->sctp_ep, stcb, NULL);
10401 stcb->asoc.send_sack = 1;
10405 a_chk->copy_by_ref = 0;
10406 a_chk->rec.chunk_id.id = type;
10407 a_chk->rec.chunk_id.can_take_data = 1;
10409 /* Clear our pkt counts */
10410 asoc->data_pkts_seen = 0;
10412 a_chk->asoc = asoc;
10413 a_chk->snd_count = 0;
10414 a_chk->send_size = 0; /* fill in later */
10415 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10416 a_chk->whoTo = NULL;
10418 if ((asoc->numduptsns) ||
10419 (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) {
10421 * Ok, we have some duplicates or the destination for the
10422 * sack is unreachable, let's see if we can select an
10423 * alternate to asoc->last_data_chunk_from
10425 if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) &&
10426 (asoc->used_alt_onsack > asoc->numnets)) {
10427 /* We used an alt last time, don't use one this time */
10428 a_chk->whoTo = NULL;
10430 asoc->used_alt_onsack++;
10431 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10433 if (a_chk->whoTo == NULL) {
10434 /* Nope, no alternate */
10435 a_chk->whoTo = asoc->last_data_chunk_from;
10436 asoc->used_alt_onsack = 0;
10440 * No duplicates so we use the last place we received data
10443 asoc->used_alt_onsack = 0;
10444 a_chk->whoTo = asoc->last_data_chunk_from;
10446 if (a_chk->whoTo) {
10447 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10449 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10450 highest_tsn = asoc->highest_tsn_inside_map;
10452 highest_tsn = asoc->highest_tsn_inside_nr_map;
10454 if (highest_tsn == asoc->cumulative_tsn) {
10456 if (type == SCTP_SELECTIVE_ACK) {
10457 space_req = sizeof(struct sctp_sack_chunk);
10459 space_req = sizeof(struct sctp_nr_sack_chunk);
10462 /* gaps get a cluster */
10463 space_req = MCLBYTES;
10465 /* Ok now let's formulate an MBUF with our sack */
10466 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
10467 if ((a_chk->data == NULL) ||
10468 (a_chk->whoTo == NULL)) {
10469 /* rats, no mbuf memory */
10471 /* there was a problem with the destination */
10472 sctp_m_freem(a_chk->data);
10473 a_chk->data = NULL;
10475 sctp_free_a_chunk(stcb, a_chk, so_locked);
10476 /* sa_ignore NO_NULL_CHK */
10477 if (stcb->asoc.delayed_ack) {
10478 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10479 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10480 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10481 stcb->sctp_ep, stcb, NULL);
10483 stcb->asoc.send_sack = 1;
10487 /* ok, let's go through and fill it in */
10488 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10489 space = M_TRAILINGSPACE(a_chk->data);
10490 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10491 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10493 limit = mtod(a_chk->data, caddr_t);
10498 if ((asoc->sctp_cmt_on_off > 0) &&
10499 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10501 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
10502 * received, then set high bit to 1, else 0. Reset
10505 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10506 asoc->cmt_dac_pkts_rcvd = 0;
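/*
 * Example: a cmt_dac_pkts_rcvd value of 2 (binary 10) shifted left by
 * 6 sets 0x80 in the SACK chunk flags, which is how the DAC packet
 * count reaches the peer.
 */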
10508 #ifdef SCTP_ASOCLOG_OF_TSNS
10509 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10510 stcb->asoc.cumack_log_atsnt++;
10511 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10512 stcb->asoc.cumack_log_atsnt = 0;
10515 /* reset the reader's interpretation */
10516 stcb->freed_by_sorcv_sincelast = 0;
10518 if (type == SCTP_SELECTIVE_ACK) {
10519 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10521 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10522 if (highest_tsn > asoc->mapping_array_base_tsn) {
10523 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10525 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
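/*
 * Example for the branch above: if highest_tsn is 100 TSNs past
 * mapping_array_base_tsn, siz = ((100 + 1) + 7) / 8 = 13 bytes of the
 * mapping array get walked; this else leg covers the wrap where the
 * 32-bit TSN space rolled over and highest_tsn is numerically smaller.
 */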
10529 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10530 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10531 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10532 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10534 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10538 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10541 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10543 if (((type == SCTP_SELECTIVE_ACK) &&
10544 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10545 ((type == SCTP_NR_SELECTIVE_ACK) &&
10546 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10547 /* we have a gap .. maybe */
10548 for (i = 0; i < siz; i++) {
10549 tsn_map = asoc->mapping_array[i];
10550 if (type == SCTP_SELECTIVE_ACK) {
10551 tsn_map |= asoc->nr_mapping_array[i];
10555 * Clear all bits corresponding to TSNs
10556 * smaller than or equal to the cumulative TSN.
10558 tsn_map &= (~0U << (1 - offset));
10560 selector = &sack_array[tsn_map];
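/*
 * Lookup example (see the sack_array table at the top of this file):
 * tsn_map = 0x05 selects an entry with num_entries = 2, i.e. two
 * one-TSN gap runs within this byte; the right_edge/left_edge flags
 * record whether a run touches the byte boundary so adjacent bytes
 * can be coalesced into one gap block instead of reported twice.
 */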
10561 if (mergeable && selector->right_edge) {
10563 * Backup, left and right edges were ok to
10569 if (selector->num_entries == 0)
10572 for (j = 0; j < selector->num_entries; j++) {
10573 if (mergeable && selector->right_edge) {
10575 * do a merge by NOT setting
10581 * no merge, set the left
10585 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10587 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10590 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10596 if (selector->left_edge) {
10600 if (limit_reached) {
10601 /* Reached the limit, stop */
10607 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10608 (limit_reached == 0)) {
10612 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10613 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10615 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10618 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10619 offset = 1;
10620 } else {
10621 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10622 }
10623 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10624 /* we have a gap .. maybe */
10625 for (i = 0; i < siz; i++) {
10626 tsn_map = asoc->nr_mapping_array[i];
10627 if (i == 0) {
10628 /*
10629 * Clear all bits corresponding to
10630 * TSNs smaller or equal to the
10631 * cumulative TSN.
10632 */
10633 tsn_map &= (~0 << (1 - offset));
10634 }
10635 selector = &sack_array[tsn_map];
10636 if (mergeable && selector->right_edge) {
10637 /*
10638 * Backup, left and right edges were
10639 * ok to merge.
10640 */
10641 num_nr_gap_blocks--;
10642 gap_descriptor--;
10643 }
10644 if (selector->num_entries == 0)
10645 mergeable = 0;
10646 else {
10647 for (j = 0; j < selector->num_entries; j++) {
10648 if (mergeable && selector->right_edge) {
10649 /*
10650 * do a merge by NOT
10651 * setting the left
10652 * side
10653 */
10654 mergeable = 0;
10655 } else {
10656 /*
10657 * no merge, set the
10658 * left side
10659 */
10660 mergeable = 0;
10661 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10662 }
10663 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10664 num_nr_gap_blocks++;
10665 gap_descriptor++;
10666 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10667 /* no more room */
10668 limit_reached = 1;
10669 break;
10670 }
10671 }
10672 if (selector->left_edge) {
10673 mergeable = 1;
10674 }
10675 }
10676 if (limit_reached) {
10677 /* Reached the limit stop */
10678 break;
10679 }
10680 offset += 8;
10681 }
10682 }
10684 /* now we must add any dups we are going to report. */
10685 if ((limit_reached == 0) && (asoc->numduptsns)) {
10686 dup = (uint32_t *) gap_descriptor;
10687 for (i = 0; i < asoc->numduptsns; i++) {
10688 *dup = htonl(asoc->dup_tsns[i]);
10689 dup++;
10690 num_dups++;
10691 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10692 /* no more room */
10693 break;
10694 }
10695 }
10696 asoc->numduptsns = 0;
10697 }
10698 /*
10699 * now that the chunk is prepared queue it to the control chunk
10700 * queue.
10701 */
10702 if (type == SCTP_SELECTIVE_ACK) {
10703 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
10704 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10705 num_dups * sizeof(int32_t);
10706 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10707 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10708 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10709 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10710 sack->sack.num_dup_tsns = htons(num_dups);
10711 sack->ch.chunk_type = type;
10712 sack->ch.chunk_flags = flags;
10713 sack->ch.chunk_length = htons(a_chk->send_size);
10714 } else {
10715 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
10716 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10717 num_dups * sizeof(int32_t);
10718 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10719 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10720 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10721 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10722 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10723 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10724 nr_sack->nr_sack.reserved = 0;
10725 nr_sack->ch.chunk_type = type;
10726 nr_sack->ch.chunk_flags = flags;
10727 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10728 }
10729 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10730 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10731 asoc->ctrl_queue_cnt++;
10732 asoc->send_sack = 0;
10733 SCTP_STAT_INCR(sctps_sendsacks);
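/*
 * sctp_send_abort_tcb() builds an ABORT for an existing association:
 * it prepends an AUTH chunk when the peer requires ABORT to be
 * authenticated, chains any error causes in "operr" behind the chunk
 * header, and pushes the result out via sctp_lowlevel_chunk_output().
 */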
10738 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10739 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10740 SCTP_UNUSED
10741 #endif
10742 )
10743 {
10744 struct mbuf *m_abort;
10745 struct mbuf *m_out = NULL, *m_end = NULL;
10746 struct sctp_abort_chunk *abort = NULL;
10747 int sz;
10748 uint32_t auth_offset = 0;
10749 struct sctp_auth_chunk *auth = NULL;
10750 struct sctp_nets *net;
10752 /*
10753 * Add an AUTH chunk, if chunk requires it and save the offset into
10754 * the chain for AUTH
10755 */
10756 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10757 stcb->asoc.peer_auth_chunks)) {
10758 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
10759 stcb, SCTP_ABORT_ASSOCIATION);
10760 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10762 SCTP_TCB_LOCK_ASSERT(stcb);
10763 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
10764 if (m_abort == NULL) {
10765 /* no mbuf's */
10766 if (m_out)
10767 sctp_m_freem(m_out);
10768 return;
10769 }
10770 /* link in any error */
10771 SCTP_BUF_NEXT(m_abort) = operr;
10772 sz = 0;
10773 if (operr) {
10774 struct mbuf *n;
10776 n = operr;
10777 while (n) {
10778 sz += SCTP_BUF_LEN(n);
10779 n = SCTP_BUF_NEXT(n);
10780 }
10781 }
10782 SCTP_BUF_LEN(m_abort) = sizeof(*abort);
10783 if (m_out == NULL) {
10784 /* NO Auth chunk prepended, so reserve space in front */
10785 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10786 m_out = m_abort;
10787 } else {
10788 /* Put AUTH chunk at the front of the chain */
10789 SCTP_BUF_NEXT(m_end) = m_abort;
10790 }
10791 if (stcb->asoc.alternate) {
10792 net = stcb->asoc.alternate;
10793 } else {
10794 net = stcb->asoc.primary_destination;
10795 }
10796 /* fill in the ABORT chunk */
10797 abort = mtod(m_abort, struct sctp_abort_chunk *);
10798 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10799 abort->ch.chunk_flags = 0;
10800 abort->ch.chunk_length = htons(sizeof(*abort) + sz);
10802 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10803 (struct sockaddr *)&net->ro._l_addr,
10804 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10805 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10806 stcb->asoc.primary_destination->port, so_locked, NULL, NULL);
10807 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
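/*
 * sctp_send_shutdown_complete() emits a SHUTDOWN-COMPLETE on an
 * existing TCB; with reflect_vtag set it echoes our own vtag and marks
 * the chunk SCTP_HAD_NO_TCB, otherwise it uses the peer's vtag.
 */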
10811 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10812 struct sctp_nets *net,
10815 /* formulate and SEND a SHUTDOWN-COMPLETE */
10816 struct mbuf *m_shutdown_comp;
10817 struct sctp_shutdown_complete_chunk *shutdown_complete;
10821 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
10822 if (m_shutdown_comp == NULL) {
10823 /* no mbuf's */
10824 return;
10825 }
10826 if (reflect_vtag) {
10827 flags = SCTP_HAD_NO_TCB;
10828 vtag = stcb->asoc.my_vtag;
10829 } else {
10830 flags = 0;
10831 vtag = stcb->asoc.peer_vtag;
10832 }
10833 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10834 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10835 shutdown_complete->ch.chunk_flags = flags;
10836 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10837 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10838 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10839 (struct sockaddr *)&net->ro._l_addr,
10840 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
10841 stcb->sctp_ep->sctp_lport, stcb->rport,
10843 net->port, SCTP_SO_NOT_LOCKED, NULL, NULL);
10844 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
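/*
 * sctp_send_shutdown_complete2() answers an out-of-the-blue
 * SHUTDOWN-ACK when no TCB exists: it crafts a minimal IPv4/IPv6
 * (optionally UDP-encapsulated) packet by swapping the addresses and
 * ports of the incoming header and reflecting its verification tag.
 */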
10849 sctp_send_shutdown_complete2(struct mbuf *m, struct sctphdr *sh,
10850 uint32_t vrf_id, uint16_t port)
10852 /* formulate and SEND a SHUTDOWN-COMPLETE */
10853 struct mbuf *o_pak;
10856 struct udphdr *udp = NULL;
10857 int offset_out, len, mlen;
10858 struct sctp_shutdown_complete_msg *comp_cp;
10861 struct ip *iph_out;
10865 struct ip6_hdr *ip6, *ip6_out;
10869 iph = mtod(m, struct ip *);
10870 switch (iph->ip_v) {
10873 len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
10877 case IPV6_VERSION >> 4:
10878 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
10885 len += sizeof(struct udphdr);
10887 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
10888 if (mout == NULL) {
10889 return;
10890 }
10891 SCTP_BUF_RESV_UF(mout, max_linkhdr);
10892 SCTP_BUF_LEN(mout) = len;
10893 SCTP_BUF_NEXT(mout) = NULL;
10894 if (m->m_flags & M_FLOWID) {
10895 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
10896 mout->m_flags |= M_FLOWID;
10906 switch (iph->ip_v) {
10909 iph_out = mtod(mout, struct ip *);
10911 /* Fill in the IP header for the SHUTDOWN-COMPLETE */
10912 iph_out->ip_v = IPVERSION;
10913 iph_out->ip_hl = (sizeof(struct ip) / 4);
10914 iph_out->ip_tos = (u_char)0;
10915 iph_out->ip_id = 0;
10916 iph_out->ip_off = 0;
10917 iph_out->ip_ttl = MAXTTL;
10918 if (port) {
10919 iph_out->ip_p = IPPROTO_UDP;
10920 } else {
10921 iph_out->ip_p = IPPROTO_SCTP;
10922 }
10923 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
10924 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
10926 /* let IP layer calculate this */
10927 iph_out->ip_sum = 0;
10928 offset_out += sizeof(*iph_out);
10929 comp_cp = (struct sctp_shutdown_complete_msg *)(
10930 (caddr_t)iph_out + offset_out);
10934 case IPV6_VERSION >> 4:
10935 ip6 = (struct ip6_hdr *)iph;
10936 ip6_out = mtod(mout, struct ip6_hdr *);
10938 /* Fill in the IPv6 header for the SHUTDOWN-COMPLETE */
10939 ip6_out->ip6_flow = ip6->ip6_flow;
10940 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
10941 if (port) {
10942 ip6_out->ip6_nxt = IPPROTO_UDP;
10943 } else {
10944 ip6_out->ip6_nxt = IPPROTO_SCTP;
10945 }
10946 ip6_out->ip6_src = ip6->ip6_dst;
10947 ip6_out->ip6_dst = ip6->ip6_src;
10948 /*
10949 * ?? The old code had both the iph len + payload, I think
10950 * this is wrong and would never have worked
10951 */
10952 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
10953 offset_out += sizeof(*ip6_out);
10954 comp_cp = (struct sctp_shutdown_complete_msg *)(
10955 (caddr_t)ip6_out + offset_out);
10959 /* Currently not supported. */
10960 sctp_m_freem(mout);
10964 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
10965 sctp_m_freem(mout);
10966 return;
10967 }
10968 udp = (struct udphdr *)comp_cp;
10969 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
10970 udp->uh_dport = port;
10971 udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
10974 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
10976 offset_out += sizeof(struct udphdr);
10977 comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
10979 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
10980 /* no mbuf's */
10981 sctp_m_freem(mout);
10982 return;
10983 }
10984 /* Now copy in and fill in the SHUTDOWN-COMPLETE tags etc. */
10985 comp_cp->sh.src_port = sh->dest_port;
10986 comp_cp->sh.dest_port = sh->src_port;
10987 comp_cp->sh.checksum = 0;
10988 comp_cp->sh.v_tag = sh->v_tag;
10989 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
10990 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10991 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10994 if (iph_out != NULL) {
10995 struct route ro;
10996 int ret;
10998 mlen = SCTP_BUF_LEN(mout);
10999 bzero(&ro, sizeof ro);
11000 /* set IPv4 length */
11001 iph_out->ip_len = mlen;
11002 #ifdef SCTP_PACKET_LOGGING
11003 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11004 sctp_packet_log(mout, mlen);
11007 #if defined(SCTP_WITH_NO_CSUM)
11008 SCTP_STAT_INCR(sctps_sendnocrc);
11010 comp_cp->sh.checksum = sctp_calculate_cksum(mout, offset_out);
11011 SCTP_STAT_INCR(sctps_sendswcrc);
11013 SCTP_ENABLE_UDP_CSUM(mout);
11015 #if defined(SCTP_WITH_NO_CSUM)
11016 SCTP_STAT_INCR(sctps_sendnocrc);
11018 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11019 mout->m_pkthdr.csum_data = 0;
11020 SCTP_STAT_INCR(sctps_sendhwcrc);
11023 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
11025 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
11027 /* Free the route if we got one back */
11033 if (ip6_out != NULL) {
11034 struct route_in6 ro;
11035 int ret;
11036 struct ifnet *ifp = NULL;
11038 bzero(&ro, sizeof(ro));
11039 mlen = SCTP_BUF_LEN(mout);
11040 #ifdef SCTP_PACKET_LOGGING
11041 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11042 sctp_packet_log(mout, mlen);
11044 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
11046 #if defined(SCTP_WITH_NO_CSUM)
11047 SCTP_STAT_INCR(sctps_sendnocrc);
11049 comp_cp->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11050 SCTP_STAT_INCR(sctps_sendswcrc);
11052 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), mlen - sizeof(struct ip6_hdr))) == 0) {
11053 udp->uh_sum = 0xffff;
11056 #if defined(SCTP_WITH_NO_CSUM)
11057 SCTP_STAT_INCR(sctps_sendnocrc);
11059 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11060 mout->m_pkthdr.csum_data = 0;
11061 SCTP_STAT_INCR(sctps_sendhwcrc);
11064 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
11066 /* Free the route if we got one back */
11071 SCTP_STAT_INCR(sctps_sendpackets);
11072 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11073 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
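/*
 * sctp_send_hb() queues a HEARTBEAT request to "net", stamping the
 * current time into the heartbeat-info parameter; random values are
 * drawn from the entropy pool only for unconfirmed addresses (see the
 * comment below).
 */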
11079 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11080 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11081 SCTP_UNUSED
11082 #endif
11083 )
11084 {
11085 struct sctp_tmit_chunk *chk;
11086 struct sctp_heartbeat_chunk *hb;
11087 struct timeval now;
11089 SCTP_TCB_LOCK_ASSERT(stcb);
11093 (void)SCTP_GETTIME_TIMEVAL(&now);
11094 switch (net->ro._l_addr.sa.sa_family) {
11106 sctp_alloc_a_chunk(stcb, chk);
11107 if (chk == NULL) {
11108 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11109 return;
11110 }
11111 chk->copy_by_ref = 0;
11112 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11113 chk->rec.chunk_id.can_take_data = 1;
11114 chk->asoc = &stcb->asoc;
11115 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11117 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11118 if (chk->data == NULL) {
11119 sctp_free_a_chunk(stcb, chk, so_locked);
11122 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11123 SCTP_BUF_LEN(chk->data) = chk->send_size;
11124 chk->sent = SCTP_DATAGRAM_UNSENT;
11125 chk->snd_count = 0;
11127 atomic_add_int(&chk->whoTo->ref_count, 1);
11128 /* Now we have a mbuf that we can fill in with the details */
11129 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11130 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11131 /* fill out chunk header */
11132 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11133 hb->ch.chunk_flags = 0;
11134 hb->ch.chunk_length = htons(chk->send_size);
11135 /* Fill out hb parameter */
11136 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11137 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11138 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11139 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
11140 /* Did our user request this one? Put it in. */
11141 hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
11142 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11143 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11144 /*
11145 * we only take from the entropy pool if the address is not
11146 * confirmed.
11147 */
11148 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11149 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11150 } else {
11151 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11152 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11153 }
11154 switch (net->ro._l_addr.sa.sa_family) {
11157 memcpy(hb->heartbeat.hb_info.address,
11158 &net->ro._l_addr.sin.sin_addr,
11159 sizeof(net->ro._l_addr.sin.sin_addr));
11164 memcpy(hb->heartbeat.hb_info.address,
11165 &net->ro._l_addr.sin6.sin6_addr,
11166 sizeof(net->ro._l_addr.sin6.sin6_addr));
11173 net->hb_responded = 0;
11174 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11175 stcb->asoc.ctrl_queue_cnt++;
11176 SCTP_STAT_INCR(sctps_sendheartbeat);
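/*
 * sctp_send_ecn_echo() reports congestion back to the peer: if an
 * ECN-ECHO for the same destination is already queued it only bumps
 * the TSN and packet count in place, otherwise a fresh chunk is built
 * and put at the head of the control queue.
 */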
11181 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11184 struct sctp_association *asoc;
11185 struct sctp_ecne_chunk *ecne;
11186 struct sctp_tmit_chunk *chk;
11191 asoc = &stcb->asoc;
11192 SCTP_TCB_LOCK_ASSERT(stcb);
11193 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11194 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11195 /* found a previous ECN_ECHO update it if needed */
11196 uint32_t cnt, ctsn;
11198 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11199 ctsn = ntohl(ecne->tsn);
11200 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11201 ecne->tsn = htonl(high_tsn);
11202 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11204 cnt = ntohl(ecne->num_pkts_since_cwr);
11206 ecne->num_pkts_since_cwr = htonl(cnt);
11210 /* nope could not find one to update so we must build one */
11211 sctp_alloc_a_chunk(stcb, chk);
11212 if (chk == NULL) {
11213 return;
11214 }
11215 chk->copy_by_ref = 0;
11216 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11217 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11218 chk->rec.chunk_id.can_take_data = 0;
11219 chk->asoc = &stcb->asoc;
11220 chk->send_size = sizeof(struct sctp_ecne_chunk);
11221 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11222 if (chk->data == NULL) {
11223 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11226 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11227 SCTP_BUF_LEN(chk->data) = chk->send_size;
11228 chk->sent = SCTP_DATAGRAM_UNSENT;
11229 chk->snd_count = 0;
11231 atomic_add_int(&chk->whoTo->ref_count, 1);
11233 stcb->asoc.ecn_echo_cnt_onq++;
11234 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11235 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11236 ecne->ch.chunk_flags = 0;
11237 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11238 ecne->tsn = htonl(high_tsn);
11239 ecne->num_pkts_since_cwr = htonl(1);
11240 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11241 asoc->ctrl_queue_cnt++;
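/*
 * sctp_send_packet_dropped() wraps (up to one MTU of) a dropped
 * incoming packet in a PACKET-DROPPED chunk, refusing to report
 * ABORT, INIT-ACK or PKT-DROP chunks, and advertises the receive
 * buffer limit and current queue occupancy back to the peer.
 */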
11245 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11246 struct mbuf *m, int iphlen, int bad_crc)
11248 struct sctp_association *asoc;
11249 struct sctp_pktdrop_chunk *drp;
11250 struct sctp_tmit_chunk *chk;
11257 struct ip6_hdr *ip6h;
11260 int fullsz = 0, extra = 0;
11263 struct sctp_chunkhdr *ch, chunk_buf;
11264 unsigned int chk_length;
11269 asoc = &stcb->asoc;
11270 SCTP_TCB_LOCK_ASSERT(stcb);
11271 if (asoc->peer_supports_pktdrop == 0) {
11272 /*
11273 * peer must declare support before I send one.
11274 */
11275 return;
11276 }
11277 if (stcb->sctp_socket == NULL) {
11280 sctp_alloc_a_chunk(stcb, chk);
11281 if (chk == NULL) {
11282 return;
11283 }
11284 chk->copy_by_ref = 0;
11285 iph = mtod(m, struct ip *);
11287 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11290 switch (iph->ip_v) {
11294 len = chk->send_size = iph->ip_len;
11298 case IPV6_VERSION >> 4:
11300 ip6h = mtod(m, struct ip6_hdr *);
11301 len = chk->send_size = ntohs(ip6h->ip6_plen);
11307 /* Validate that we do not have an ABORT in here. */
11308 offset = iphlen + sizeof(struct sctphdr);
11309 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11310 sizeof(*ch), (uint8_t *) & chunk_buf);
11311 while (ch != NULL) {
11312 chk_length = ntohs(ch->chunk_length);
11313 if (chk_length < sizeof(*ch)) {
11314 /* break to abort land */
11317 switch (ch->chunk_type) {
11318 case SCTP_PACKET_DROPPED:
11319 case SCTP_ABORT_ASSOCIATION:
11320 case SCTP_INITIATION_ACK:
11321 /*
11322 * We don't respond with a PKT-DROP to an ABORT
11323 * or PKT-DROP. We also do not respond to an
11324 * INIT-ACK, because we can't know if the initiation
11325 * tag is correct or not.
11326 */
11327 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11332 offset += SCTP_SIZE32(chk_length);
11333 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11334 sizeof(*ch), (uint8_t *) & chunk_buf);
11337 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11338 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11339 /*
11340 * only send 1 mtu worth, trim off the excess on the end.
11341 */
11342 fullsz = len - extra;
11343 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11346 chk->asoc = &stcb->asoc;
11347 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11348 if (chk->data == NULL) {
11350 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11353 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11354 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11356 sctp_m_freem(chk->data);
11360 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11361 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11362 chk->book_size_scale = 0;
11364 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11365 drp->trunc_len = htons(fullsz);
11366 /*
11367 * Len is already adjusted to size minus overhead above; take
11368 * out the pkt_drop chunk itself from it.
11369 */
11370 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
11371 len = chk->send_size;
11373 /* no truncation needed */
11374 drp->ch.chunk_flags = 0;
11375 drp->trunc_len = htons(0);
11378 drp->ch.chunk_flags |= SCTP_BADCRC;
11380 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11381 SCTP_BUF_LEN(chk->data) = chk->send_size;
11382 chk->sent = SCTP_DATAGRAM_UNSENT;
11383 chk->snd_count = 0;
11385 /* we should hit here */
11387 atomic_add_int(&chk->whoTo->ref_count, 1);
11391 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11392 chk->rec.chunk_id.can_take_data = 1;
11393 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11394 drp->ch.chunk_length = htons(chk->send_size);
11395 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11399 drp->bottle_bw = htonl(spc);
11400 if (asoc->my_rwnd) {
11401 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11402 asoc->size_on_all_streams +
11403 asoc->my_rwnd_control_len +
11404 stcb->sctp_socket->so_rcv.sb_cc);
11405 } else {
11406 /*
11407 * If my rwnd is 0, possibly from mbuf depletion as well as
11408 * space used, tell the peer there is NO space aka onq == bw
11409 */
11410 drp->current_onq = htonl(spc);
11411 }
11414 m_copydata(m, iphlen, len, (caddr_t)datap);
11415 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11416 asoc->ctrl_queue_cnt++;
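/*
 * sctp_send_cwr() is the counterpart to the ECN-ECHO above: it queues
 * (or updates in place) a CWR chunk telling the peer the window was
 * reduced up to high_tsn, carrying the reduce-override flag through
 * when requested.
 */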
11420 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11422 struct sctp_association *asoc;
11423 struct sctp_cwr_chunk *cwr;
11424 struct sctp_tmit_chunk *chk;
11426 asoc = &stcb->asoc;
11427 SCTP_TCB_LOCK_ASSERT(stcb);
11431 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11432 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11433 /*
11434 * found a previous CWR queued to same destination
11435 * update it if needed
11436 */
11437 uint32_t ctsn;
11439 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11440 ctsn = ntohl(cwr->tsn);
11441 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11442 cwr->tsn = htonl(high_tsn);
11444 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11445 /* Make sure override is carried */
11446 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11451 sctp_alloc_a_chunk(stcb, chk);
11452 if (chk == NULL) {
11453 return;
11454 }
11455 chk->copy_by_ref = 0;
11456 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11457 chk->rec.chunk_id.can_take_data = 1;
11458 chk->asoc = &stcb->asoc;
11459 chk->send_size = sizeof(struct sctp_cwr_chunk);
11460 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11461 if (chk->data == NULL) {
11462 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11465 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11466 SCTP_BUF_LEN(chk->data) = chk->send_size;
11467 chk->sent = SCTP_DATAGRAM_UNSENT;
11468 chk->snd_count = 0;
11470 atomic_add_int(&chk->whoTo->ref_count, 1);
11471 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11472 cwr->ch.chunk_type = SCTP_ECN_CWR;
11473 cwr->ch.chunk_flags = override;
11474 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11475 cwr->tsn = htonl(high_tsn);
11476 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11477 asoc->ctrl_queue_cnt++;
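/*
 * The sctp_add_stream_reset_*() and sctp_add_a_stream() helpers below
 * all follow the same pattern: locate the end of the STREAM-RESET
 * chunk under construction, append one TLV parameter there, then grow
 * the chunk length and the chunk's book_size/send_size to match.
 */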
11481 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11482 int number_entries, uint16_t * list,
11483 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11485 int len, old_len, i;
11486 struct sctp_stream_reset_out_request *req_out;
11487 struct sctp_chunkhdr *ch;
11489 ch = mtod(chk->data, struct sctp_chunkhdr *);
11492 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11494 /* get to new offset for the param. */
11495 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11496 /* now how long will this param be? */
11497 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11498 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11499 req_out->ph.param_length = htons(len);
11500 req_out->request_seq = htonl(seq);
11501 req_out->response_seq = htonl(resp_seq);
11502 req_out->send_reset_at_tsn = htonl(last_sent);
11503 if (number_entries) {
11504 for (i = 0; i < number_entries; i++) {
11505 req_out->list_of_streams[i] = htons(list[i]);
11508 if (SCTP_SIZE32(len) > len) {
11509 /*
11510 * Need to worry about the pad we may end up adding to the
11511 * end. This is easy since the struct is either aligned to 4
11512 * bytes or 2 bytes off.
11513 */
11514 req_out->list_of_streams[number_entries] = 0;
11516 /* now fix the chunk length */
11517 ch->chunk_length = htons(len + old_len);
11518 chk->book_size = len + old_len;
11519 chk->book_size_scale = 0;
11520 chk->send_size = SCTP_SIZE32(chk->book_size);
11521 SCTP_BUF_LEN(chk->data) = chk->send_size;
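/*
 * Padding example (assumed count, and assuming the flexible-array
 * layout of the request struct): with number_entries == 1 the
 * out-request parameter is 16 + 2 bytes long; SCTP_SIZE32() rounds the
 * chunk up to a 4-byte boundary, and the extra list slot zeroed above
 * is exactly that pad.
 */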
11527 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11528 int number_entries, uint16_t * list,
11531 int len, old_len, i;
11532 struct sctp_stream_reset_in_request *req_in;
11533 struct sctp_chunkhdr *ch;
11535 ch = mtod(chk->data, struct sctp_chunkhdr *);
11538 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11540 /* get to new offset for the param. */
11541 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11542 /* now how long will this param be? */
11543 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11544 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11545 req_in->ph.param_length = htons(len);
11546 req_in->request_seq = htonl(seq);
11547 if (number_entries) {
11548 for (i = 0; i < number_entries; i++) {
11549 req_in->list_of_streams[i] = htons(list[i]);
11552 if (SCTP_SIZE32(len) > len) {
11553 /*
11554 * Need to worry about the pad we may end up adding to the
11555 * end. This is easy since the struct is either aligned to 4
11556 * bytes or 2 bytes off.
11557 */
11558 req_in->list_of_streams[number_entries] = 0;
11560 /* now fix the chunk length */
11561 ch->chunk_length = htons(len + old_len);
11562 chk->book_size = len + old_len;
11563 chk->book_size_scale = 0;
11564 chk->send_size = SCTP_SIZE32(chk->book_size);
11565 SCTP_BUF_LEN(chk->data) = chk->send_size;
11571 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11575 struct sctp_stream_reset_tsn_request *req_tsn;
11576 struct sctp_chunkhdr *ch;
11578 ch = mtod(chk->data, struct sctp_chunkhdr *);
11581 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11583 /* get to new offset for the param. */
11584 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11585 /* now how long will this param be? */
11586 len = sizeof(struct sctp_stream_reset_tsn_request);
11587 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11588 req_tsn->ph.param_length = htons(len);
11589 req_tsn->request_seq = htonl(seq);
11591 /* now fix the chunk length */
11592 ch->chunk_length = htons(len + old_len);
11593 chk->send_size = len + old_len;
11594 chk->book_size = SCTP_SIZE32(chk->send_size);
11595 chk->book_size_scale = 0;
11596 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11601 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11602 uint32_t resp_seq, uint32_t result)
11605 struct sctp_stream_reset_response *resp;
11606 struct sctp_chunkhdr *ch;
11608 ch = mtod(chk->data, struct sctp_chunkhdr *);
11611 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11613 /* get to new offset for the param. */
11614 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11615 /* now how long will this param be? */
11616 len = sizeof(struct sctp_stream_reset_response);
11617 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11618 resp->ph.param_length = htons(len);
11619 resp->response_seq = htonl(resp_seq);
11620 resp->result = htonl(result);
11622 /* now fix the chunk length */
11623 ch->chunk_length = htons(len + old_len);
11624 chk->book_size = len + old_len;
11625 chk->book_size_scale = 0;
11626 chk->send_size = SCTP_SIZE32(chk->book_size);
11627 SCTP_BUF_LEN(chk->data) = chk->send_size;
11634 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11635 uint32_t resp_seq, uint32_t result,
11636 uint32_t send_una, uint32_t recv_next)
11639 struct sctp_stream_reset_response_tsn *resp;
11640 struct sctp_chunkhdr *ch;
11642 ch = mtod(chk->data, struct sctp_chunkhdr *);
11645 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11647 /* get to new offset for the param. */
11648 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11649 /* now how long will this param be? */
11650 len = sizeof(struct sctp_stream_reset_response_tsn);
11651 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11652 resp->ph.param_length = htons(len);
11653 resp->response_seq = htonl(resp_seq);
11654 resp->result = htonl(result);
11655 resp->senders_next_tsn = htonl(send_una);
11656 resp->receivers_next_tsn = htonl(recv_next);
11658 /* now fix the chunk length */
11659 ch->chunk_length = htons(len + old_len);
11660 chk->book_size = len + old_len;
11661 chk->send_size = SCTP_SIZE32(chk->book_size);
11662 chk->book_size_scale = 0;
11663 SCTP_BUF_LEN(chk->data) = chk->send_size;
11668 sctp_add_a_stream(struct sctp_tmit_chunk *chk,
11673 struct sctp_chunkhdr *ch;
11674 struct sctp_stream_reset_add_strm *addstr;
11676 ch = mtod(chk->data, struct sctp_chunkhdr *);
11677 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11679 /* get to new offset for the param. */
11680 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11681 /* now how long will this param be? */
11682 len = sizeof(struct sctp_stream_reset_add_strm);
11685 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_STREAMS);
11686 addstr->ph.param_length = htons(len);
11687 addstr->request_seq = htonl(seq);
11688 addstr->number_of_streams = htons(adding);
11689 addstr->reserved = 0;
11691 /* now fix the chunk length */
11692 ch->chunk_length = htons(len + old_len);
11693 chk->send_size = len + old_len;
11694 chk->book_size = SCTP_SIZE32(chk->send_size);
11695 chk->book_size_scale = 0;
11696 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
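/*
 * sctp_send_str_reset_req() glues the pieces together: it allocates a
 * bare STREAM-RESET chunk header, appends whichever out/in/tsn/add
 * requests were asked for (each bumping stream_reset_outstanding),
 * queues the chunk, and arms the SCTP_TIMER_TYPE_STRRESET timer.
 */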
11701 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11702 int number_entries, uint16_t * list,
11703 uint8_t send_out_req,
11705 uint8_t send_in_req,
11706 uint8_t send_tsn_req,
11707 uint8_t add_stream,
11712 struct sctp_association *asoc;
11713 struct sctp_tmit_chunk *chk;
11714 struct sctp_chunkhdr *ch;
11717 asoc = &stcb->asoc;
11718 if (asoc->stream_reset_outstanding) {
11719 /*
11720 * Already one pending, must get ACK back to clear the flag.
11721 */
11722 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11725 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11726 (add_stream == 0)) {
11727 /* nothing to do */
11728 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11731 if (send_tsn_req && (send_out_req || send_in_req)) {
11732 /* error, can't do that */
11733 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11736 sctp_alloc_a_chunk(stcb, chk);
11737 if (chk == NULL) {
11738 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11739 return (ENOMEM);
11740 }
11741 chk->copy_by_ref = 0;
11742 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11743 chk->rec.chunk_id.can_take_data = 0;
11744 chk->asoc = &stcb->asoc;
11745 chk->book_size = sizeof(struct sctp_chunkhdr);
11746 chk->send_size = SCTP_SIZE32(chk->book_size);
11747 chk->book_size_scale = 0;
11749 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11750 if (chk->data == NULL) {
11751 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11752 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11755 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11757 /* setup chunk parameters */
11758 chk->sent = SCTP_DATAGRAM_UNSENT;
11759 chk->snd_count = 0;
11760 if (stcb->asoc.alternate) {
11761 chk->whoTo = stcb->asoc.alternate;
11762 } else {
11763 chk->whoTo = stcb->asoc.primary_destination;
11764 }
11765 atomic_add_int(&chk->whoTo->ref_count, 1);
11766 ch = mtod(chk->data, struct sctp_chunkhdr *);
11767 ch->chunk_type = SCTP_STREAM_RESET;
11768 ch->chunk_flags = 0;
11769 ch->chunk_length = htons(chk->book_size);
11770 SCTP_BUF_LEN(chk->data) = chk->send_size;
11772 seq = stcb->asoc.str_reset_seq_out;
11773 if (send_out_req) {
11774 sctp_add_stream_reset_out(chk, number_entries, list,
11775 seq, resp_seq, (stcb->asoc.sending_seq - 1));
11776 asoc->stream_reset_out_is_outstanding = 1;
11778 asoc->stream_reset_outstanding++;
11781 sctp_add_a_stream(chk, seq, adding);
11783 asoc->stream_reset_outstanding++;
11786 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11787 asoc->stream_reset_outstanding++;
11789 if (send_tsn_req) {
11790 sctp_add_stream_reset_tsn(chk, seq);
11791 asoc->stream_reset_outstanding++;
11793 asoc->str_reset = chk;
11795 /* insert the chunk for sending */
11796 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11799 asoc->ctrl_queue_cnt++;
11800 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
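/*
 * sctp_send_abort() is the no-TCB variant used for out-of-the-blue
 * packets: it reflects the incoming IP/SCTP header the same way as
 * sctp_send_shutdown_complete2() above, optionally appending a chain
 * of error causes (padded to a 4-byte boundary) after the ABORT chunk.
 */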
11805 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
11806 struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
11809 * Formulate the abort message, and send it back down.
11811 struct mbuf *o_pak;
11813 struct sctp_abort_msg *abm;
11815 struct udphdr *udp;
11816 int iphlen_out, len;
11819 struct ip *iph_out;
11823 struct ip6_hdr *ip6, *ip6_out;
11827 /* don't respond to ABORT with ABORT */
11828 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11829 if (err_cause)
11830 sctp_m_freem(err_cause);
11831 return;
11832 }
11833 iph = mtod(m, struct ip *);
11834 switch (iph->ip_v) {
11837 len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
11841 case IPV6_VERSION >> 4:
11842 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
11847 sctp_m_freem(err_cause);
11852 len += sizeof(struct udphdr);
11854 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
11855 if (mout == NULL) {
11856 if (err_cause) {
11857 sctp_m_freem(err_cause);
11858 }
11859 return;
11860 }
11861 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11862 SCTP_BUF_LEN(mout) = len;
11863 SCTP_BUF_NEXT(mout) = err_cause;
11864 if (m->m_flags & M_FLOWID) {
11865 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
11866 mout->m_flags |= M_FLOWID;
11874 switch (iph->ip_v) {
11877 iph_out = mtod(mout, struct ip *);
11879 /* Fill in the IP header for the ABORT */
11880 iph_out->ip_v = IPVERSION;
11881 iph_out->ip_hl = (sizeof(struct ip) / 4);
11882 iph_out->ip_tos = (u_char)0;
11883 iph_out->ip_id = 0;
11884 iph_out->ip_off = 0;
11885 iph_out->ip_ttl = MAXTTL;
11886 if (port) {
11887 iph_out->ip_p = IPPROTO_UDP;
11888 } else {
11889 iph_out->ip_p = IPPROTO_SCTP;
11890 }
11891 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
11892 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
11893 /* let IP layer calculate this */
11894 iph_out->ip_sum = 0;
11896 iphlen_out = sizeof(*iph_out);
11897 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
11901 case IPV6_VERSION >> 4:
11902 ip6 = (struct ip6_hdr *)iph;
11903 ip6_out = mtod(mout, struct ip6_hdr *);
11905 /* Fill in the IP6 header for the ABORT */
11906 ip6_out->ip6_flow = ip6->ip6_flow;
11907 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11908 if (port) {
11909 ip6_out->ip6_nxt = IPPROTO_UDP;
11910 } else {
11911 ip6_out->ip6_nxt = IPPROTO_SCTP;
11912 }
11913 ip6_out->ip6_src = ip6->ip6_dst;
11914 ip6_out->ip6_dst = ip6->ip6_src;
11916 iphlen_out = sizeof(*ip6_out);
11917 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
11921 /* Currently not supported */
11922 sctp_m_freem(mout);
11926 udp = (struct udphdr *)abm;
11928 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11929 sctp_m_freem(mout);
11932 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11933 udp->uh_dport = port;
11934 /* set udp->uh_ulen later */
11936 iphlen_out += sizeof(struct udphdr);
11937 abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
11939 abm->sh.src_port = sh->dest_port;
11940 abm->sh.dest_port = sh->src_port;
11941 abm->sh.checksum = 0;
11942 if (vtag == 0) {
11943 abm->sh.v_tag = sh->v_tag;
11944 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
11945 } else {
11946 abm->sh.v_tag = htonl(vtag);
11947 abm->msg.ch.chunk_flags = 0;
11948 }
11949 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11951 if (err_cause) {
11952 struct mbuf *m_tmp = err_cause;
11953 int err_len = 0;
11955 /* get length of the err_cause chain */
11956 while (m_tmp != NULL) {
11957 err_len += SCTP_BUF_LEN(m_tmp);
11958 m_tmp = SCTP_BUF_NEXT(m_tmp);
11959 }
11960 len = SCTP_BUF_LEN(mout) + err_len;
11961 if (err_len % 4) {
11962 /* need pad at end of chunk */
11963 uint32_t cpthis = 0;
11964 int padlen;
11966 padlen = 4 - (len % 4);
11967 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
11968 len += padlen;
11969 }
11970 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
11971 } else {
11972 len = SCTP_BUF_LEN(mout);
11973 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
11974 }
11976 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11977 /* no mbuf's */
11978 sctp_m_freem(mout);
11979 return;
11980 }
11982 if (iph_out != NULL) {
11983 struct route ro;
11984 int ret;
11986 /* zap the stack pointer to the route */
11987 bzero(&ro, sizeof ro);
11989 udp->uh_ulen = htons(len - sizeof(struct ip));
11990 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11992 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
11993 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
11994 /* set IPv4 length */
11995 iph_out->ip_len = len;
11997 #ifdef SCTP_PACKET_LOGGING
11998 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11999 sctp_packet_log(mout, len);
12001 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12003 #if defined(SCTP_WITH_NO_CSUM)
12004 SCTP_STAT_INCR(sctps_sendnocrc);
12006 abm->sh.checksum = sctp_calculate_cksum(mout, iphlen_out);
12007 SCTP_STAT_INCR(sctps_sendswcrc);
12009 SCTP_ENABLE_UDP_CSUM(o_pak);
12011 #if defined(SCTP_WITH_NO_CSUM)
12012 SCTP_STAT_INCR(sctps_sendnocrc);
12014 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12015 mout->m_pkthdr.csum_data = 0;
12016 SCTP_STAT_INCR(sctps_sendhwcrc);
12019 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
12021 /* Free the route if we got one back */
12027 if (ip6_out != NULL) {
12028 struct route_in6 ro;
12029 int ret;
12030 struct ifnet *ifp = NULL;
12032 /* zap the stack pointer to the route */
12033 bzero(&ro, sizeof(ro));
12035 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
12037 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
12038 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
12039 ip6_out->ip6_plen = len - sizeof(*ip6_out);
12040 #ifdef SCTP_PACKET_LOGGING
12041 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12042 sctp_packet_log(mout, len);
12044 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12046 #if defined(SCTP_WITH_NO_CSUM)
12047 SCTP_STAT_INCR(sctps_sendnocrc);
12049 abm->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
12050 SCTP_STAT_INCR(sctps_sendswcrc);
12052 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
12053 udp->uh_sum = 0xffff;
12056 #if defined(SCTP_WITH_NO_CSUM)
12057 SCTP_STAT_INCR(sctps_sendnocrc);
12059 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12060 mout->m_pkthdr.csum_data = 0;
12061 SCTP_STAT_INCR(sctps_sendhwcrc);
12064 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
12066 /* Free the route if we got one back */
12071 SCTP_STAT_INCR(sctps_sendpackets);
12072 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12073 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
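/*
 * sctp_send_operr_to() reports a problem chunk back to a peer for
 * which we hold no association, wrapping the supplied cause chain
 * "scm" in an OPERATION-ERROR chunk behind a reflected header.
 */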
12077 sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
12078 uint32_t vrf_id, uint16_t port)
12080 struct mbuf *o_pak;
12081 struct sctphdr *sh, *sh_out;
12082 struct sctp_chunkhdr *ch;
12084 struct udphdr *udp = NULL;
12086 int iphlen_out, len;
12089 struct ip *iph_out;
12093 struct ip6_hdr *ip6, *ip6_out;
12097 iph = mtod(m, struct ip *);
12098 sh = (struct sctphdr *)((caddr_t)iph + iphlen);
12099 switch (iph->ip_v) {
12102 len = (sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
12106 case IPV6_VERSION >> 4:
12107 len = (sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
12117 len += sizeof(struct udphdr);
12119 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
12120 if (mout == NULL) {
12121 if (scm) {
12122 sctp_m_freem(scm);
12123 }
12124 return;
12125 }
12126 SCTP_BUF_RESV_UF(mout, max_linkhdr);
12127 SCTP_BUF_LEN(mout) = len;
12128 SCTP_BUF_NEXT(mout) = scm;
12129 if (m->m_flags & M_FLOWID) {
12130 mout->m_pkthdr.flowid = m->m_pkthdr.flowid;
12131 mout->m_flags |= M_FLOWID;
12139 switch (iph->ip_v) {
12142 iph_out = mtod(mout, struct ip *);
12144 /* Fill in the IP header for the ABORT */
12145 iph_out->ip_v = IPVERSION;
12146 iph_out->ip_hl = (sizeof(struct ip) / 4);
12147 iph_out->ip_tos = (u_char)0;
12148 iph_out->ip_id = 0;
12149 iph_out->ip_off = 0;
12150 iph_out->ip_ttl = MAXTTL;
12151 if (port) {
12152 iph_out->ip_p = IPPROTO_UDP;
12153 } else {
12154 iph_out->ip_p = IPPROTO_SCTP;
12155 }
12156 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
12157 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
12158 /* let IP layer calculate this */
12159 iph_out->ip_sum = 0;
12161 iphlen_out = sizeof(struct ip);
12162 sh_out = (struct sctphdr *)((caddr_t)iph_out + iphlen_out);
12166 case IPV6_VERSION >> 4:
12167 ip6 = (struct ip6_hdr *)iph;
12168 ip6_out = mtod(mout, struct ip6_hdr *);
12170 /* Fill in the IP6 header for the ABORT */
12171 ip6_out->ip6_flow = ip6->ip6_flow;
12172 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
12173 if (port) {
12174 ip6_out->ip6_nxt = IPPROTO_UDP;
12175 } else {
12176 ip6_out->ip6_nxt = IPPROTO_SCTP;
12177 }
12178 ip6_out->ip6_src = ip6->ip6_dst;
12179 ip6_out->ip6_dst = ip6->ip6_src;
12181 iphlen_out = sizeof(struct ip6_hdr);
12182 sh_out = (struct sctphdr *)((caddr_t)ip6_out + iphlen_out);
12186 /* Currently not supported */
12187 sctp_m_freem(mout);
12191 udp = (struct udphdr *)sh_out;
12193 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
12194 sctp_m_freem(mout);
12197 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
12198 udp->uh_dport = port;
12199 /* set udp->uh_ulen later */
12201 iphlen_out += sizeof(struct udphdr);
12202 sh_out = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
12204 sh_out->src_port = sh->dest_port;
12205 sh_out->dest_port = sh->src_port;
12206 sh_out->v_tag = vtag;
12207 sh_out->checksum = 0;
12209 ch = (struct sctp_chunkhdr *)((caddr_t)sh_out + sizeof(struct sctphdr));
12210 ch->chunk_type = SCTP_OPERATION_ERROR;
12211 ch->chunk_flags = 0;
12213 if (scm != NULL) {
12214 struct mbuf *m_tmp = scm;
12215 int cause_len = 0;
12217 /* get length of the err_cause chain */
12218 while (m_tmp != NULL) {
12219 cause_len += SCTP_BUF_LEN(m_tmp);
12220 m_tmp = SCTP_BUF_NEXT(m_tmp);
12221 }
12222 len = SCTP_BUF_LEN(mout) + cause_len;
12223 if (cause_len % 4) {
12224 /* need pad at end of chunk */
12225 uint32_t cpthis = 0;
12226 int padlen;
12228 padlen = 4 - (len % 4);
12229 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
12230 len += padlen;
12231 }
12232 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
12233 } else {
12234 len = SCTP_BUF_LEN(mout);
12235 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr));
12236 }
12238 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
12239 /* no mbuf's */
12240 sctp_m_freem(mout);
12241 return;
12242 }
12244 if (iph_out != NULL) {
12245 struct route ro;
12246 int ret;
12248 /* zap the stack pointer to the route */
12249 bzero(&ro, sizeof ro);
12251 udp->uh_ulen = htons(len - sizeof(struct ip));
12252 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
12254 /* set IPv4 length */
12255 iph_out->ip_len = len;
12257 #ifdef SCTP_PACKET_LOGGING
12258 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12259 sctp_packet_log(mout, len);
12261 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12263 #if defined(SCTP_WITH_NO_CSUM)
12264 SCTP_STAT_INCR(sctps_sendnocrc);
12266 sh_out->checksum = sctp_calculate_cksum(mout, iphlen_out);
12267 SCTP_STAT_INCR(sctps_sendswcrc);
12269 SCTP_ENABLE_UDP_CSUM(o_pak);
12271 #if defined(SCTP_WITH_NO_CSUM)
12272 SCTP_STAT_INCR(sctps_sendnocrc);
12274 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12275 mout->m_pkthdr.csum_data = 0;
12276 SCTP_STAT_INCR(sctps_sendhwcrc);
12279 SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
12281 /* Free the route if we got one back */
12287 if (ip6_out != NULL) {
12288 struct route_in6 ro;
12289 int ret;
12290 struct ifnet *ifp = NULL;
12292 /* zap the stack pointer to the route */
12293 bzero(&ro, sizeof(ro));
12295 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
12297 ip6_out->ip6_plen = len - sizeof(*ip6_out);
12298 #ifdef SCTP_PACKET_LOGGING
12299 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12300 sctp_packet_log(mout, len);
12302 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12304 #if defined(SCTP_WITH_NO_CSUM)
12305 SCTP_STAT_INCR(sctps_sendnocrc);
12307 sh_out->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
12308 SCTP_STAT_INCR(sctps_sendswcrc);
12310 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
12311 udp->uh_sum = 0xffff;
12314 #if defined(SCTP_WITH_NO_CSUM)
12315 SCTP_STAT_INCR(sctps_sendnocrc);
12317 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12318 mout->m_pkthdr.csum_data = 0;
12319 SCTP_STAT_INCR(sctps_sendhwcrc);
12322 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, NULL, vrf_id);
12324 /* Free the route if we got one back */
12329 SCTP_STAT_INCR(sctps_sendpackets);
12330 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12331 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
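/*
 * The two copy helpers below pull user data into mbuf chains via
 * m_uiotombuf(): sctp_copy_resume() for a fragment of a larger send,
 * sctp_copy_one() for a whole queued message (reserving room up front
 * for the DATA chunk header).
 */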
12334 static struct mbuf *
12335 sctp_copy_resume(struct uio *uio,
12337 int user_marks_eor,
12340 struct mbuf **new_tail)
12344 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12345 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12347 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12350 *sndout = m_length(m, NULL);
12351 *new_tail = m_last(m);
12357 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12364 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12366 if (sp->data == NULL) {
12367 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12370 sp->tail_mbuf = m_last(sp->data);
12376 static struct sctp_stream_queue_pending *
12377 sctp_copy_it_in(struct sctp_tcb *stcb,
12378 struct sctp_association *asoc,
12379 struct sctp_sndrcvinfo *srcv,
12381 struct sctp_nets *net,
12383 int user_marks_eor,
12384 int *error)
12385 {
12386 /*
12387 * This routine must be very careful in its work. Protocol
12388 * processing is up and running so care must be taken to spl...()
12389 * when you need to do something that may affect the stcb/asoc. The
12390 * sb is locked however. When data is copied the protocol processing
12391 * should be enabled since this is a slower operation...
12392 */
12393 struct sctp_stream_queue_pending *sp = NULL;
12397 /* Now can we send this? */
12398 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12399 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12400 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12401 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12402 /* got data while shutting down */
12403 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12404 *error = ECONNRESET;
12407 sctp_alloc_a_strmoq(stcb, sp);
12408 if (sp == NULL) {
12409 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12410 *error = ENOMEM;
12411 goto out_now;
12412 }
12414 sp->sender_all_done = 0;
12415 sp->sinfo_flags = srcv->sinfo_flags;
12416 sp->timetolive = srcv->sinfo_timetolive;
12417 sp->ppid = srcv->sinfo_ppid;
12418 sp->context = srcv->sinfo_context;
12420 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12422 sp->stream = srcv->sinfo_stream;
12423 sp->length = min(uio->uio_resid, max_send_len);
12424 if ((sp->length == (uint32_t) uio->uio_resid) &&
12425 ((user_marks_eor == 0) ||
12426 (srcv->sinfo_flags & SCTP_EOF) ||
12427 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12428 sp->msg_is_complete = 1;
12430 sp->msg_is_complete = 0;
12432 sp->sender_all_done = 0;
12433 sp->some_taken = 0;
12434 sp->put_last_out = 0;
12435 resv_in_first = sizeof(struct sctp_data_chunk);
12436 sp->data = sp->tail_mbuf = NULL;
12437 if (sp->length == 0) {
12441 if (srcv->sinfo_keynumber_valid) {
12442 sp->auth_keyid = srcv->sinfo_keynumber;
12444 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12446 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12447 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12448 sp->holds_key_ref = 1;
12450 *error = sctp_copy_one(sp, uio, resv_in_first);
12453 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12456 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12458 atomic_add_int(&sp->net->ref_count, 1);
12462 sctp_set_prsctp_policy(sp);
12470 sctp_sosend(struct socket *so,
12471 struct sockaddr *addr,
12474 struct mbuf *control,
12479 int error, use_sndinfo = 0;
12480 struct sctp_sndrcvinfo sndrcvninfo;
12481 struct sockaddr *addr_to_use;
12483 #if defined(INET) && defined(INET6)
12484 struct sockaddr_in sin;
12489 /* process cmsg snd/rcv info (maybe an assoc-id) */
12490 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12491 sizeof(sndrcvninfo))) {
12492 /* got one */
12493 use_sndinfo = 1;
12494 }
12495 }
12496 addr_to_use = addr;
12497 #if defined(INET) && defined(INET6)
12498 if ((addr) && (addr->sa_family == AF_INET6)) {
12499 struct sockaddr_in6 *sin6;
12501 sin6 = (struct sockaddr_in6 *)addr;
12502 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12503 in6_sin6_2_sin(&sin, sin6);
12504 addr_to_use = (struct sockaddr *)&sin;
12508 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12511 use_sndinfo ? &sndrcvninfo : NULL
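/*
 * sctp_lower_sosend() is the workhorse behind sctp_sosend(): it
 * validates the destination and flags, locates or implicitly creates
 * the association, reserves send-buffer space (honouring non-blocking
 * sockets), and then either queues the user's abort cause or copies
 * the data in for transmission.
 */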
12519 sctp_lower_sosend(struct socket *so,
12520 struct sockaddr *addr,
12522 struct mbuf *i_pak,
12523 struct mbuf *control,
12525 struct sctp_sndrcvinfo *srcv
12530 unsigned int sndlen = 0, max_len;
12532 struct mbuf *top = NULL;
12533 int queue_only = 0, queue_only_for_init = 0;
12534 int free_cnt_applied = 0;
12536 int now_filled = 0;
12537 unsigned int inqueue_bytes = 0;
12538 struct sctp_block_entry be;
12539 struct sctp_inpcb *inp;
12540 struct sctp_tcb *stcb = NULL;
12541 struct timeval now;
12542 struct sctp_nets *net;
12543 struct sctp_association *asoc;
12544 struct sctp_inpcb *t_inp;
12545 int user_marks_eor;
12546 int create_lock_applied = 0;
12547 int nagle_applies = 0;
12548 int some_on_control = 0;
12549 int got_all_of_the_send = 0;
12550 int hold_tcblock = 0;
12551 int non_blocking = 0;
12552 uint32_t local_add_more, local_soresv = 0;
12554 uint16_t sinfo_flags;
12555 sctp_assoc_t sinfo_assoc_id;
12562 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12564 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12567 SCTP_RELEASE_PKT(i_pak);
12571 if ((uio == NULL) && (i_pak == NULL)) {
12572 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12575 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12576 atomic_add_int(&inp->total_sends, 1);
12578 if (uio->uio_resid < 0) {
12579 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12582 sndlen = uio->uio_resid;
12584 top = SCTP_HEADER_TO_CHAIN(i_pak);
12585 sndlen = SCTP_HEADER_LEN(i_pak);
12587 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12590 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12591 (inp->sctp_socket->so_qlimit)) {
12592 /* The listener can NOT send */
12593 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12597 /*
12598 * Pre-screen address, if one is given the sin-len
12599 * must be set correctly!
12600 */
12601 if (addr) {
12602 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12604 switch (raddr->sa.sa_family) {
12607 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12608 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12612 port = raddr->sin.sin_port;
12617 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12618 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12622 port = raddr->sin6.sin6_port;
12626 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12627 error = EAFNOSUPPORT;
12634 sinfo_flags = srcv->sinfo_flags;
12635 sinfo_assoc_id = srcv->sinfo_assoc_id;
12636 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12637 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12638 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12642 if (srcv->sinfo_flags)
12643 SCTP_STAT_INCR(sctps_sends_with_flags);
12645 sinfo_flags = inp->def_send.sinfo_flags;
12646 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12648 if (sinfo_flags & SCTP_SENDALL) {
12649 /* it's a sendall */
12650 error = sctp_sendall(inp, uio, top, srcv);
12654 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12655 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12659 /* now we must find the assoc */
12660 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12661 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12662 SCTP_INP_RLOCK(inp);
12663 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12665 SCTP_TCB_LOCK(stcb);
12668 SCTP_INP_RUNLOCK(inp);
12669 } else if (sinfo_assoc_id) {
12670 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0);
12671 } else if (addr) {
12672 /*
12673 * Since we did not use findep we must
12674 * increment it, and if we don't find a tcb
12675 * decrement it.
12676 */
12677 SCTP_INP_WLOCK(inp);
12678 SCTP_INP_INCR_REF(inp);
12679 SCTP_INP_WUNLOCK(inp);
12680 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12681 if (stcb == NULL) {
12682 SCTP_INP_WLOCK(inp);
12683 SCTP_INP_DECR_REF(inp);
12684 SCTP_INP_WUNLOCK(inp);
12689 if ((stcb == NULL) && (addr)) {
12690 /* Possible implicit send? */
12691 SCTP_ASOC_CREATE_LOCK(inp);
12692 create_lock_applied = 1;
12693 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12694 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12695 /* Should I really unlock ? */
12696 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12701 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12702 (addr->sa_family == AF_INET6)) {
12703 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12707 SCTP_INP_WLOCK(inp);
12708 SCTP_INP_INCR_REF(inp);
12709 SCTP_INP_WUNLOCK(inp);
12710 /* With the lock applied look again */
12711 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12712 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12713 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12715 if (stcb == NULL) {
12716 SCTP_INP_WLOCK(inp);
12717 SCTP_INP_DECR_REF(inp);
12718 SCTP_INP_WUNLOCK(inp);
12725 if (t_inp != inp) {
12726 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12731 if (stcb == NULL) {
12732 if (addr == NULL) {
12733 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12737 /* We must go ahead and start the INIT process */
12740 if ((sinfo_flags & SCTP_ABORT) ||
12741 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12742 /*
12743 * User asks to abort a non-existent assoc,
12744 * or EOF a non-existent assoc with no data
12745 */
12746 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12750 /* get an asoc/stcb struct */
12751 vrf_id = inp->def_vrf_id;
12753 if (create_lock_applied == 0) {
12754 panic("Error, should hold create lock and I don't?");
12757 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12760 if (stcb == NULL) {
12761 /* Error is setup for us in the call */
12764 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12765 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12766 /*
12767 * Set the connected flag so we can queue
12768 * data
12769 */
12770 soisconnecting(so);
12773 if (create_lock_applied) {
12774 SCTP_ASOC_CREATE_UNLOCK(inp);
12775 create_lock_applied = 0;
12776 } else {
12777 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12778 }
12779 /*
12780 * Turn on queue only flag to prevent data from
12781 * being sent
12782 */
12783 queue_only = 1;
12784 asoc = &stcb->asoc;
12785 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12786 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12788 /* initialize authentication params for the assoc */
12789 sctp_initialize_auth_params(inp, stcb);
12792 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12793 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7);
12799 /* out with the INIT */
12800 queue_only_for_init = 1;
12801 /*
12802 * we may want to dig in after this call and adjust the MTU
12803 * value. It defaulted to 1500 (constant) but the ro
12804 * structure may now have an update and thus we may need to
12805 * change it BEFORE we append the message.
12806 */
12809 asoc = &stcb->asoc;
12811 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12812 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12814 net = sctp_findnet(stcb, addr);
12817 if ((net == NULL) ||
12818 ((port != 0) && (port != stcb->rport))) {
12819 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12824 if (stcb->asoc.alternate) {
12825 net = stcb->asoc.alternate;
12827 net = stcb->asoc.primary_destination;
12830 atomic_add_int(&stcb->total_sends, 1);
12831 /* Keep the stcb from being freed under our feet */
12832 atomic_add_int(&asoc->refcnt, 1);
12833 free_cnt_applied = 1;
12835 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12836 if (sndlen > asoc->smallest_mtu) {
12837 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12842 if (SCTP_SO_IS_NBIO(so)
12843 || (flags & MSG_NBIO)
12844 ) {
12845 non_blocking = 1;
12846 }
12847 /* would we block? */
12848 if (non_blocking) {
12849 if (hold_tcblock == 0) {
12850 SCTP_TCB_LOCK(stcb);
12853 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12854 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12855 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12856 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12857 if (sndlen > SCTP_SB_LIMIT_SND(so))
12858 error = EMSGSIZE;
12859 else
12860 error = EWOULDBLOCK;
12861 goto out_unlocked;
12862 }
12863 stcb->asoc.sb_send_resv += sndlen;
12864 SCTP_TCB_UNLOCK(stcb);
12865 hold_tcblock = 0;
12866 } else {
12867 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12868 }
12869 local_soresv = sndlen;
12870 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12871 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12872 error = ECONNRESET;
12875 if (create_lock_applied) {
12876 SCTP_ASOC_CREATE_UNLOCK(inp);
12877 create_lock_applied = 0;
12879 if (asoc->stream_reset_outstanding) {
12881 * Can't queue any data while stream reset is underway.
12883 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
12887 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12888 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12891 /* we are now done with all control */
12893 sctp_m_freem(control);
12896 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12897 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12898 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12899 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12900 if (srcv->sinfo_flags & SCTP_ABORT) {
12903 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12904 error = ECONNRESET;
12908 /* Ok, we will attempt a msgsnd :> */
12910 p->td_ru.ru_msgsnd++;
12912 /* Are we aborting? */
12913 if (srcv->sinfo_flags & SCTP_ABORT) {
12915 int tot_demand, tot_out = 0, max_out;
12917 SCTP_STAT_INCR(sctps_sends_with_abort);
12918 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12919 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12920 /* It has to be up before we abort */
12921 /* how big is the user initiated abort? */
12922 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12926 if (hold_tcblock) {
12927 SCTP_TCB_UNLOCK(stcb);
        if (top) {
            struct mbuf *cntm = NULL;

            mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAIT, 1, MT_DATA);
            for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
                tot_out += SCTP_BUF_LEN(cntm);
            }
        } else {
            /* Must fit in an MTU */
            tot_out = sndlen;
            tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
            if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
                /* Too big */
                SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
                error = EMSGSIZE;
                goto out;
            }
            mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
        }
        if (mm == NULL) {
            SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
            error = ENOMEM;
            goto out;
        }
        max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
        max_out -= sizeof(struct sctp_abort_msg);
        if (tot_out > max_out) {
            tot_out = max_out;
        }
        if (mm) {
            struct sctp_paramhdr *ph;

            /* now move forward the data pointer */
            ph = mtod(mm, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
            ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
            ph++;
            SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
            if (top == NULL) {
                error = uiomove((caddr_t)ph, (int)tot_out, uio);
                if (error) {
                    /*-
                     * If we can't get the user's data we
                     * still abort; we just don't get to
                     * send the user's note :-0
                     */
                    sctp_m_freem(mm);
                    mm = NULL;
                }
            } else {
                SCTP_BUF_NEXT(mm) = top;
            }
        }
        if (hold_tcblock == 0) {
            SCTP_TCB_LOCK(stcb);
        }
        atomic_add_int(&stcb->asoc.refcnt, -1);
        free_cnt_applied = 0;
        /* release this lock, otherwise we hang on ourselves */
        sctp_abort_an_association(stcb->sctp_ep, stcb,
            SCTP_RESPONSE_TO_USER_REQ,
            mm, SCTP_SO_LOCKED);
        /* now relock the stcb so everything is sane */
        hold_tcblock = 0;
        stcb = NULL;
        /*-
         * In this case top is already chained to mm; avoid a double
         * free, since we free top below if it is still set and the
         * driver would free it after sending the packet out.
         */
        top = NULL;
        goto out_unlocked;
    }
    /* Calculate the maximum we can send */
    inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
    if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
        if (non_blocking) {
            /* we already checked for non-blocking above. */
            max_len = sndlen;
        } else {
            max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
        }
    } else {
        max_len = 0;
    }
    if (hold_tcblock) {
        SCTP_TCB_UNLOCK(stcb);
        hold_tcblock = 0;
    }
    /* Is the stream no. valid? */
    if (srcv->sinfo_stream >= asoc->streamoutcnt) {
        /* Invalid stream number */
        SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
        error = EINVAL;
        goto out_unlocked;
    }
    if (asoc->strmout == NULL) {
        /* huh? software error */
        SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
        error = EFAULT;
        goto out_unlocked;
    }
    /* Unless E_EOR mode is on, we must make a send FIT in one call. */
    if ((user_marks_eor == 0) &&
        (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
        /* It will NEVER fit */
        SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
        error = EMSGSIZE;
        goto out_unlocked;
    }
    if ((uio == NULL) && user_marks_eor) {
        /*-
         * We do not support eeor mode for
         * sending with mbuf chains (like sendfile).
         */
        SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
        error = EINVAL;
        goto out_unlocked;
    }
    if (user_marks_eor) {
        local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
    } else {
        /*-
         * For non-eeor the whole message must fit in
         * the socket send buffer.
         */
        local_add_more = sndlen;
    }
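    /*
     * Hedged illustration (not part of the kernel path above): with the
     * explicit-EOR option a user can push one message through several
     * send calls, which is why the one-call fit check is relaxed when
     * user_marks_eor is set.  A minimal user-space sketch, assuming the
     * RFC 6458 API (SCTP_EXPLICIT_EOR, SCTP_EOR, sctp_send(3)):
     *
     *	const int on = 1;
     *	struct sctp_sndrcvinfo info;
     *
     *	setsockopt(fd, IPPROTO_SCTP, SCTP_EXPLICIT_EOR, &on, sizeof(on));
     *	memset(&info, 0, sizeof(info));
     *	sctp_send(fd, part1, len1, &info, 0);	(more of this message follows)
     *	info.sinfo_flags = SCTP_EOR;
     *	sctp_send(fd, part2, len2, &info, 0);	(message ends here)
     */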
    if (non_blocking) {
        goto skip_preblock;
    }
    if (((max_len <= local_add_more) &&
        (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
        (max_len == 0) ||
        ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
        /* No room right now! */
        SOCKBUF_LOCK(&so->so_snd);
        inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
        while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
            ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
            SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
                (unsigned int)SCTP_SB_LIMIT_SND(so),
                inqueue_bytes,
                local_add_more,
                stcb->asoc.stream_queue_cnt,
                stcb->asoc.chunks_on_out_queue,
                SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
            }
            be.error = 0;
            stcb->block_entry = &be;
            error = sbwait(&so->so_snd);
            stcb->block_entry = NULL;
            if (error || so->so_error || be.error) {
                if (error == 0) {
                    if (so->so_error)
                        error = so->so_error;
                    if (be.error) {
                        error = be.error;
                    }
                }
                SOCKBUF_UNLOCK(&so->so_snd);
                goto out_unlocked;
            }
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
                    asoc, stcb->asoc.total_output_queue_size);
            }
            if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
                SOCKBUF_UNLOCK(&so->so_snd);
                goto out_unlocked;
            }
            inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
        }
        if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
            max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
        } else {
            max_len = 0;
        }
        SOCKBUF_UNLOCK(&so->so_snd);
    }

skip_preblock:
    if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
        goto out_unlocked;
    }
    /*-
     * sndlen covers for the mbuf case; uio_resid covers for the
     * non-mbuf case.  NOTE: uio will be NULL when top (an mbuf
     * chain) is passed.
     */
    if (sndlen == 0) {
        if (srcv->sinfo_flags & SCTP_EOF) {
            got_all_of_the_send = 1;
            goto dataless_eof;
        } else {
            SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
            error = EINVAL;
            goto out;
        }
    }
    if (top == NULL) {
        struct sctp_stream_queue_pending *sp;
        struct sctp_stream_out *strm;
        uint32_t sndout;

        SCTP_TCB_SEND_LOCK(stcb);
        if ((asoc->stream_locked) &&
            (asoc->stream_locked_on != srcv->sinfo_stream)) {
            SCTP_TCB_SEND_UNLOCK(stcb);
            SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
            error = EINVAL;
            goto out;
        }
        SCTP_TCB_SEND_UNLOCK(stcb);

        strm = &stcb->asoc.strmout[srcv->sinfo_stream];
        if (strm->last_msg_incomplete == 0) {
    do_a_copy_in:
            sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
            if ((sp == NULL) || (error)) {
                goto out;
            }
            SCTP_TCB_SEND_LOCK(stcb);
            if (sp->msg_is_complete) {
                strm->last_msg_incomplete = 0;
                asoc->stream_locked = 0;
            } else {
                /*-
                 * Just got locked to this guy in case of an
                 * interrupt.
                 */
                strm->last_msg_incomplete = 1;
                asoc->stream_locked = 1;
                asoc->stream_locked_on = srcv->sinfo_stream;
                sp->sender_all_done = 0;
            }
            sctp_snd_sb_alloc(stcb, sp->length);
            atomic_add_int(&asoc->stream_queue_cnt, 1);
            if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
                sp->strseq = strm->next_sequence_sent;
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
                    sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
                        (uintptr_t) stcb, sp->length,
                        (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
                }
                strm->next_sequence_sent++;
            } else {
                SCTP_STAT_INCR(sctps_sends_with_unord);
            }
            TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
            stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
            SCTP_TCB_SEND_UNLOCK(stcb);
        } else {
            SCTP_TCB_SEND_LOCK(stcb);
            sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
            SCTP_TCB_SEND_UNLOCK(stcb);
            if (sp == NULL) {
                /* ???? Huh ??? last msg is gone */
#ifdef INVARIANTS
                panic("Warning: Last msg marked incomplete, yet nothing left?");
#else
                SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
                strm->last_msg_incomplete = 0;
#endif
                goto do_a_copy_in;
            }
        }
        while (uio->uio_resid > 0) {
            /* How much room do we have? */
            struct mbuf *new_tail, *mm;

            if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
                max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
            else
                max_len = 0;

            if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
                (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
                (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
                if (hold_tcblock) {
                    SCTP_TCB_UNLOCK(stcb);
                    hold_tcblock = 0;
                }
                mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
                if ((mm == NULL) || error) {
                    if (mm) {
                        sctp_m_freem(mm);
                    }
                    goto out;
                }
                /* Update the mbuf and count */
                SCTP_TCB_SEND_LOCK(stcb);
                if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
                    /*-
                     * We need to get out; the peer probably
                     * aborted.
                     */
                    sctp_m_freem(mm);
                    if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
                        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
                        error = ECONNRESET;
                    }
                    SCTP_TCB_SEND_UNLOCK(stcb);
                    goto out;
                }
                if (sp->tail_mbuf) {
                    /* tack it to the end */
                    SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
                    sp->tail_mbuf = new_tail;
                } else {
                    /* A stolen mbuf */
                    sp->data = mm;
                    sp->tail_mbuf = new_tail;
                }
                sctp_snd_sb_alloc(stcb, sndout);
                atomic_add_int(&sp->length, sndout);

                /* Did we reach EOR? */
                if ((uio->uio_resid == 0) &&
                    ((user_marks_eor == 0) ||
                    (srcv->sinfo_flags & SCTP_EOF) ||
                    (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
                    sp->msg_is_complete = 1;
                } else {
                    sp->msg_is_complete = 0;
                }
                SCTP_TCB_SEND_UNLOCK(stcb);
            }
            if (uio->uio_resid == 0) {
                /* got it all? */
                continue;
            }
            /* PR-SCTP? */
            if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
                /*-
                 * This is ugly but we must assure locking
                 * order.
                 */
                if (hold_tcblock == 0) {
                    SCTP_TCB_LOCK(stcb);
                    hold_tcblock = 1;
                }
                sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
                inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
                if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
                    max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
                else
                    max_len = 0;
            }
            if (hold_tcblock) {
                SCTP_TCB_UNLOCK(stcb);
                hold_tcblock = 0;
            }
            /* wait for space now */
            if (non_blocking) {
                /* Non-blocking io in place out */
                goto skip_out_eof;
            }
            /* What about the INIT, send it maybe */
            if (queue_only_for_init) {
                if (hold_tcblock == 0) {
                    SCTP_TCB_LOCK(stcb);
                    hold_tcblock = 1;
                }
                if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
                    /* a collision took us forward? */
                    queue_only = 0;
                } else {
                    sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
                    SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
                    queue_only = 1;
                }
            }
            if ((net->flight_size > net->cwnd) &&
                (asoc->sctp_cmt_on_off == 0)) {
                SCTP_STAT_INCR(sctps_send_cwnd_avoid);
                queue_only = 1;
            } else if (asoc->ifp_had_enobuf) {
                SCTP_STAT_INCR(sctps_ifnomemqueued);
                if (net->flight_size > (2 * net->mtu)) {
                    queue_only = 1;
                }
                asoc->ifp_had_enobuf = 0;
            }
            un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
                (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
            if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
                (stcb->asoc.total_flight > 0) &&
                (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
                (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
                /*-
                 * Ok, Nagle is set on and we have data outstanding.
                 * Don't send anything and let SACKs drive out the
                 * data unless we have a "full" segment to send.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
                    sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
                }
                SCTP_STAT_INCR(sctps_naglequeued);
                nagle_applies = 1;
            } else {
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
                    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
                        sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
                }
                SCTP_STAT_INCR(sctps_naglesent);
                nagle_applies = 0;
            }
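            /*
             * A hedged restatement of the Nagle test above (illustrative
             * helper, not used by this file): queue rather than send when
             * Nagle is enabled, data is already in flight, and less than
             * a full segment's worth of user data is waiting:
             *
             *	static int
             *	would_nagle(int nodelay, uint32_t flight, int un_sent,
             *	    uint32_t smallest_mtu)
             *	{
             *		return (!nodelay && flight > 0 &&
             *		    un_sent < (int)(smallest_mtu - SCTP_MIN_OVERHEAD));
             *	}
             */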
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
                    nagle_applies, un_sent);
                sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
                    stcb->asoc.total_flight,
                    stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
            }
            if (queue_only_for_init)
                queue_only_for_init = 0;
            if ((queue_only == 0) && (nagle_applies == 0)) {
                /*-
                 * Need to start chunk output
                 * before blocking...  Note that if
                 * a lock is already applied, then
                 * the input via the net is happening
                 * and I don't need to start output :-D
                 */
                if (hold_tcblock == 0) {
                    if (SCTP_TCB_TRYLOCK(stcb)) {
                        hold_tcblock = 1;
                        sctp_chunk_output(inp,
                            stcb,
                            SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
                    }
                } else {
                    sctp_chunk_output(inp,
                        stcb,
                        SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
                }
                if (hold_tcblock == 1) {
                    SCTP_TCB_UNLOCK(stcb);
                    hold_tcblock = 0;
                }
            }
            SOCKBUF_LOCK(&so->so_snd);
            /*-
             * This is a bit strange, but I think it will work.
             * The total_output_queue_size is locked and protected
             * by the TCB_LOCK, which we just released.  There is a
             * race that can occur between releasing it above and me
             * getting the socket lock, where sacks come in but we
             * have not put the SB_WAIT on the so_snd buffer to get
             * the wakeup.  After the LOCK is applied the sack
             * processing will also need to LOCK the so->so_snd to
             * do the actual sowwakeup().  So once we have the
             * socket buffer lock, if we recheck the size we KNOW
             * we will get to sleep safely with the wakeup flag in
             * place.
             */
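            /*
             * Hedged analogy for the comment above (assumed names, plain
             * pthreads rather than kernel primitives): a sleeper must
             * recheck its predicate after taking the same lock the waker
             * uses, so the wakeup can never slip between test and sleep:
             *
             *	pthread_mutex_lock(&sb_lock);
             *	while (queued + want > limit)
             *		pthread_cond_wait(&sb_cv, &sb_lock);
             *	pthread_mutex_unlock(&sb_lock);
             */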
            if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
                min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                    sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
                        asoc, uio->uio_resid);
                }
                be.error = 0;
                stcb->block_entry = &be;
                error = sbwait(&so->so_snd);
                stcb->block_entry = NULL;

                if (error || so->so_error || be.error) {
                    if (error == 0) {
                        if (so->so_error)
                            error = so->so_error;
                        if (be.error) {
                            error = be.error;
                        }
                    }
                    SOCKBUF_UNLOCK(&so->so_snd);
                    goto out_unlocked;
                }
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                    sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
                        asoc, stcb->asoc.total_output_queue_size);
                }
            }
            SOCKBUF_UNLOCK(&so->so_snd);
            if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
                goto out_unlocked;
            }
        }
        SCTP_TCB_SEND_LOCK(stcb);
        if (sp) {
            if (sp->msg_is_complete == 0) {
                strm->last_msg_incomplete = 1;
                asoc->stream_locked = 1;
                asoc->stream_locked_on = srcv->sinfo_stream;
            } else {
                sp->sender_all_done = 1;
                strm->last_msg_incomplete = 0;
                asoc->stream_locked = 0;
            }
        } else {
            SCTP_PRINTF("Huh no sp TSNH?\n");
            strm->last_msg_incomplete = 0;
            asoc->stream_locked = 0;
        }
        SCTP_TCB_SEND_UNLOCK(stcb);
        if (uio->uio_resid == 0) {
            got_all_of_the_send = 1;
        }
    } else {
        /* We send in a 0, since we do NOT have any locks */
        error = sctp_msg_append(stcb, net, top, srcv, 0);
        top = NULL;
        if (srcv->sinfo_flags & SCTP_EOF) {
            /*-
             * This should only happen for Panda for the mbuf
             * send case, which does NOT yet support EEOR mode.
             * Thus, we can just set this flag to do the proper
             * EOF handling.
             */
            got_all_of_the_send = 1;
        }
    }
    if (error) {
        goto out;
    }
dataless_eof:
    /* EOF thing ? */
    if ((srcv->sinfo_flags & SCTP_EOF) &&
        (got_all_of_the_send == 1) &&
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
        int cnt;

        SCTP_STAT_INCR(sctps_sends_with_eof);
        error = 0;
        if (hold_tcblock == 0) {
            SCTP_TCB_LOCK(stcb);
            hold_tcblock = 1;
        }
        cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED);
        if (TAILQ_EMPTY(&asoc->send_queue) &&
            TAILQ_EMPTY(&asoc->sent_queue) &&
            (cnt == 0)) {
            if (asoc->locked_on_sending) {
                goto abort_anyway;
            }
            /* there is nothing queued to send, so I'm done... */
            if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
                struct sctp_nets *netp;

                if (stcb->asoc.alternate) {
                    netp = stcb->asoc.alternate;
                } else {
                    netp = stcb->asoc.primary_destination;
                }
                /* only send SHUTDOWN the first time through */
                sctp_send_shutdown(stcb, netp);
                if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
                    SCTP_STAT_DECR_GAUGE32(sctps_currestab);
                }
                SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
                SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
                    netp);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
                    asoc->primary_destination);
            }
        } else {
            /*-
             * we still got (or just got) data to send, so set
             * SHUTDOWN_PENDING.
             */
            /*-
             * XXX sockets draft says that SCTP_EOF should be
             * sent with no data.  Currently, we will allow user
             * data to be sent first and move to
             * SHUTDOWN-PENDING.
             */
            if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
                if (hold_tcblock == 0) {
                    SCTP_TCB_LOCK(stcb);
                    hold_tcblock = 1;
                }
                if (asoc->locked_on_sending) {
                    /* Locked to send out the data */
                    struct sctp_stream_queue_pending *sp;

                    sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
                    if (sp) {
                        if ((sp->length == 0) && (sp->msg_is_complete == 0))
                            asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
                    }
                }
                asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
                if (TAILQ_EMPTY(&asoc->send_queue) &&
                    TAILQ_EMPTY(&asoc->sent_queue) &&
                    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
            abort_anyway:
                    if (free_cnt_applied) {
                        atomic_add_int(&stcb->asoc.refcnt, -1);
                        free_cnt_applied = 0;
                    }
                    sctp_abort_an_association(stcb->sctp_ep, stcb,
                        SCTP_RESPONSE_TO_USER_REQ,
                        NULL, SCTP_SO_LOCKED);
                    /*-
                     * now relock the stcb so everything
                     * is sane.
                     */
                    hold_tcblock = 0;
                    stcb = NULL;
                    goto out;
                }
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
                    asoc->primary_destination);
                sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
            }
        }
    }
skip_out_eof:
    if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
        some_on_control = 1;
    }
    if (queue_only_for_init) {
        if (hold_tcblock == 0) {
            SCTP_TCB_LOCK(stcb);
            hold_tcblock = 1;
        }
        if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
            /* a collision took us forward? */
            queue_only = 0;
        } else {
            sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
            SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
            queue_only = 1;
        }
    }
    if ((net->flight_size > net->cwnd) &&
        (stcb->asoc.sctp_cmt_on_off == 0)) {
        SCTP_STAT_INCR(sctps_send_cwnd_avoid);
        queue_only = 1;
    } else if (asoc->ifp_had_enobuf) {
        SCTP_STAT_INCR(sctps_ifnomemqueued);
        if (net->flight_size > (2 * net->mtu)) {
            queue_only = 1;
        }
        asoc->ifp_had_enobuf = 0;
    }
    un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
        (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
    if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
        (stcb->asoc.total_flight > 0) &&
        (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
        (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
        /*-
         * Ok, Nagle is set on and we have data outstanding.
         * Don't send anything and let SACKs drive out the
         * data unless we have a "full" segment to send.
         */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
            sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
        }
        SCTP_STAT_INCR(sctps_naglequeued);
        nagle_applies = 1;
    } else {
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
            if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
                sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
        }
        SCTP_STAT_INCR(sctps_naglesent);
        nagle_applies = 0;
    }
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
        sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
            nagle_applies, un_sent);
        sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
            stcb->asoc.total_flight,
            stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
    }
    if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
        /* we can attempt to send too. */
        if (hold_tcblock == 0) {
            /*-
             * If there is activity recv'ing sacks no need to
             * send.
             */
            if (SCTP_TCB_TRYLOCK(stcb)) {
                hold_tcblock = 1;
                sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
            }
        } else {
            sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
        }
    } else if ((queue_only == 0) &&
        (stcb->asoc.peers_rwnd == 0) &&
        (stcb->asoc.total_flight == 0)) {
        /* We get to have a probe outstanding */
        if (hold_tcblock == 0) {
            hold_tcblock = 1;
            SCTP_TCB_LOCK(stcb);
        }
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
    } else if (some_on_control) {
        int num_out, reason, frag_point;

        /* Here we do control only */
        if (hold_tcblock == 0) {
            hold_tcblock = 1;
            SCTP_TCB_LOCK(stcb);
        }
        frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
        (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
            &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
        queue_only, stcb->asoc.peers_rwnd, un_sent,
        stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
        stcb->asoc.total_output_queue_size, error);
out:
out_unlocked:

    if (local_soresv && stcb) {
        atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
    }
    if (create_lock_applied) {
        SCTP_ASOC_CREATE_UNLOCK(inp);
    }
    if ((stcb) && hold_tcblock) {
        SCTP_TCB_UNLOCK(stcb);
    }
    if (stcb && free_cnt_applied) {
        atomic_add_int(&stcb->asoc.refcnt, -1);
    }
#ifdef INVARIANTS
    if (stcb) {
        if (mtx_owned(&stcb->tcb_mtx)) {
            panic("Leaving with tcb mtx owned?");
        }
        if (mtx_owned(&stcb->tcb_send_mtx)) {
            panic("Leaving with tcb send mtx owned?");
        }
    }
    if (inp) {
        sctp_validate_no_locks(inp);
    } else {
        printf("Warning - inp is NULL so can't validate locks\n");
    }
#endif
    if (top) {
        sctp_m_freem(top);
    }
    if (control) {
        sctp_m_freem(control);
    }
    return (error);
}
/*-
 * generate an AUTHentication chunk, if required
 */
static struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
    struct mbuf *m_auth;
    struct sctp_auth_chunk *auth;
    int chunk_len;
    struct mbuf *cn;

    if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
        (stcb == NULL))
        return (m);

    /* sysctl disabled auth? */
    if (SCTP_BASE_SYSCTL(sctp_auth_disable))
        return (m);
    /* peer doesn't do auth... */
    if (!stcb->asoc.peer_supports_auth) {
        return (m);
    }
    /* does the requested chunk require auth? */
    if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
        return (m);
    }
    m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
    if (m_auth == NULL) {
        return (m);
    }
    /* reserve some space if this will be the first mbuf */
    if (m == NULL)
        SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
    /* fill in the AUTH chunk details */
    auth = mtod(m_auth, struct sctp_auth_chunk *);
    bzero(auth, sizeof(*auth));
    auth->ch.chunk_type = SCTP_AUTHENTICATION;
    auth->ch.chunk_flags = 0;
    chunk_len = sizeof(*auth) +
        sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
    auth->ch.chunk_length = htons(chunk_len);
    auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
    /* key id and hmac digest will be computed and filled in upon send */

    /* save the offset where the auth was inserted into the chain */
    *offset = 0;
    for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
        *offset += SCTP_BUF_LEN(cn);
    }

    /* update length and return pointer to the auth chunk */
    SCTP_BUF_LEN(m_auth) = chunk_len;
    m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
    if (auth_ret != NULL)
        *auth_ret = auth;

    return (m);
}
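/*
 * Hedged usage sketch for sctp_add_auth_chunk() (assumed call pattern,
 * not a verbatim caller): reserve the AUTH chunk while the packet is
 * being built and remember where it sits, then compute the HMAC right
 * before transmit:
 *
 *	struct sctp_auth_chunk *auth = NULL;
 *	uint32_t auth_offset = 0;
 *
 *	m = sctp_add_auth_chunk(m, &endofchain, &auth, &auth_offset,
 *	    stcb, SCTP_DATA);
 *	...
 *	sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
 */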
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
{
    struct nd_prefix *pfx = NULL;
    struct nd_pfxrouter *pfxrtr = NULL;
    struct sockaddr_in6 gw6;

    if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
        return (0);

    /* get prefix entry of address */
    LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
        if (pfx->ndpr_stateflags & NDPRF_DETACHED)
            continue;
        if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
            &src6->sin6_addr, &pfx->ndpr_mask))
            break;
    }
    /* no prefix entry in the prefix list */
    if (pfx == NULL) {
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
        return (0);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);

    /* search installed gateway from prefix entry */
    for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
        pfxrtr->pfr_next) {
        memset(&gw6, 0, sizeof(struct sockaddr_in6));
        gw6.sin6_family = AF_INET6;
        gw6.sin6_len = sizeof(struct sockaddr_in6);
        memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
            sizeof(struct in6_addr));
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
        if (sctp_cmpaddr((struct sockaddr *)&gw6,
            ro->ro_rt->rt_gateway)) {
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
            return (1);
        }
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
    return (0);
}
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
{
    struct sockaddr_in *sin, *mask;
    struct ifaddr *ifa;
    struct in_addr srcnetaddr, gwnetaddr;

    if (ro == NULL || ro->ro_rt == NULL ||
        sifa->address.sa.sa_family != AF_INET) {
        return (0);
    }
    ifa = (struct ifaddr *)sifa->ifa;
    mask = (struct sockaddr_in *)(ifa->ifa_netmask);
    sin = (struct sockaddr_in *)&sifa->address.sin;
    srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);

    sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
    gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
    if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
        return (1);
    }
    return (0);
}
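/*
 * Worked example for the masked compare above (assumed addresses): with
 * source 192.0.2.10, netmask 255.255.255.0 and gateway 192.0.2.1,
 *
 *	srcnetaddr = 192.0.2.10 & 255.255.255.0 = 192.0.2.0
 *	gwnetaddr  = 192.0.2.1  & 255.255.255.0 = 192.0.2.0
 *
 * so the gateway lies on the source's subnet and the function returns 1.
 */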