2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctputil.h>
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_indata.h>
52 #include <netinet/sctp_bsd_addr.h>
53 #include <netinet/sctp_input.h>
54 #include <netinet/sctp_crc32.h>
55 #if defined(INET) || defined(INET6)
56 #include <netinet/udp.h>
58 #include <netinet/udp_var.h>
59 #include <machine/in_cksum.h>
60 #include <netinet/in_kdtrace.h>
64 #define SCTP_MAX_GAPS_INARRAY 4
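/*
 * The sack_track table below is a 256-entry lookup indexed by one byte of
 * the receiver's TSN mapping array (each bit marking one TSN above the
 * cumulative ack point).  Every entry pre-computes the gap-ack blocks that
 * the bit pattern describes: the third field is how many blocks the byte
 * contributes (e.g. 4 for 0x55, whose alternating bits form four gaps),
 * while right_edge/left_edge record whether the lowest or highest bit is
 * set, so that runs of set bits can be merged across byte boundaries when
 * a SACK is built.
 */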
66 uint8_t right_edge; /* mergeable on the right edge */
67 uint8_t left_edge; /* mergeable on the left edge */
70 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
73 const struct sack_track sack_array[256] = {
74 {0, 0, 0, 0, /* 0x00 */
81 {1, 0, 1, 0, /* 0x01 */
88 {0, 0, 1, 0, /* 0x02 */
95 {1, 0, 1, 0, /* 0x03 */
102 {0, 0, 1, 0, /* 0x04 */
109 {1, 0, 2, 0, /* 0x05 */
116 {0, 0, 1, 0, /* 0x06 */
123 {1, 0, 1, 0, /* 0x07 */
130 {0, 0, 1, 0, /* 0x08 */
137 {1, 0, 2, 0, /* 0x09 */
144 {0, 0, 2, 0, /* 0x0a */
151 {1, 0, 2, 0, /* 0x0b */
158 {0, 0, 1, 0, /* 0x0c */
165 {1, 0, 2, 0, /* 0x0d */
172 {0, 0, 1, 0, /* 0x0e */
179 {1, 0, 1, 0, /* 0x0f */
186 {0, 0, 1, 0, /* 0x10 */
193 {1, 0, 2, 0, /* 0x11 */
200 {0, 0, 2, 0, /* 0x12 */
207 {1, 0, 2, 0, /* 0x13 */
214 {0, 0, 2, 0, /* 0x14 */
221 {1, 0, 3, 0, /* 0x15 */
228 {0, 0, 2, 0, /* 0x16 */
235 {1, 0, 2, 0, /* 0x17 */
242 {0, 0, 1, 0, /* 0x18 */
249 {1, 0, 2, 0, /* 0x19 */
256 {0, 0, 2, 0, /* 0x1a */
263 {1, 0, 2, 0, /* 0x1b */
270 {0, 0, 1, 0, /* 0x1c */
277 {1, 0, 2, 0, /* 0x1d */
284 {0, 0, 1, 0, /* 0x1e */
291 {1, 0, 1, 0, /* 0x1f */
298 {0, 0, 1, 0, /* 0x20 */
305 {1, 0, 2, 0, /* 0x21 */
312 {0, 0, 2, 0, /* 0x22 */
319 {1, 0, 2, 0, /* 0x23 */
326 {0, 0, 2, 0, /* 0x24 */
333 {1, 0, 3, 0, /* 0x25 */
340 {0, 0, 2, 0, /* 0x26 */
347 {1, 0, 2, 0, /* 0x27 */
354 {0, 0, 2, 0, /* 0x28 */
361 {1, 0, 3, 0, /* 0x29 */
368 {0, 0, 3, 0, /* 0x2a */
375 {1, 0, 3, 0, /* 0x2b */
382 {0, 0, 2, 0, /* 0x2c */
389 {1, 0, 3, 0, /* 0x2d */
396 {0, 0, 2, 0, /* 0x2e */
403 {1, 0, 2, 0, /* 0x2f */
410 {0, 0, 1, 0, /* 0x30 */
417 {1, 0, 2, 0, /* 0x31 */
424 {0, 0, 2, 0, /* 0x32 */
431 {1, 0, 2, 0, /* 0x33 */
438 {0, 0, 2, 0, /* 0x34 */
445 {1, 0, 3, 0, /* 0x35 */
452 {0, 0, 2, 0, /* 0x36 */
459 {1, 0, 2, 0, /* 0x37 */
466 {0, 0, 1, 0, /* 0x38 */
473 {1, 0, 2, 0, /* 0x39 */
480 {0, 0, 2, 0, /* 0x3a */
487 {1, 0, 2, 0, /* 0x3b */
494 {0, 0, 1, 0, /* 0x3c */
501 {1, 0, 2, 0, /* 0x3d */
508 {0, 0, 1, 0, /* 0x3e */
515 {1, 0, 1, 0, /* 0x3f */
522 {0, 0, 1, 0, /* 0x40 */
529 {1, 0, 2, 0, /* 0x41 */
536 {0, 0, 2, 0, /* 0x42 */
543 {1, 0, 2, 0, /* 0x43 */
550 {0, 0, 2, 0, /* 0x44 */
557 {1, 0, 3, 0, /* 0x45 */
564 {0, 0, 2, 0, /* 0x46 */
571 {1, 0, 2, 0, /* 0x47 */
578 {0, 0, 2, 0, /* 0x48 */
585 {1, 0, 3, 0, /* 0x49 */
592 {0, 0, 3, 0, /* 0x4a */
599 {1, 0, 3, 0, /* 0x4b */
606 {0, 0, 2, 0, /* 0x4c */
613 {1, 0, 3, 0, /* 0x4d */
620 {0, 0, 2, 0, /* 0x4e */
627 {1, 0, 2, 0, /* 0x4f */
634 {0, 0, 2, 0, /* 0x50 */
641 {1, 0, 3, 0, /* 0x51 */
648 {0, 0, 3, 0, /* 0x52 */
655 {1, 0, 3, 0, /* 0x53 */
662 {0, 0, 3, 0, /* 0x54 */
669 {1, 0, 4, 0, /* 0x55 */
676 {0, 0, 3, 0, /* 0x56 */
683 {1, 0, 3, 0, /* 0x57 */
690 {0, 0, 2, 0, /* 0x58 */
697 {1, 0, 3, 0, /* 0x59 */
704 {0, 0, 3, 0, /* 0x5a */
711 {1, 0, 3, 0, /* 0x5b */
718 {0, 0, 2, 0, /* 0x5c */
725 {1, 0, 3, 0, /* 0x5d */
732 {0, 0, 2, 0, /* 0x5e */
739 {1, 0, 2, 0, /* 0x5f */
746 {0, 0, 1, 0, /* 0x60 */
753 {1, 0, 2, 0, /* 0x61 */
760 {0, 0, 2, 0, /* 0x62 */
767 {1, 0, 2, 0, /* 0x63 */
774 {0, 0, 2, 0, /* 0x64 */
781 {1, 0, 3, 0, /* 0x65 */
788 {0, 0, 2, 0, /* 0x66 */
795 {1, 0, 2, 0, /* 0x67 */
802 {0, 0, 2, 0, /* 0x68 */
809 {1, 0, 3, 0, /* 0x69 */
816 {0, 0, 3, 0, /* 0x6a */
823 {1, 0, 3, 0, /* 0x6b */
830 {0, 0, 2, 0, /* 0x6c */
837 {1, 0, 3, 0, /* 0x6d */
844 {0, 0, 2, 0, /* 0x6e */
851 {1, 0, 2, 0, /* 0x6f */
858 {0, 0, 1, 0, /* 0x70 */
865 {1, 0, 2, 0, /* 0x71 */
872 {0, 0, 2, 0, /* 0x72 */
879 {1, 0, 2, 0, /* 0x73 */
886 {0, 0, 2, 0, /* 0x74 */
893 {1, 0, 3, 0, /* 0x75 */
900 {0, 0, 2, 0, /* 0x76 */
907 {1, 0, 2, 0, /* 0x77 */
914 {0, 0, 1, 0, /* 0x78 */
921 {1, 0, 2, 0, /* 0x79 */
928 {0, 0, 2, 0, /* 0x7a */
935 {1, 0, 2, 0, /* 0x7b */
942 {0, 0, 1, 0, /* 0x7c */
949 {1, 0, 2, 0, /* 0x7d */
956 {0, 0, 1, 0, /* 0x7e */
963 {1, 0, 1, 0, /* 0x7f */
970 {0, 1, 1, 0, /* 0x80 */
977 {1, 1, 2, 0, /* 0x81 */
984 {0, 1, 2, 0, /* 0x82 */
991 {1, 1, 2, 0, /* 0x83 */
998 {0, 1, 2, 0, /* 0x84 */
1005 {1, 1, 3, 0, /* 0x85 */
1012 {0, 1, 2, 0, /* 0x86 */
1019 {1, 1, 2, 0, /* 0x87 */
1026 {0, 1, 2, 0, /* 0x88 */
1033 {1, 1, 3, 0, /* 0x89 */
1040 {0, 1, 3, 0, /* 0x8a */
1047 {1, 1, 3, 0, /* 0x8b */
1054 {0, 1, 2, 0, /* 0x8c */
1061 {1, 1, 3, 0, /* 0x8d */
1068 {0, 1, 2, 0, /* 0x8e */
1075 {1, 1, 2, 0, /* 0x8f */
1082 {0, 1, 2, 0, /* 0x90 */
1089 {1, 1, 3, 0, /* 0x91 */
1096 {0, 1, 3, 0, /* 0x92 */
1103 {1, 1, 3, 0, /* 0x93 */
1110 {0, 1, 3, 0, /* 0x94 */
1117 {1, 1, 4, 0, /* 0x95 */
1124 {0, 1, 3, 0, /* 0x96 */
1131 {1, 1, 3, 0, /* 0x97 */
1138 {0, 1, 2, 0, /* 0x98 */
1145 {1, 1, 3, 0, /* 0x99 */
1152 {0, 1, 3, 0, /* 0x9a */
1159 {1, 1, 3, 0, /* 0x9b */
1166 {0, 1, 2, 0, /* 0x9c */
1173 {1, 1, 3, 0, /* 0x9d */
1180 {0, 1, 2, 0, /* 0x9e */
1187 {1, 1, 2, 0, /* 0x9f */
1194 {0, 1, 2, 0, /* 0xa0 */
1201 {1, 1, 3, 0, /* 0xa1 */
1208 {0, 1, 3, 0, /* 0xa2 */
1215 {1, 1, 3, 0, /* 0xa3 */
1222 {0, 1, 3, 0, /* 0xa4 */
1229 {1, 1, 4, 0, /* 0xa5 */
1236 {0, 1, 3, 0, /* 0xa6 */
1243 {1, 1, 3, 0, /* 0xa7 */
1250 {0, 1, 3, 0, /* 0xa8 */
1257 {1, 1, 4, 0, /* 0xa9 */
1264 {0, 1, 4, 0, /* 0xaa */
1271 {1, 1, 4, 0, /* 0xab */
1278 {0, 1, 3, 0, /* 0xac */
1285 {1, 1, 4, 0, /* 0xad */
1292 {0, 1, 3, 0, /* 0xae */
1299 {1, 1, 3, 0, /* 0xaf */
1306 {0, 1, 2, 0, /* 0xb0 */
1313 {1, 1, 3, 0, /* 0xb1 */
1320 {0, 1, 3, 0, /* 0xb2 */
1327 {1, 1, 3, 0, /* 0xb3 */
1334 {0, 1, 3, 0, /* 0xb4 */
1341 {1, 1, 4, 0, /* 0xb5 */
1348 {0, 1, 3, 0, /* 0xb6 */
1355 {1, 1, 3, 0, /* 0xb7 */
1362 {0, 1, 2, 0, /* 0xb8 */
1369 {1, 1, 3, 0, /* 0xb9 */
1376 {0, 1, 3, 0, /* 0xba */
1383 {1, 1, 3, 0, /* 0xbb */
1390 {0, 1, 2, 0, /* 0xbc */
1397 {1, 1, 3, 0, /* 0xbd */
1404 {0, 1, 2, 0, /* 0xbe */
1411 {1, 1, 2, 0, /* 0xbf */
1418 {0, 1, 1, 0, /* 0xc0 */
1425 {1, 1, 2, 0, /* 0xc1 */
1432 {0, 1, 2, 0, /* 0xc2 */
1439 {1, 1, 2, 0, /* 0xc3 */
1446 {0, 1, 2, 0, /* 0xc4 */
1453 {1, 1, 3, 0, /* 0xc5 */
1460 {0, 1, 2, 0, /* 0xc6 */
1467 {1, 1, 2, 0, /* 0xc7 */
1474 {0, 1, 2, 0, /* 0xc8 */
1481 {1, 1, 3, 0, /* 0xc9 */
1488 {0, 1, 3, 0, /* 0xca */
1495 {1, 1, 3, 0, /* 0xcb */
1502 {0, 1, 2, 0, /* 0xcc */
1509 {1, 1, 3, 0, /* 0xcd */
1516 {0, 1, 2, 0, /* 0xce */
1523 {1, 1, 2, 0, /* 0xcf */
1530 {0, 1, 2, 0, /* 0xd0 */
1537 {1, 1, 3, 0, /* 0xd1 */
1544 {0, 1, 3, 0, /* 0xd2 */
1551 {1, 1, 3, 0, /* 0xd3 */
1558 {0, 1, 3, 0, /* 0xd4 */
1565 {1, 1, 4, 0, /* 0xd5 */
1572 {0, 1, 3, 0, /* 0xd6 */
1579 {1, 1, 3, 0, /* 0xd7 */
1586 {0, 1, 2, 0, /* 0xd8 */
1593 {1, 1, 3, 0, /* 0xd9 */
1600 {0, 1, 3, 0, /* 0xda */
1607 {1, 1, 3, 0, /* 0xdb */
1614 {0, 1, 2, 0, /* 0xdc */
1621 {1, 1, 3, 0, /* 0xdd */
1628 {0, 1, 2, 0, /* 0xde */
1635 {1, 1, 2, 0, /* 0xdf */
1642 {0, 1, 1, 0, /* 0xe0 */
1649 {1, 1, 2, 0, /* 0xe1 */
1656 {0, 1, 2, 0, /* 0xe2 */
1663 {1, 1, 2, 0, /* 0xe3 */
1670 {0, 1, 2, 0, /* 0xe4 */
1677 {1, 1, 3, 0, /* 0xe5 */
1684 {0, 1, 2, 0, /* 0xe6 */
1691 {1, 1, 2, 0, /* 0xe7 */
1698 {0, 1, 2, 0, /* 0xe8 */
1705 {1, 1, 3, 0, /* 0xe9 */
1712 {0, 1, 3, 0, /* 0xea */
1719 {1, 1, 3, 0, /* 0xeb */
1726 {0, 1, 2, 0, /* 0xec */
1733 {1, 1, 3, 0, /* 0xed */
1740 {0, 1, 2, 0, /* 0xee */
1747 {1, 1, 2, 0, /* 0xef */
1754 {0, 1, 1, 0, /* 0xf0 */
1761 {1, 1, 2, 0, /* 0xf1 */
1768 {0, 1, 2, 0, /* 0xf2 */
1775 {1, 1, 2, 0, /* 0xf3 */
1782 {0, 1, 2, 0, /* 0xf4 */
1789 {1, 1, 3, 0, /* 0xf5 */
1796 {0, 1, 2, 0, /* 0xf6 */
1803 {1, 1, 2, 0, /* 0xf7 */
1810 {0, 1, 1, 0, /* 0xf8 */
1817 {1, 1, 2, 0, /* 0xf9 */
1824 {0, 1, 2, 0, /* 0xfa */
1831 {1, 1, 2, 0, /* 0xfb */
1838 {0, 1, 1, 0, /* 0xfc */
1845 {1, 1, 2, 0, /* 0xfd */
1852 {0, 1, 1, 0, /* 0xfe */
1859 {1, 1, 1, 0, /* 0xff */
1870 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1871 struct sctp_scoping *scope,
1874 if ((scope->loopback_scope == 0) &&
1875 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1877 * skip loopback if not in scope
1881 switch (ifa->address.sa.sa_family) {
1884 if (scope->ipv4_addr_legal) {
1885 struct sockaddr_in *sin;
1887 sin = &ifa->address.sin;
1888 if (sin->sin_addr.s_addr == 0) {
1889 /* not in scope, unspecified */
1892 if ((scope->ipv4_local_scope == 0) &&
1893 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1894 /* private address not in scope */
1904 if (scope->ipv6_addr_legal) {
1905 struct sockaddr_in6 *sin6;
1908 * Must update the flags, bummer, which means any
1909 * IFA locks must now be applied HERE <->
1912 sctp_gather_internal_ifa_flags(ifa);
1914 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1917 /* ok to use deprecated addresses? */
1918 sin6 = &ifa->address.sin6;
1919 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1920 /* skip unspecified addresses */
1923 if ( /* (local_scope == 0) && */
1924 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1927 if ((scope->site_scope == 0) &&
1928 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1942 static struct mbuf *
1943 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1945 #if defined(INET) || defined(INET6)
1946 struct sctp_paramhdr *paramh;
1951 switch (ifa->address.sa.sa_family) {
1954 plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1959 plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1965 #if defined(INET) || defined(INET6)
1966 if (M_TRAILINGSPACE(m) >= plen) {
1967 /* easy case, we just drop it on the end */
1968 paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1971 /* Need more space */
1973 while (SCTP_BUF_NEXT(mret) != NULL) {
1974 mret = SCTP_BUF_NEXT(mret);
1976 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
1977 if (SCTP_BUF_NEXT(mret) == NULL) {
1978 /* We are hosed, can't add more addresses */
1981 mret = SCTP_BUF_NEXT(mret);
1982 paramh = mtod(mret, struct sctp_paramhdr *);
1984 /* now add the parameter */
1985 switch (ifa->address.sa.sa_family) {
1989 struct sctp_ipv4addr_param *ipv4p;
1990 struct sockaddr_in *sin;
1992 sin = &ifa->address.sin;
1993 ipv4p = (struct sctp_ipv4addr_param *)paramh;
1994 paramh->param_type = htons(SCTP_IPV4_ADDRESS);
1995 paramh->param_length = htons(plen);
1996 ipv4p->addr = sin->sin_addr.s_addr;
1997 SCTP_BUF_LEN(mret) += plen;
2004 struct sctp_ipv6addr_param *ipv6p;
2005 struct sockaddr_in6 *sin6;
2007 sin6 = &ifa->address.sin6;
2008 ipv6p = (struct sctp_ipv6addr_param *)paramh;
2009 paramh->param_type = htons(SCTP_IPV6_ADDRESS);
2010 paramh->param_length = htons(plen);
2011 memcpy(ipv6p->addr, &sin6->sin6_addr,
2012 sizeof(ipv6p->addr));
2013 /* clear embedded scope in the address */
2014 in6_clearscope((struct in6_addr *)ipv6p->addr);
2015 SCTP_BUF_LEN(mret) += plen;
2031 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2032 struct sctp_scoping *scope,
2033 struct mbuf *m_at, int cnt_inits_to,
2034 uint16_t *padding_len, uint16_t *chunk_len)
2036 struct sctp_vrf *vrf = NULL;
2037 int cnt, limit_out = 0, total_count;
2040 vrf_id = inp->def_vrf_id;
2041 SCTP_IPI_ADDR_RLOCK();
2042 vrf = sctp_find_vrf(vrf_id);
2044 SCTP_IPI_ADDR_RUNLOCK();
2047 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2048 struct sctp_ifa *sctp_ifap;
2049 struct sctp_ifn *sctp_ifnp;
2052 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2054 cnt = SCTP_ADDRESS_LIMIT;
2057 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2058 if ((scope->loopback_scope == 0) &&
2059 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2061 * Skip loopback devices if loopback_scope
2066 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2068 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2069 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2070 &sctp_ifap->address.sin.sin_addr) != 0)) {
2075 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2076 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2077 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2081 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2084 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2088 if (cnt > SCTP_ADDRESS_LIMIT) {
2092 if (cnt > SCTP_ADDRESS_LIMIT) {
2099 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2101 if ((scope->loopback_scope == 0) &&
2102 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2104 * Skip loopback devices if
2105 * loopback_scope not set
2109 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2111 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2112 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2113 &sctp_ifap->address.sin.sin_addr) != 0)) {
2118 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2119 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2120 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2124 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2127 if (sctp_is_address_in_scope(sctp_ifap,
2131 if ((chunk_len != NULL) &&
2132 (padding_len != NULL) &&
2133 (*padding_len > 0)) {
2134 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2135 SCTP_BUF_LEN(m_at) += *padding_len;
2136 *chunk_len += *padding_len;
2139 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2150 if (total_count > SCTP_ADDRESS_LIMIT) {
2151 /* No more addresses */
2159 struct sctp_laddr *laddr;
2162 /* First, how many? */
2163 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2164 if (laddr->ifa == NULL) {
2167 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2169 * Address being deleted by the system, don't
2173 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2175 * Address being deleted on this ep, don't
2180 if (sctp_is_address_in_scope(laddr->ifa,
2187 * To get through a NAT we only list addresses if we have
2188 * more than one. That way if you just bind a single address
2189 * we let the source of the init dictate our address.
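 * For example (illustrative address): an endpoint bound only to 10.0.0.10
 * behind a NAT lists no addresses at all, so the peer associates with
 * whatever public source address the NAT stamped on the packet rather
 * than with the unroutable 10.0.0.10.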
2193 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2194 if (laddr->ifa == NULL) {
2197 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2200 if (sctp_is_address_in_scope(laddr->ifa,
2204 if ((chunk_len != NULL) &&
2205 (padding_len != NULL) &&
2206 (*padding_len > 0)) {
2207 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2208 SCTP_BUF_LEN(m_at) += *padding_len;
2209 *chunk_len += *padding_len;
2212 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2214 if (cnt >= SCTP_ADDRESS_LIMIT) {
2220 SCTP_IPI_ADDR_RUNLOCK();
2224 static struct sctp_ifa *
2225 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2226 uint8_t dest_is_loop,
2227 uint8_t dest_is_priv,
2230 uint8_t dest_is_global = 0;
2232 /* dest_is_priv is true if destination is a private address */
2233 /* dest_is_loop is true if destination is a loopback addresses */
2236 * Here we determine if it's a preferred address. A preferred address
2237 * means it is the same scope or higher scope than the destination.
2238 * L = loopback, P = private, G = global
2239 * -----------------------------------------
2240 * src | dest | result
2241 * ----------------------------------------
2243 * -----------------------------------------
2244 * P | L | yes-v4 no-v6
2245 * -----------------------------------------
2246 * G | L | yes-v4 no-v6
2247 * -----------------------------------------
2249 * -----------------------------------------
2251 * -----------------------------------------
2253 * -----------------------------------------
2255 * -----------------------------------------
2257 * -----------------------------------------
2259 * -----------------------------------------
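 * Read together with the checks below: a loopback source is preferred
 * only for a loopback destination, a global source is not preferred for a
 * private destination (NO:5), and a private source is not preferred for a
 * global destination (NO:7).  "Preferred" is only the first-choice
 * filter; an address failing it may still pass the "acceptable" test
 * defined further down.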
2262 if (ifa->address.sa.sa_family != fam) {
2263 /* forget mis-matched family */
2266 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2269 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2270 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2271 /* Ok the address may be ok */
2273 if (fam == AF_INET6) {
2274 /* ok to use deprecated addresses? no, let's not! */
2275 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2276 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2279 if (ifa->src_is_priv && !ifa->src_is_loop) {
2281 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2285 if (ifa->src_is_glob) {
2287 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2294 * Now that we know what is what, implement our table. This could in
2295 * theory be done slicker (it used to be), but this is
2296 * straightforward and easier to validate :-)
2298 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2299 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2300 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2301 dest_is_loop, dest_is_priv, dest_is_global);
2303 if ((ifa->src_is_loop) && (dest_is_priv)) {
2304 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2307 if ((ifa->src_is_glob) && (dest_is_priv)) {
2308 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2311 if ((ifa->src_is_loop) && (dest_is_global)) {
2312 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2315 if ((ifa->src_is_priv) && (dest_is_global)) {
2316 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2319 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2320 /* it's a preferred address */
2324 static struct sctp_ifa *
2325 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2326 uint8_t dest_is_loop,
2327 uint8_t dest_is_priv,
2330 uint8_t dest_is_global = 0;
2333 * Here we determine if it's an acceptable address. An acceptable
2334 * address means it is the same scope or higher scope but we can
2335 * allow for NAT which means it's ok to have a global dest and a
2338 * L = loopback, P = private, G = global
2339 * -----------------------------------------
2340 * src | dest | result
2341 * -----------------------------------------
2343 * -----------------------------------------
2344 * P | L | yes-v4 no-v6
2345 * -----------------------------------------
2347 * -----------------------------------------
2349 * -----------------------------------------
2351 * -----------------------------------------
2352 * G | P | yes - May not work
2353 * -----------------------------------------
2355 * -----------------------------------------
2356 * P | G | yes - May not work
2357 * -----------------------------------------
2359 * -----------------------------------------
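 * The difference from the "preferred" table above is the two "May not
 * work" rows: to cope with NATs, a global source is still acceptable for
 * a private destination and a private source for a global destination,
 * even though such pairings may not actually be reachable.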
2362 if (ifa->address.sa.sa_family != fam) {
2363 /* forget non-matching family */
2364 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2365 ifa->address.sa.sa_family, fam);
2368 /* Ok the address may be ok */
2369 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2370 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2371 dest_is_loop, dest_is_priv);
2372 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2376 if (fam == AF_INET6) {
2377 /* ok to use deprecated addresses? */
2378 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2381 if (ifa->src_is_priv) {
2382 /* Special case, linklocal to loop */
2389 * Now that we know what is what, implement our table. This could in
2390 * theory be done slicker (it used to be), but this is
2391 * straightforward and easier to validate :-)
2393 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2396 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2399 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2402 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2405 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2406 /* it's an acceptable address */
2411 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2413 struct sctp_laddr *laddr;
2416 /* There are no restrictions, no TCB :-) */
2419 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2420 if (laddr->ifa == NULL) {
2421 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2425 if (laddr->ifa == ifa) {
2426 /* Yes it is on the list */
2435 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2437 struct sctp_laddr *laddr;
2441 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2442 if (laddr->ifa == NULL) {
2443 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2447 if ((laddr->ifa == ifa) && laddr->action == 0)
2456 static struct sctp_ifa *
2457 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2460 int non_asoc_addr_ok,
2461 uint8_t dest_is_priv,
2462 uint8_t dest_is_loop,
2465 struct sctp_laddr *laddr, *starting_point;
2468 struct sctp_ifn *sctp_ifn;
2469 struct sctp_ifa *sctp_ifa, *sifa;
2470 struct sctp_vrf *vrf;
2473 vrf = sctp_find_vrf(vrf_id);
2477 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2478 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2479 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2481 * First question: is the ifn we will emit on in our list? If so, we
2482 * want such an address. Note that we first looked for a preferred
2486 /* is a preferred one on the interface we route out? */
2487 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2489 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2490 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2491 &sctp_ifa->address.sin.sin_addr) != 0)) {
2496 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2497 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2498 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2502 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2503 (non_asoc_addr_ok == 0))
2505 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2510 if (sctp_is_addr_in_ep(inp, sifa)) {
2511 atomic_add_int(&sifa->refcount, 1);
2517 * Ok, now we need to find one on the list of the addresses. We
2518 * can't get one on the emitting interface, so let's first find a
2519 * preferred one. If not that, an acceptable one; otherwise... we
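 * Both passes below start from inp->next_addr_touse rather than the head
 * of the list; when the end is reached the scan wraps back to the start
 * once (tracked by resettotop) before giving up.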
2522 starting_point = inp->next_addr_touse;
2524 if (inp->next_addr_touse == NULL) {
2525 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2528 for (laddr = inp->next_addr_touse; laddr;
2529 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2530 if (laddr->ifa == NULL) {
2531 /* address has been removed */
2534 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2535 /* address is being deleted */
2538 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2542 atomic_add_int(&sifa->refcount, 1);
2545 if (resettotop == 0) {
2546 inp->next_addr_touse = NULL;
2550 inp->next_addr_touse = starting_point;
2553 if (inp->next_addr_touse == NULL) {
2554 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2558 /* ok, what about an acceptable address in the inp */
2559 for (laddr = inp->next_addr_touse; laddr;
2560 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2561 if (laddr->ifa == NULL) {
2562 /* address has been removed */
2565 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2566 /* address is being deleted */
2569 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2573 atomic_add_int(&sifa->refcount, 1);
2576 if (resettotop == 0) {
2577 inp->next_addr_touse = NULL;
2578 goto once_again_too;
2582 * no address bound can be a source for the destination we are in
2590 static struct sctp_ifa *
2591 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2592 struct sctp_tcb *stcb,
2595 uint8_t dest_is_priv,
2596 uint8_t dest_is_loop,
2597 int non_asoc_addr_ok,
2600 struct sctp_laddr *laddr, *starting_point;
2602 struct sctp_ifn *sctp_ifn;
2603 struct sctp_ifa *sctp_ifa, *sifa;
2604 uint8_t start_at_beginning = 0;
2605 struct sctp_vrf *vrf;
2609 * First question: is the ifn we will emit on in our list? If so, we
2612 vrf = sctp_find_vrf(vrf_id);
2616 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2617 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2618 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2621 * First question: is the ifn we will emit on in our list? If so,
2622 * we want that one. First we look for a preferred. Second, we go
2623 * for an acceptable.
2626 /* first try for a preferred address on the ep */
2627 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2629 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2630 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2631 &sctp_ifa->address.sin.sin_addr) != 0)) {
2636 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2637 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2638 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2642 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2644 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2645 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2648 if (((non_asoc_addr_ok == 0) &&
2649 (sctp_is_addr_restricted(stcb, sifa))) ||
2650 (non_asoc_addr_ok &&
2651 (sctp_is_addr_restricted(stcb, sifa)) &&
2652 (!sctp_is_addr_pending(stcb, sifa)))) {
2653 /* on the no-no list */
2656 atomic_add_int(&sifa->refcount, 1);
2660 /* next try for an acceptable address on the ep */
2661 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2663 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2664 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2665 &sctp_ifa->address.sin.sin_addr) != 0)) {
2670 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2671 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2672 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2676 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2678 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2679 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2682 if (((non_asoc_addr_ok == 0) &&
2683 (sctp_is_addr_restricted(stcb, sifa))) ||
2684 (non_asoc_addr_ok &&
2685 (sctp_is_addr_restricted(stcb, sifa)) &&
2686 (!sctp_is_addr_pending(stcb, sifa)))) {
2687 /* on the no-no list */
2690 atomic_add_int(&sifa->refcount, 1);
2697 * If we can't find one like that, then we must look at all addresses
2698 * bound, picking first a preferred one and then an acceptable one.
2700 starting_point = stcb->asoc.last_used_address;
2702 if (stcb->asoc.last_used_address == NULL) {
2703 start_at_beginning = 1;
2704 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2706 /* search beginning with the last used address */
2707 for (laddr = stcb->asoc.last_used_address; laddr;
2708 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2709 if (laddr->ifa == NULL) {
2710 /* address has been removed */
2713 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2714 /* address is being deleted */
2717 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2720 if (((non_asoc_addr_ok == 0) &&
2721 (sctp_is_addr_restricted(stcb, sifa))) ||
2722 (non_asoc_addr_ok &&
2723 (sctp_is_addr_restricted(stcb, sifa)) &&
2724 (!sctp_is_addr_pending(stcb, sifa)))) {
2725 /* on the no-no list */
2728 stcb->asoc.last_used_address = laddr;
2729 atomic_add_int(&sifa->refcount, 1);
2732 if (start_at_beginning == 0) {
2733 stcb->asoc.last_used_address = NULL;
2734 goto sctp_from_the_top;
2736 /* now try for any higher scope than the destination */
2737 stcb->asoc.last_used_address = starting_point;
2738 start_at_beginning = 0;
2740 if (stcb->asoc.last_used_address == NULL) {
2741 start_at_beginning = 1;
2742 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2744 /* search beginning with the last used address */
2745 for (laddr = stcb->asoc.last_used_address; laddr;
2746 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2747 if (laddr->ifa == NULL) {
2748 /* address has been removed */
2751 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2752 /* address is being deleted */
2755 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2759 if (((non_asoc_addr_ok == 0) &&
2760 (sctp_is_addr_restricted(stcb, sifa))) ||
2761 (non_asoc_addr_ok &&
2762 (sctp_is_addr_restricted(stcb, sifa)) &&
2763 (!sctp_is_addr_pending(stcb, sifa)))) {
2764 /* on the no-no list */
2767 stcb->asoc.last_used_address = laddr;
2768 atomic_add_int(&sifa->refcount, 1);
2771 if (start_at_beginning == 0) {
2772 stcb->asoc.last_used_address = NULL;
2773 goto sctp_from_the_top2;
2778 static struct sctp_ifa *
2779 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2780 struct sctp_inpcb *inp,
2781 struct sctp_tcb *stcb,
2782 int non_asoc_addr_ok,
2783 uint8_t dest_is_loop,
2784 uint8_t dest_is_priv,
2790 struct sctp_ifa *ifa, *sifa;
2791 int num_eligible_addr = 0;
2793 struct sockaddr_in6 sin6, lsa6;
2795 if (fam == AF_INET6) {
2796 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2797 (void)sa6_recoverscope(&sin6);
2800 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2802 if ((ifa->address.sa.sa_family == AF_INET) &&
2803 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2804 &ifa->address.sin.sin_addr) != 0)) {
2809 if ((ifa->address.sa.sa_family == AF_INET6) &&
2810 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2811 &ifa->address.sin6.sin6_addr) != 0)) {
2815 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2816 (non_asoc_addr_ok == 0))
2818 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2823 if (fam == AF_INET6 &&
2825 sifa->src_is_loop && sifa->src_is_priv) {
2827 * don't allow fe80::1 to be a src on loop ::1, we
2828 * don't list it to the peer so we will get an
2833 if (fam == AF_INET6 &&
2834 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2835 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2837 * link-local <-> link-local must belong to the same
2840 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2841 (void)sa6_recoverscope(&lsa6);
2842 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2849 * Check if the IPv6 address matches the next-hop. In the
2850 * mobile case, an old IPv6 address may not be deleted from the
2851 * interface. Then, the interface has both previous and new
2852 * addresses. We should use the one corresponding to the
2853 * next-hop. (by micchie)
2856 if (stcb && fam == AF_INET6 &&
2857 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2858 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2865 /* Avoid topologically incorrect IPv4 address */
2866 if (stcb && fam == AF_INET &&
2867 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2868 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2874 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2877 if (((non_asoc_addr_ok == 0) &&
2878 (sctp_is_addr_restricted(stcb, sifa))) ||
2879 (non_asoc_addr_ok &&
2880 (sctp_is_addr_restricted(stcb, sifa)) &&
2881 (!sctp_is_addr_pending(stcb, sifa)))) {
2883 * It is restricted for some reason..
2884 * probably not yet added.
2889 if (num_eligible_addr >= addr_wanted) {
2892 num_eligible_addr++;
2899 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2900 struct sctp_inpcb *inp,
2901 struct sctp_tcb *stcb,
2902 int non_asoc_addr_ok,
2903 uint8_t dest_is_loop,
2904 uint8_t dest_is_priv,
2907 struct sctp_ifa *ifa, *sifa;
2908 int num_eligible_addr = 0;
2910 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2912 if ((ifa->address.sa.sa_family == AF_INET) &&
2913 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2914 &ifa->address.sin.sin_addr) != 0)) {
2919 if ((ifa->address.sa.sa_family == AF_INET6) &&
2921 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2922 &ifa->address.sin6.sin6_addr) != 0)) {
2926 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2927 (non_asoc_addr_ok == 0)) {
2930 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2936 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2939 if (((non_asoc_addr_ok == 0) &&
2940 (sctp_is_addr_restricted(stcb, sifa))) ||
2941 (non_asoc_addr_ok &&
2942 (sctp_is_addr_restricted(stcb, sifa)) &&
2943 (!sctp_is_addr_pending(stcb, sifa)))) {
2945 * It is restricted for some reason..
2946 * probably not yet added.
2951 num_eligible_addr++;
2953 return (num_eligible_addr);
2956 static struct sctp_ifa *
2957 sctp_choose_boundall(struct sctp_inpcb *inp,
2958 struct sctp_tcb *stcb,
2959 struct sctp_nets *net,
2962 uint8_t dest_is_priv,
2963 uint8_t dest_is_loop,
2964 int non_asoc_addr_ok,
2967 int cur_addr_num = 0, num_preferred = 0;
2969 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2970 struct sctp_ifa *sctp_ifa, *sifa;
2972 struct sctp_vrf *vrf;
2978 * For boundall we can use any address in the association.
2979 * If non_asoc_addr_ok is set we can use any address (at least in
2980 * theory). So we look for preferred addresses first. If we find one,
2981 * we use it. Otherwise we next try to get an address on the
2982 * interface, which we should be able to do (unless non_asoc_addr_ok
2983 * is false and we are routed out that way). In these cases where we
2984 * can't use the address of the interface we go through all the
2985 * ifn's looking for an address we can use and fill that in. Punting
2986 * means we send back address 0, which will probably cause problems
2987 * actually since then IP will fill in the address of the route ifn,
2988 * which means we probably already rejected it.. i.e. here comes an
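 *
 * The search below proceeds in stages: plan A tries a preferred address
 * on the interface the route will emit on, plan B scans every interface
 * for a preferred address, plan C falls back to an acceptable address on
 * the emit interface, and plan D takes any acceptable address on any
 * interface.  If nothing is found and the IPv4 local scope was off,
 * plans C and D are retried once with private addresses allowed.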
2991 vrf = sctp_find_vrf(vrf_id);
2995 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2996 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2997 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2998 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2999 if (sctp_ifn == NULL) {
3000 /* ?? We don't have this guy ?? */
3001 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
3002 goto bound_all_plan_b;
3004 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
3005 ifn_index, sctp_ifn->ifn_name);
3008 cur_addr_num = net->indx_of_eligible_next_to_use;
3010 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3015 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3016 num_preferred, sctp_ifn->ifn_name);
3017 if (num_preferred == 0) {
3019 * no eligible addresses, we must use some other interface
3020 * address if we can find one.
3022 goto bound_all_plan_b;
3025 * Ok we have num_eligible_addr set with how many we can use, this
3026 * may vary from call to call due to addresses being deprecated
3029 if (cur_addr_num >= num_preferred) {
3033 * select the nth address from the list (where cur_addr_num is the
3034 * nth) and 0 is the first one, 1 is the second one etc...
3036 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3038 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3039 dest_is_priv, cur_addr_num, fam, ro);
3041 /* if sctp_ifa is NULL, something changed; fall to plan b. */
3043 atomic_add_int(&sctp_ifa->refcount, 1);
3045 /* save off the index of the next one we will want */
3046 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
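/*
 * The index is kept per net and bumped after every selection, so
 * successive selections rotate through this interface's preferred
 * addresses instead of always returning the first one.
 */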
3051 * plan_b: Look at all interfaces and find a preferred address. If
3052 * no preferred address is found, fall through to plan_c.
3055 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3056 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3057 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3058 sctp_ifn->ifn_name);
3059 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3060 /* wrong base scope */
3061 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3064 if ((sctp_ifn == looked_at) && looked_at) {
3065 /* already looked at this guy */
3066 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3069 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3070 dest_is_loop, dest_is_priv, fam);
3071 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3072 "Found ifn:%p %d preferred source addresses\n",
3073 ifn, num_preferred);
3074 if (num_preferred == 0) {
3075 /* None on this interface. */
3076 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3079 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3080 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3081 num_preferred, (void *)sctp_ifn, cur_addr_num);
3084 * Ok we have num_eligible_addr set with how many we can
3085 * use, this may vary from call to call due to addresses
3086 * being deprecated etc..
3088 if (cur_addr_num >= num_preferred) {
3091 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3092 dest_is_priv, cur_addr_num, fam, ro);
3096 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3097 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3099 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3100 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3101 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3102 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3104 atomic_add_int(&sifa->refcount, 1);
3108 again_with_private_addresses_allowed:
3110 /* plan_c: do we have an acceptable address on the emit interface */
3112 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3113 if (emit_ifn == NULL) {
3114 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3117 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3118 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3120 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3121 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3122 &sctp_ifa->address.sin.sin_addr) != 0)) {
3123 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3128 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3129 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3130 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3131 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3135 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3136 (non_asoc_addr_ok == 0)) {
3137 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3140 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3143 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3147 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3148 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3152 if (((non_asoc_addr_ok == 0) &&
3153 (sctp_is_addr_restricted(stcb, sifa))) ||
3154 (non_asoc_addr_ok &&
3155 (sctp_is_addr_restricted(stcb, sifa)) &&
3156 (!sctp_is_addr_pending(stcb, sifa)))) {
3158 * It is restricted for some reason..
3159 * probably not yet added.
3161 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3166 atomic_add_int(&sifa->refcount, 1);
3171 * plan_d: We are in trouble. No preferred address on the emit
3172 * interface, and not even a preferred address on any interface. Go
3173 * out and see if we can find an acceptable address somewhere
3174 * amongst all interfaces.
3176 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3177 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3178 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3179 /* wrong base scope */
3182 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3184 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3185 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3186 &sctp_ifa->address.sin.sin_addr) != 0)) {
3191 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3192 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3193 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3197 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3198 (non_asoc_addr_ok == 0))
3200 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3206 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3210 if (((non_asoc_addr_ok == 0) &&
3211 (sctp_is_addr_restricted(stcb, sifa))) ||
3212 (non_asoc_addr_ok &&
3213 (sctp_is_addr_restricted(stcb, sifa)) &&
3214 (!sctp_is_addr_pending(stcb, sifa)))) {
3216 * It is restricted for some
3217 * reason.. probably not yet added.
3228 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3229 stcb->asoc.scope.ipv4_local_scope = 1;
3231 goto again_with_private_addresses_allowed;
3232 } else if (retried == 1) {
3233 stcb->asoc.scope.ipv4_local_scope = 0;
3241 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3242 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3243 /* wrong base scope */
3246 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3247 struct sctp_ifa *tmp_sifa;
3250 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3251 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3252 &sctp_ifa->address.sin.sin_addr) != 0)) {
3257 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3258 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3259 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3263 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3264 (non_asoc_addr_ok == 0))
3266 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3269 if (tmp_sifa == NULL) {
3272 if (tmp_sifa == sifa) {
3276 if (sctp_is_address_in_scope(tmp_sifa,
3277 &stcb->asoc.scope, 0) == 0) {
3280 if (((non_asoc_addr_ok == 0) &&
3281 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3282 (non_asoc_addr_ok &&
3283 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3284 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3294 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3295 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3296 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3301 atomic_add_int(&sifa->refcount, 1);
3309 /* tcb may be NULL */
3311 sctp_source_address_selection(struct sctp_inpcb *inp,
3312 struct sctp_tcb *stcb,
3314 struct sctp_nets *net,
3315 int non_asoc_addr_ok, uint32_t vrf_id)
3317 struct sctp_ifa *answer;
3318 uint8_t dest_is_priv, dest_is_loop;
3321 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3324 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3329 * - Find the route if needed, cache if I can.
3330 * - Look at the interface address in the route. Is it in the bound list? If so we
3331 * have the best source.
3332 * - If not we must rotate amongst the addresses.
3336 * Do we need to pay attention to scope? We can have a private address
3337 * or a global address we are sourcing or sending to. So if we draw
3339 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3341 * ------------------------------------------
3342 * source * dest * result
3343 * -----------------------------------------
3344 * <a> Private * Global * NAT
3345 * -----------------------------------------
3346 * <b> Private * Private * No problem
3347 * -----------------------------------------
3348 * <c> Global * Private * Huh, How will this work?
3349 * -----------------------------------------
3350 * <d> Global * Global * No Problem
3351 *------------------------------------------
3352 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3354 *------------------------------------------
3355 * source * dest * result
3356 * -----------------------------------------
3357 * <a> Linklocal * Global *
3358 * -----------------------------------------
3359 * <b> Linklocal * Linklocal * No problem
3360 * -----------------------------------------
3361 * <c> Global * Linklocal * Huh, How will this work?
3362 * -----------------------------------------
3363 * <d> Global * Global * No Problem
3364 *------------------------------------------
3365 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3367 * And then we add to that what happens if there are multiple addresses
3368 * assigned to an interface. Remember the ifa on a ifn is a linked
3369 * list of addresses. So one interface can have more than one IP
3370 * address. What happens if we have both a private and a global
3371 * address? Do we then use context of destination to sort out which
3372 * one is best? And what about NATs? Sending P->G may get you a NAT
3373 * translation, or should you select the G that's on the interface in
3378 * - count the number of addresses on the interface.
3379 * - if it is one, no problem except case <c>.
3380 * For <a> we will assume a NAT out there.
3381 * - if there are more than one, then we need to worry about scope P
3382 * or G. We should prefer G -> G and P -> P if possible.
3383 * Then as a secondary fall back to mixed types G->P being a last
3385 * - The above all works for bound-all, but for bound-specific we need to
3386 * use the same concept but instead only consider the bound
3387 * addresses. If the bound set is NOT assigned to the interface then
3388 * we must use rotation amongst the bound addresses..
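 *
 * Worked example (illustrative addresses): when sending to 10.1.1.5 (a
 * private destination) from a host with both 10.1.1.2 and 203.0.113.7
 * bound, the private 10.1.1.2 is the preferred source (P -> P), while the
 * global 203.0.113.7 would only be used as an "acceptable" fallback.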
3390 if (ro->ro_rt == NULL) {
3392 * Need a route to cache.
3394 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3396 if (ro->ro_rt == NULL) {
3399 fam = ro->ro_dst.sa_family;
3400 dest_is_priv = dest_is_loop = 0;
3401 /* Setup our scopes for the destination */
3405 /* Scope based on outbound address */
3406 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3409 /* mark it as local */
3410 net->addr_is_local = 1;
3412 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3419 /* Scope based on outbound address */
3420 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3421 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3423 * If the address is a loopback address, which
3424 * consists of "::1" OR "fe80::1%lo0", we are
3425 * loopback scope. But we don't use dest_is_priv
3426 * (link local addresses).
3430 /* mark it as local */
3431 net->addr_is_local = 1;
3433 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3439 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3440 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3441 SCTP_IPI_ADDR_RLOCK();
3442 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3446 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3447 dest_is_priv, dest_is_loop,
3448 non_asoc_addr_ok, fam);
3449 SCTP_IPI_ADDR_RUNLOCK();
3456 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3457 vrf_id, dest_is_priv,
3459 non_asoc_addr_ok, fam);
3461 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3466 SCTP_IPI_ADDR_RUNLOCK();
3471 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3474 struct sctp_sndinfo sndinfo;
3475 struct sctp_prinfo prinfo;
3476 struct sctp_authinfo authinfo;
3477 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3481 * Independent of how many mbufs, find the c_type inside the control
3482 * structure and copy out the data.
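 * The loop below walks the cmsg headers, advancing 'off' by
 * CMSG_ALIGN(cmsg_len) on every pass and giving up if a header is
 * truncated or claims more data than remains in the control mbuf.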
3485 tot_len = SCTP_BUF_LEN(control);
3486 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3487 rem_len = tot_len - off;
3488 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3489 /* There is not enough room for one more. */
3492 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3493 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3494 /* We don't have a complete CMSG header. */
3497 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3498 /* We don't have the complete CMSG. */
3501 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3502 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3503 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3504 ((c_type == cmh.cmsg_type) ||
3505 ((c_type == SCTP_SNDRCV) &&
3506 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3507 (cmh.cmsg_type == SCTP_PRINFO) ||
3508 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3509 if (c_type == cmh.cmsg_type) {
3510 if (cpsize > INT_MAX) {
3513 if (cmsg_data_len < (int)cpsize) {
3516 /* It is exactly what we want. Copy it out. */
3517 m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data);
3520 struct sctp_sndrcvinfo *sndrcvinfo;
3522 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3524 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3527 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3529 switch (cmh.cmsg_type) {
3531 if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
3534 m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3535 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3536 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3537 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3538 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3539 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3542 if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
3545 m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3546 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3547 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3549 sndrcvinfo->sinfo_timetolive = 0;
3551 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3554 if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
3557 m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3558 sndrcvinfo->sinfo_keynumber_valid = 1;
3559 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3572 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3575 struct sctp_initmsg initmsg;
3577 struct sockaddr_in sin;
3580 struct sockaddr_in6 sin6;
3582 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3584 tot_len = SCTP_BUF_LEN(control);
3585 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3586 rem_len = tot_len - off;
3587 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3588 /* There is not enough room for one more. */
3592 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3593 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3594 /* We don't have a complete CMSG header. */
3598 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3599 /* We don't have the complete CMSG. */
3603 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3604 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3605 if (cmh.cmsg_level == IPPROTO_SCTP) {
3606 switch (cmh.cmsg_type) {
3608 if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
3612 m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3613 if (initmsg.sinit_max_attempts)
3614 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3615 if (initmsg.sinit_num_ostreams)
3616 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3617 if (initmsg.sinit_max_instreams)
3618 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3619 if (initmsg.sinit_max_init_timeo)
3620 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3621 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3622 struct sctp_stream_out *tmp_str;
3624 #if defined(SCTP_DETAILED_STR_STATS)
3628 /* Default is NOT correct */
3629 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3630 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3631 SCTP_TCB_UNLOCK(stcb);
3632 SCTP_MALLOC(tmp_str,
3633 struct sctp_stream_out *,
3634 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3636 SCTP_TCB_LOCK(stcb);
3637 if (tmp_str != NULL) {
3638 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3639 stcb->asoc.strmout = tmp_str;
3640 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3642 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3644 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3645 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3646 stcb->asoc.strmout[i].chunks_on_queues = 0;
3647 stcb->asoc.strmout[i].next_mid_ordered = 0;
3648 stcb->asoc.strmout[i].next_mid_unordered = 0;
3649 #if defined(SCTP_DETAILED_STR_STATS)
3650 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3651 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3652 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3655 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3656 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3658 stcb->asoc.strmout[i].sid = i;
3659 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3660 stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3661 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3666 case SCTP_DSTADDRV4:
3667 if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3671 memset(&sin, 0, sizeof(struct sockaddr_in));
3672 sin.sin_family = AF_INET;
3673 sin.sin_len = sizeof(struct sockaddr_in);
3674 sin.sin_port = stcb->rport;
3675 m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3676 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3677 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3678 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3682 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3683 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3690 case SCTP_DSTADDRV6:
3691 if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3695 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3696 sin6.sin6_family = AF_INET6;
3697 sin6.sin6_len = sizeof(struct sockaddr_in6);
3698 sin6.sin6_port = stcb->rport;
3699 m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3700 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3701 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3706 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3707 in6_sin6_2_sin(&sin, &sin6);
3708 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3709 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3710 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3714 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3715 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3721 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3722 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3736 #if defined(INET) || defined(INET6)
3737 static struct sctp_tcb *
3738 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3740 struct mbuf *control,
3741 struct sctp_nets **net_p,
3745 struct sctp_tcb *stcb;
3746 struct sockaddr *addr;
3748 struct sockaddr_in sin;
3751 struct sockaddr_in6 sin6;
3753 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3755 tot_len = SCTP_BUF_LEN(control);
3756 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3757 rem_len = tot_len - off;
3758 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3759 /* There is not enough room for one more. */
3763 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3764 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3765 /* We don't have a complete CMSG header. */
3769 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3770 /* We don't have the complete CMSG. */
3774 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3775 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3776 if (cmh.cmsg_level == IPPROTO_SCTP) {
3777 switch (cmh.cmsg_type) {
3779 case SCTP_DSTADDRV4:
3780 if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3784 memset(&sin, 0, sizeof(struct sockaddr_in));
3785 sin.sin_family = AF_INET;
3786 sin.sin_len = sizeof(struct sockaddr_in);
3787 sin.sin_port = port;
3788 m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3789 addr = (struct sockaddr *)&sin;
3793 case SCTP_DSTADDRV6:
3794 if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3798 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3799 sin6.sin6_family = AF_INET6;
3800 sin6.sin6_len = sizeof(struct sockaddr_in6);
3801 sin6.sin6_port = port;
3802 m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3804 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3805 in6_sin6_2_sin(&sin, &sin6);
3806 addr = (struct sockaddr *)&sin;
3809 addr = (struct sockaddr *)&sin6;
3817 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3828 static struct mbuf *
3829 sctp_add_cookie(struct mbuf *init, int init_offset,
3830 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3832 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3833 struct sctp_state_cookie *stc;
3834 struct sctp_paramhdr *ph;
3839 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3840 sizeof(struct sctp_paramhdr)), 0,
3841 M_NOWAIT, 1, MT_DATA);
3845 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3846 if (copy_init == NULL) {
3850 #ifdef SCTP_MBUF_LOGGING
3851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3852 sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3855 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3857 if (copy_initack == NULL) {
3859 sctp_m_freem(copy_init);
3862 #ifdef SCTP_MBUF_LOGGING
3863 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3864 sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3867 /* easy side we just drop it on the end */
3868 ph = mtod(mret, struct sctp_paramhdr *);
3869 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3870 sizeof(struct sctp_paramhdr);
3871 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3872 sizeof(struct sctp_paramhdr));
3873 ph->param_type = htons(SCTP_STATE_COOKIE);
3874 ph->param_length = 0; /* fill in at the end */
3875 /* Fill in the stc cookie data */
3876 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3878 /* tack the INIT and then the INIT-ACK onto the chain */
3880 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3881 cookie_sz += SCTP_BUF_LEN(m_at);
3882 if (SCTP_BUF_NEXT(m_at) == NULL) {
3883 SCTP_BUF_NEXT(m_at) = copy_init;
3887 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3888 cookie_sz += SCTP_BUF_LEN(m_at);
3889 if (SCTP_BUF_NEXT(m_at) == NULL) {
3890 SCTP_BUF_NEXT(m_at) = copy_initack;
3894 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3895 cookie_sz += SCTP_BUF_LEN(m_at);
3896 if (SCTP_BUF_NEXT(m_at) == NULL) {
3900 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
3902 /* no space, so free the entire chain */
3906 SCTP_BUF_LEN(sig) = 0;
3907 SCTP_BUF_NEXT(m_at) = sig;
3909 foo = (uint8_t *)(mtod(sig, caddr_t)+sig_offset);
3910 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3912 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3913 cookie_sz += SCTP_SIGNATURE_SIZE;
3914 ph->param_length = htons(cookie_sz);
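/*
 * Sketch of the cookie assembled above: the returned parameter is
 *
 *   [sctp_paramhdr | sctp_state_cookie | copy of the peer's INIT |
 *    copy of our INIT-ACK | signature (SCTP_SIGNATURE_SIZE bytes)]
 *
 * The signature bytes are only zeroed here; the caller is expected to
 * compute the HMAC over the cookie and fill them in (via the returned
 * signature pointer) before the INIT-ACK goes out.
 */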
3920 sctp_get_ect(struct sctp_tcb *stcb)
3922 if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
3923 return (SCTP_ECT0_BIT);
3929 #if defined(INET) || defined(INET6)
3931 sctp_handle_no_route(struct sctp_tcb *stcb,
3932 struct sctp_nets *net,
3935 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3938 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3939 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3940 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3941 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3942 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
3943 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3947 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3948 net->dest_state &= ~SCTP_ADDR_PF;
3952 if (net == stcb->asoc.primary_destination) {
3953 /* need a new primary */
3954 struct sctp_nets *alt;
3956 alt = sctp_find_alternate_net(stcb, net, 0);
3958 if (stcb->asoc.alternate) {
3959 sctp_free_remote_addr(stcb->asoc.alternate);
3961 stcb->asoc.alternate = alt;
3962 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3963 if (net->ro._s_addr) {
3964 sctp_free_ifa(net->ro._s_addr);
3965 net->ro._s_addr = NULL;
3967 net->src_addr_selected = 0;
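/*
 * In short: when no route (or no valid source address) exists for a
 * destination, the address is marked unreachable, the ULP is notified,
 * and if it was the primary destination an alternate net is cached in
 * asoc.alternate; any cached source address is dropped so that it is
 * re-selected later.
 */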
3976 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3977 struct sctp_tcb *stcb, /* may be NULL */
3978 struct sctp_nets *net,
3979 struct sockaddr *to,
3981 uint32_t auth_offset,
3982 struct sctp_auth_chunk *auth,
3983 uint16_t auth_keyid,
3984 int nofragment_flag,
3991 union sctp_sockstore *over_addr,
3992 uint8_t mflowtype, uint32_t mflowid,
3993 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3994 int so_locked SCTP_UNUSED
4000 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4002 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4003 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4004 * - fill in the HMAC digest of any AUTH chunk in the packet.
4005 * - calculate and fill in the SCTP checksum.
4006 * - prepend an IP address header.
4007 * - if boundall use INADDR_ANY.
4008 * - if boundspecific do source address selection.
4009 * - set the fragmentation option for IPv4.
4010 * - On return from IP output, check/adjust mtu size of output
4011 * interface and smallest_mtu size as well.
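/*
 * Roughly, the packet assembled below looks like
 *   [IPv4/IPv6 header][SCTP common header][chunks ...]               (port == 0)
 *   [IPv4/IPv6 header][UDP header][SCTP common header][chunks ...]   (UDP encapsulation, port != 0)
 */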
4013 /* Will need ifdefs around this */
4015 struct sctphdr *sctphdr;
4018 #if defined(INET) || defined(INET6)
4021 #if defined(INET) || defined(INET6)
4023 sctp_route_t *ro = NULL;
4024 struct udphdr *udp = NULL;
4027 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4028 struct socket *so = NULL;
4031 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4032 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4036 #if defined(INET) || defined(INET6)
4038 vrf_id = stcb->asoc.vrf_id;
4040 vrf_id = inp->def_vrf_id;
4043 /* fill in the HMAC digest for any AUTH chunk in the packet */
4044 if ((auth != NULL) && (stcb != NULL)) {
4045 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4049 tos_value = net->dscp;
4051 tos_value = stcb->asoc.default_dscp;
4053 tos_value = inp->sctp_ep.default_dscp;
4056 switch (to->sa_family) {
4060 struct ip *ip = NULL;
4061 sctp_route_t iproute;
4064 len = SCTP_MIN_V4_OVERHEAD;
4066 len += sizeof(struct udphdr);
4068 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4071 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4074 SCTP_ALIGN_TO_END(newm, len);
4075 SCTP_BUF_LEN(newm) = len;
4076 SCTP_BUF_NEXT(newm) = m;
4079 m->m_pkthdr.flowid = net->flowid;
4080 M_HASHTYPE_SET(m, net->flowtype);
4082 m->m_pkthdr.flowid = mflowid;
4083 M_HASHTYPE_SET(m, mflowtype);
4085 packet_length = sctp_calculate_len(m);
4086 ip = mtod(m, struct ip *);
4087 ip->ip_v = IPVERSION;
4088 ip->ip_hl = (sizeof(struct ip) >> 2);
4089 if (tos_value == 0) {
4091 * In particular, this means that it is not set
4092 * at the SCTP layer. So use the value from
4095 tos_value = inp->ip_inp.inp.inp_ip_tos;
4099 tos_value |= sctp_get_ect(stcb);
4101 if ((nofragment_flag) && (port == 0)) {
4102 ip->ip_off = htons(IP_DF);
4104 ip->ip_off = htons(0);
4106 /* FreeBSD has a function for ip_id's */
4109 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4110 ip->ip_len = htons(packet_length);
4111 ip->ip_tos = tos_value;
4113 ip->ip_p = IPPROTO_UDP;
4115 ip->ip_p = IPPROTO_SCTP;
4120 memset(&iproute, 0, sizeof(iproute));
4121 memcpy(&ro->ro_dst, to, to->sa_len);
4123 ro = (sctp_route_t *)&net->ro;
4125 /* Now the address selection part */
4126 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4128 /* call the routine to select the src address */
4129 if (net && out_of_asoc_ok == 0) {
4130 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4131 sctp_free_ifa(net->ro._s_addr);
4132 net->ro._s_addr = NULL;
4133 net->src_addr_selected = 0;
4139 if (net->src_addr_selected == 0) {
4140 /* Cache the source address */
4141 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4144 net->src_addr_selected = 1;
4146 if (net->ro._s_addr == NULL) {
4147 /* No route to host */
4148 net->src_addr_selected = 0;
4149 sctp_handle_no_route(stcb, net, so_locked);
4150 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4152 return (EHOSTUNREACH);
4154 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4156 if (over_addr == NULL) {
4157 struct sctp_ifa *_lsrc;
4159 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4163 if (_lsrc == NULL) {
4164 sctp_handle_no_route(stcb, net, so_locked);
4165 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4167 return (EHOSTUNREACH);
4169 ip->ip_src = _lsrc->address.sin.sin_addr;
4170 sctp_free_ifa(_lsrc);
4172 ip->ip_src = over_addr->sin.sin_addr;
4173 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4177 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4178 sctp_handle_no_route(stcb, net, so_locked);
4179 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4181 return (EHOSTUNREACH);
4183 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4184 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4185 udp->uh_dport = port;
4186 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
4188 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
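/*
 * UDP encapsulation of SCTP (cf. RFC 6951): the outer UDP header uses
 * the local tunneling port as the source and the peer's encapsulation
 * port as the destination. The pseudo-header sum stored here is
 * presumably finalized by SCTP_ENABLE_UDP_CSUM() further below.
 */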
4192 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4194 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4197 sctphdr->src_port = src_port;
4198 sctphdr->dest_port = dest_port;
4199 sctphdr->v_tag = v_tag;
4200 sctphdr->checksum = 0;
4203 * If source address selection fails and we find no
4204 * route then the ip_output should fail as well with
4205 * a NO_ROUTE_TO_HOST type error. We probably should
4206 * catch that somewhere and abort the association
4207 * right away (assuming this is an INIT being sent).
4209 if (ro->ro_rt == NULL) {
4211 * src addr selection failed to find a route
4212 * (or valid source addr), so we can't get
4213 * there from here (yet)!
4215 sctp_handle_no_route(stcb, net, so_locked);
4216 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4218 return (EHOSTUNREACH);
4220 if (ro != &iproute) {
4221 memcpy(&iproute, ro, sizeof(*ro));
4223 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4224 (uint32_t)(ntohl(ip->ip_src.s_addr)));
4225 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4226 (uint32_t)(ntohl(ip->ip_dst.s_addr)));
4227 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4230 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4231 /* failed to prepend data, give up */
4232 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4236 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4238 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4239 SCTP_STAT_INCR(sctps_sendswcrc);
4241 SCTP_ENABLE_UDP_CSUM(o_pak);
4244 m->m_pkthdr.csum_flags = CSUM_SCTP;
4245 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4246 SCTP_STAT_INCR(sctps_sendhwcrc);
4248 #ifdef SCTP_PACKET_LOGGING
4249 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4250 sctp_packet_log(o_pak);
4252 /* send it out. table id is taken from stcb */
4253 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4254 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4255 so = SCTP_INP_SO(inp);
4256 SCTP_SOCKET_UNLOCK(so, 0);
4259 SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr);
4260 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4261 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4262 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4263 atomic_add_int(&stcb->asoc.refcnt, 1);
4264 SCTP_TCB_UNLOCK(stcb);
4265 SCTP_SOCKET_LOCK(so, 0);
4266 SCTP_TCB_LOCK(stcb);
4267 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4271 UDPSTAT_INC(udps_opackets);
4273 SCTP_STAT_INCR(sctps_sendpackets);
4274 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4276 SCTP_STAT_INCR(sctps_senderrors);
4278 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4280 /* free temporary routes */
4283 if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4284 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4287 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4290 mtu -= sizeof(struct udphdr);
4292 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4293 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4297 } else if (ro->ro_rt == NULL) {
4298 /* route was freed */
4299 if (net->ro._s_addr &&
4300 net->src_addr_selected) {
4301 sctp_free_ifa(net->ro._s_addr);
4302 net->ro._s_addr = NULL;
4304 net->src_addr_selected = 0;
4313 uint32_t flowlabel, flowinfo;
4314 struct ip6_hdr *ip6h;
4315 struct route_in6 ip6route;
4317 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4319 struct sockaddr_in6 lsa6_storage;
4321 u_short prev_port = 0;
4325 flowlabel = net->flowlabel;
4327 flowlabel = stcb->asoc.default_flowlabel;
4329 flowlabel = inp->sctp_ep.default_flowlabel;
4331 if (flowlabel == 0) {
4333 * In particular, this means that it is not set
4334 * at the SCTP layer. So use the value from
4337 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4339 flowlabel &= 0x000fffff;
4340 len = SCTP_MIN_OVERHEAD;
4342 len += sizeof(struct udphdr);
4344 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4347 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4350 SCTP_ALIGN_TO_END(newm, len);
4351 SCTP_BUF_LEN(newm) = len;
4352 SCTP_BUF_NEXT(newm) = m;
4355 m->m_pkthdr.flowid = net->flowid;
4356 M_HASHTYPE_SET(m, net->flowtype);
4358 m->m_pkthdr.flowid = mflowid;
4359 M_HASHTYPE_SET(m, mflowtype);
4361 packet_length = sctp_calculate_len(m);
4363 ip6h = mtod(m, struct ip6_hdr *);
4364 /* protect *sin6 from overwrite */
4365 sin6 = (struct sockaddr_in6 *)to;
4369 /* KAME hack: embed scopeid */
4370 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4371 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4376 memset(&ip6route, 0, sizeof(ip6route));
4377 ro = (sctp_route_t *)&ip6route;
4378 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4380 ro = (sctp_route_t *)&net->ro;
4383 * We assume here that inp_flow is in host byte
4384 * order within the TCB!
4386 if (tos_value == 0) {
4388 * In particular, this means that it is not set
4389 * at the SCTP layer. So use the value from
4392 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4396 tos_value |= sctp_get_ect(stcb);
4400 flowinfo |= tos_value;
4402 flowinfo |= flowlabel;
4403 ip6h->ip6_flow = htonl(flowinfo);
4405 ip6h->ip6_nxt = IPPROTO_UDP;
4407 ip6h->ip6_nxt = IPPROTO_SCTP;
4409 ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4410 ip6h->ip6_dst = sin6->sin6_addr;
4413 * Add SRC address selection here: we can only reuse
4414 * to a limited degree the kame src-addr-sel, since
4415 * we can try their selection but it may not be
4418 memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
4419 lsa6_tmp.sin6_family = AF_INET6;
4420 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4422 if (net && out_of_asoc_ok == 0) {
4423 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4424 sctp_free_ifa(net->ro._s_addr);
4425 net->ro._s_addr = NULL;
4426 net->src_addr_selected = 0;
4432 if (net->src_addr_selected == 0) {
4433 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4434 /* KAME hack: embed scopeid */
4435 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4436 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4440 /* Cache the source address */
4441 net->ro._s_addr = sctp_source_address_selection(inp,
4447 (void)sa6_recoverscope(sin6);
4448 net->src_addr_selected = 1;
4450 if (net->ro._s_addr == NULL) {
4451 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4452 net->src_addr_selected = 0;
4453 sctp_handle_no_route(stcb, net, so_locked);
4454 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4456 return (EHOSTUNREACH);
4458 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4460 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4461 /* KAME hack: embed scopeid */
4462 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4463 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4467 if (over_addr == NULL) {
4468 struct sctp_ifa *_lsrc;
4470 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4474 if (_lsrc == NULL) {
4475 sctp_handle_no_route(stcb, net, so_locked);
4476 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4478 return (EHOSTUNREACH);
4480 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4481 sctp_free_ifa(_lsrc);
4483 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4484 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4486 (void)sa6_recoverscope(sin6);
4488 lsa6->sin6_port = inp->sctp_lport;
4490 if (ro->ro_rt == NULL) {
4492 * src addr selection failed to find a route
4493 * (or valid source addr), so we can't get
4496 sctp_handle_no_route(stcb, net, so_locked);
4497 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4499 return (EHOSTUNREACH);
4502 * XXX: sa6 may not have a valid sin6_scope_id in
4503 * the non-SCOPEDROUTING case.
4505 memset(&lsa6_storage, 0, sizeof(lsa6_storage));
4506 lsa6_storage.sin6_family = AF_INET6;
4507 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4508 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4509 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4510 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4515 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4516 lsa6_storage.sin6_port = inp->sctp_lport;
4517 lsa6 = &lsa6_storage;
4518 ip6h->ip6_src = lsa6->sin6_addr;
4521 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4522 sctp_handle_no_route(stcb, net, so_locked);
4523 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4525 return (EHOSTUNREACH);
4527 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4528 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4529 udp->uh_dport = port;
4530 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4532 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4534 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4537 sctphdr->src_port = src_port;
4538 sctphdr->dest_port = dest_port;
4539 sctphdr->v_tag = v_tag;
4540 sctphdr->checksum = 0;
4543 * We set the hop limit now since there is a good
4544 * chance that our ro pointer is now filled
4546 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4547 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4550 /* Copy to be sure something bad is not happening */
4551 sin6->sin6_addr = ip6h->ip6_dst;
4552 lsa6->sin6_addr = ip6h->ip6_src;
4555 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4556 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4557 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4558 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4559 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4561 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4563 * preserve the port and scope for link
4566 prev_scope = sin6->sin6_scope_id;
4567 prev_port = sin6->sin6_port;
4570 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4571 /* failed to prepend data, give up */
4573 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4576 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4578 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4579 SCTP_STAT_INCR(sctps_sendswcrc);
4580 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4581 udp->uh_sum = 0xffff;
4584 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4585 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4586 SCTP_STAT_INCR(sctps_sendhwcrc);
4588 /* send it out. table id is taken from stcb */
4589 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4590 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4591 so = SCTP_INP_SO(inp);
4592 SCTP_SOCKET_UNLOCK(so, 0);
4595 #ifdef SCTP_PACKET_LOGGING
4596 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4597 sctp_packet_log(o_pak);
4599 SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr);
4600 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4601 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4602 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4603 atomic_add_int(&stcb->asoc.refcnt, 1);
4604 SCTP_TCB_UNLOCK(stcb);
4605 SCTP_SOCKET_LOCK(so, 0);
4606 SCTP_TCB_LOCK(stcb);
4607 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4611 /* for link local this must be done */
4612 sin6->sin6_scope_id = prev_scope;
4613 sin6->sin6_port = prev_port;
4615 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4617 UDPSTAT_INC(udps_opackets);
4619 SCTP_STAT_INCR(sctps_sendpackets);
4620 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4622 SCTP_STAT_INCR(sctps_senderrors);
4625 /* Now if we had a temp route, free it */
4629 * PMTU check versus smallest asoc MTU goes
4632 if (ro->ro_rt == NULL) {
4633 /* Route was freed */
4634 if (net->ro._s_addr &&
4635 net->src_addr_selected) {
4636 sctp_free_ifa(net->ro._s_addr);
4637 net->ro._s_addr = NULL;
4639 net->src_addr_selected = 0;
4641 if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4642 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4645 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4648 mtu -= sizeof(struct udphdr);
4650 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4651 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4656 if (ND_IFINFO(ifp)->linkmtu &&
4657 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4658 sctp_mtu_size_reset(inp,
4660 ND_IFINFO(ifp)->linkmtu);
4668 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4669 ((struct sockaddr *)to)->sa_family);
4671 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4678 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4679 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4684 struct mbuf *m, *m_last;
4685 struct sctp_nets *net;
4686 struct sctp_init_chunk *init;
4687 struct sctp_supported_addr_param *sup_addr;
4688 struct sctp_adaptation_layer_indication *ali;
4689 struct sctp_supported_chunk_types_param *pr_supported;
4690 struct sctp_paramhdr *ph;
4691 int cnt_inits_to = 0;
4693 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4695 /* INITs always go to the primary (and usually the only) address */
4696 net = stcb->asoc.primary_destination;
4698 net = TAILQ_FIRST(&stcb->asoc.nets);
4703 /* we confirm any address we send an INIT to */
4704 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4705 (void)sctp_set_primary_addr(stcb, NULL, net);
4707 /* we confirm any address we send an INIT to */
4708 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4710 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4712 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4714 * Special hook: if we are sending to a link-local address, it
4715 * will not show up in our private address count.
4717 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4721 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4722 /* This case should not happen */
4723 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4726 /* start the INIT timer */
4727 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4729 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
4731 /* No memory, INIT timer will re-attempt. */
4732 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4735 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
4737 /* Now let's put the chunk header in place */
4738 init = mtod(m, struct sctp_init_chunk *);
4739 /* now the chunk header */
4740 init->ch.chunk_type = SCTP_INITIATION;
4741 init->ch.chunk_flags = 0;
4742 /* fill in later from mbuf we build */
4743 init->ch.chunk_length = 0;
4744 /* place in my tag */
4745 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4746 /* set up some of the credits. */
4747 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4748 SCTP_MINIMAL_RWND));
4749 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4750 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4751 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4753 /* Adaptation layer indication parameter */
4754 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4755 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
4756 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4757 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4758 ali->ph.param_length = htons(parameter_len);
4759 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
4760 chunk_len += parameter_len;
4764 if (stcb->asoc.ecn_supported == 1) {
4765 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4766 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4767 ph->param_type = htons(SCTP_ECN_CAPABLE);
4768 ph->param_length = htons(parameter_len);
4769 chunk_len += parameter_len;
4772 /* PR-SCTP supported parameter */
4773 if (stcb->asoc.prsctp_supported == 1) {
4774 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4775 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4776 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4777 ph->param_length = htons(parameter_len);
4778 chunk_len += parameter_len;
4781 /* Add NAT friendly parameter. */
4782 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4783 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4784 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4785 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4786 ph->param_length = htons(parameter_len);
4787 chunk_len += parameter_len;
4790 /* And now tell the peer which extensions we support */
4792 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4793 if (stcb->asoc.prsctp_supported == 1) {
4794 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4795 if (stcb->asoc.idata_supported) {
4796 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
4799 if (stcb->asoc.auth_supported == 1) {
4800 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4802 if (stcb->asoc.asconf_supported == 1) {
4803 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4804 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4806 if (stcb->asoc.reconfig_supported == 1) {
4807 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4809 if (stcb->asoc.idata_supported) {
4810 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
4812 if (stcb->asoc.nrsack_supported == 1) {
4813 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4815 if (stcb->asoc.pktdrop_supported == 1) {
4816 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4819 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4820 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4821 pr_supported->ph.param_length = htons(parameter_len);
4822 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4823 chunk_len += parameter_len;
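/*
 * Parameters are 32-bit aligned on the wire: each parameter_len is
 * rounded up to the next multiple of 4 (SCTP_SIZE32()) and the
 * difference is later written out as zero padding in front of the next
 * parameter. A minimal sketch of that arithmetic, assuming only that
 * SCTP_SIZE32() rounds up to a multiple of 4 (illustrative, not
 * compiled):
 */
#if 0
	uint16_t example_len = 7;	/* e.g. a 7-byte parameter */
	uint16_t example_padded = (uint16_t)((example_len + 3) & ~3);	/* -> 8 */
	uint16_t example_padding = example_padded - example_len;	/* -> 1 byte of zero padding */
#endif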
4825 /* add authentication parameters */
4826 if (stcb->asoc.auth_supported) {
4827 /* attach RANDOM parameter, if available */
4828 if (stcb->asoc.authinfo.random != NULL) {
4829 struct sctp_auth_random *randp;
4831 if (padding_len > 0) {
4832 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4833 chunk_len += padding_len;
4836 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4837 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4838 /* random key already contains the header */
4839 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4840 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4841 chunk_len += parameter_len;
4843 /* add HMAC_ALGO parameter */
4844 if (stcb->asoc.local_hmacs != NULL) {
4845 struct sctp_auth_hmac_algo *hmacs;
4847 if (padding_len > 0) {
4848 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4849 chunk_len += padding_len;
4852 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4853 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
4854 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4855 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4856 hmacs->ph.param_length = htons(parameter_len);
4857 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
4858 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4859 chunk_len += parameter_len;
4861 /* add CHUNKS parameter */
4862 if (stcb->asoc.local_auth_chunks != NULL) {
4863 struct sctp_auth_chunk_list *chunks;
4865 if (padding_len > 0) {
4866 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4867 chunk_len += padding_len;
4870 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4871 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
4872 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
4873 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4874 chunks->ph.param_length = htons(parameter_len);
4875 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
4876 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4877 chunk_len += parameter_len;
4881 /* now any cookie time extensions */
4882 if (stcb->asoc.cookie_preserve_req) {
4883 struct sctp_cookie_perserve_param *cookie_preserve;
4885 if (padding_len > 0) {
4886 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4887 chunk_len += padding_len;
4890 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
4891 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4892 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4893 cookie_preserve->ph.param_length = htons(parameter_len);
4894 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4895 stcb->asoc.cookie_preserve_req = 0;
4896 chunk_len += parameter_len;
4899 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4902 if (padding_len > 0) {
4903 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4904 chunk_len += padding_len;
4907 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4908 if (stcb->asoc.scope.ipv4_addr_legal) {
4909 parameter_len += (uint16_t)sizeof(uint16_t);
4911 if (stcb->asoc.scope.ipv6_addr_legal) {
4912 parameter_len += (uint16_t)sizeof(uint16_t);
4914 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4915 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4916 sup_addr->ph.param_length = htons(parameter_len);
4918 if (stcb->asoc.scope.ipv4_addr_legal) {
4919 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4921 if (stcb->asoc.scope.ipv6_addr_legal) {
4922 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4924 padding_len = 4 - 2 * i;
4925 chunk_len += parameter_len;
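/*
 * Worked example for the parameter built above: with both IPv4 and
 * IPv6 legal, i == 2, so parameter_len is the 4-byte parameter header
 * plus two 2-byte address types (8 bytes) and padding_len is 0; with
 * only one family legal, parameter_len is 6 and padding_len is 2.
 */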
4928 SCTP_BUF_LEN(m) = chunk_len;
4929 /* now the addresses */
4931 * To optimize this we could put the scoping stuff into a structure
4932 * and remove the individual uint8's from the assoc structure. Then
4933 * we could just sifa in the address within the stcb. But for now
4934 * this is a quick hack to get the address stuff teased apart.
4936 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
4938 &padding_len, &chunk_len);
4940 init->ch.chunk_length = htons(chunk_len);
4941 if (padding_len > 0) {
4942 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
4947 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4948 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
4949 (struct sockaddr *)&net->ro._l_addr,
4950 m, 0, NULL, 0, 0, 0, 0,
4951 inp->sctp_lport, stcb->rport, htonl(0),
4955 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
4956 if (error == ENOBUFS) {
4957 stcb->asoc.ifp_had_enobuf = 1;
4958 SCTP_STAT_INCR(sctps_lowlevelerr);
4961 stcb->asoc.ifp_had_enobuf = 0;
4963 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4964 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4968 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4969 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4972 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
4973 * pointing to the beginning of the parameters (i.e. iphlen +
4974 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4975 * end of the mbuf, verifying that all parameters are known.
4977 * For unknown parameters build and return a mbuf with
4978 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4979 * processing this chunk stop, and set *abort_processing to 1.
4981 * By having param_offset be pre-set to where parameters begin it is
4982 * hoped that this routine may be reused in the future by new
4985 struct sctp_paramhdr *phdr, params;
4987 struct mbuf *mat, *op_err;
4988 int at, limit, pad_needed;
4989 uint16_t ptype, plen, padded_size;
4992 *abort_processing = 0;
4995 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4998 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4999 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5000 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5001 ptype = ntohs(phdr->param_type);
5002 plen = ntohs(phdr->param_length);
5003 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5004 /* invalid parameter */
5005 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5008 limit -= SCTP_SIZE32(plen);
5010 * All parameters for all chunks that we know/understand are
5011 * listed here. We process them in other places and take the
5012 * appropriate stop action per the upper bits. However, this
5013 * is the generic routine processors can call to get back
5014 * an operr to either incorporate (INIT-ACK) or send.
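/*
 * For reference, the action taken for an unrecognized parameter is
 * encoded in the two high-order bits of its type (RFC 4960, 3.2.1):
 *   00 - stop processing and discard,  01 - stop, discard and report,
 *   10 - skip and continue,            11 - skip, continue and report.
 * This is what the (ptype & 0x8000) and (ptype & 0x4000) checks in the
 * default case below implement.
 */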
5016 padded_size = SCTP_SIZE32(plen);
5018 /* Parameters with variable size */
5019 case SCTP_HEARTBEAT_INFO:
5020 case SCTP_STATE_COOKIE:
5021 case SCTP_UNRECOG_PARAM:
5022 case SCTP_ERROR_CAUSE_IND:
5026 /* Parameters with variable size within a range */
5027 case SCTP_CHUNK_LIST:
5028 case SCTP_SUPPORTED_CHUNK_EXT:
5029 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5030 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5035 case SCTP_SUPPORTED_ADDRTYPE:
5036 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5037 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5043 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5044 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5049 case SCTP_SET_PRIM_ADDR:
5050 case SCTP_DEL_IP_ADDRESS:
5051 case SCTP_ADD_IP_ADDRESS:
5052 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5053 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5054 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5059 /* Parameters with a fixed size */
5060 case SCTP_IPV4_ADDRESS:
5061 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5062 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5067 case SCTP_IPV6_ADDRESS:
5068 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5069 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5074 case SCTP_COOKIE_PRESERVE:
5075 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5076 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5081 case SCTP_HAS_NAT_SUPPORT:
5084 case SCTP_PRSCTP_SUPPORTED:
5085 if (padded_size != sizeof(struct sctp_paramhdr)) {
5086 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5091 case SCTP_ECN_CAPABLE:
5092 if (padded_size != sizeof(struct sctp_paramhdr)) {
5093 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5098 case SCTP_ULP_ADAPTATION:
5099 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5100 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5105 case SCTP_SUCCESS_REPORT:
5106 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5107 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5112 case SCTP_HOSTNAME_ADDRESS:
5114 /* We can NOT handle HOST NAME addresses!! */
5117 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5118 *abort_processing = 1;
5119 if (op_err == NULL) {
5120 /* Ok need to try to get an mbuf */
5122 l_len = SCTP_MIN_OVERHEAD;
5124 l_len = SCTP_MIN_V4_OVERHEAD;
5126 l_len += sizeof(struct sctp_chunkhdr);
5127 l_len += sizeof(struct sctp_gen_error_cause);
5128 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5130 SCTP_BUF_LEN(op_err) = 0;
5132 * Pre-reserve space for IP,
5133 * SCTP, and chunk header.
5136 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5138 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5140 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5141 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5145 /* If we have space */
5146 struct sctp_gen_error_cause cause;
5149 uint32_t cpthis = 0;
5151 pad_needed = 4 - (err_at % 4);
5152 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5153 err_at += pad_needed;
5155 cause.code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5156 cause.length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen));
5157 m_copyback(op_err, err_at, sizeof(struct sctp_gen_error_cause), (caddr_t)&cause);
5158 err_at += sizeof(struct sctp_gen_error_cause);
5159 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5160 if (SCTP_BUF_NEXT(op_err) == NULL) {
5161 sctp_m_freem(op_err);
5170 * we do not recognize the parameter; figure out what
5173 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5174 if ((ptype & 0x4000) == 0x4000) {
5175 /* Report bit is set?? */
5176 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5177 if (op_err == NULL) {
5180 /* Ok need to try to get an mbuf */
5182 l_len = SCTP_MIN_OVERHEAD;
5184 l_len = SCTP_MIN_V4_OVERHEAD;
5186 l_len += sizeof(struct sctp_chunkhdr);
5187 l_len += sizeof(struct sctp_paramhdr);
5188 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5190 SCTP_BUF_LEN(op_err) = 0;
5192 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5194 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5196 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5197 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5201 /* If we have space */
5202 struct sctp_paramhdr s;
5205 uint32_t cpthis = 0;
5207 pad_needed = 4 - (err_at % 4);
5208 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5209 err_at += pad_needed;
5211 s.param_type = htons(SCTP_UNRECOG_PARAM);
5212 s.param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen);
5213 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)&s);
5214 err_at += sizeof(struct sctp_paramhdr);
5215 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5216 if (SCTP_BUF_NEXT(op_err) == NULL) {
5217 sctp_m_freem(op_err);
5219 * we are out of memory but
5220 * we still need to have a
5221 * look at what to do (the
5222 * system is in trouble
5226 goto more_processing;
5232 if ((ptype & 0x8000) == 0x0000) {
5233 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5236 /* skip this chunk and continue processing */
5237 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5238 at += SCTP_SIZE32(plen);
5243 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
5247 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5248 *abort_processing = 1;
5249 if ((op_err == NULL) && phdr) {
5252 l_len = SCTP_MIN_OVERHEAD;
5254 l_len = SCTP_MIN_V4_OVERHEAD;
5256 l_len += sizeof(struct sctp_chunkhdr);
5257 l_len += (2 * sizeof(struct sctp_paramhdr));
5258 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5260 SCTP_BUF_LEN(op_err) = 0;
5262 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5264 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5266 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5267 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5270 if ((op_err) && phdr) {
5271 struct sctp_paramhdr s;
5274 uint32_t cpthis = 0;
5276 pad_needed = 4 - (err_at % 4);
5277 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5278 err_at += pad_needed;
5280 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5281 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5282 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5283 err_at += sizeof(s);
5284 /* Only copy back the p-hdr that caused the issue */
5285 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5291 sctp_are_there_new_addresses(struct sctp_association *asoc,
5292 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5295 * Given an INIT packet, look through the packet to verify that there
5296 * are NO new addresses. As we go through the parameters, add reports
5297 * of any un-understood parameters that require an error. Also, we
5298 * must return (1) to drop the packet if we see an un-understood
5299 * parameter that tells us to drop the chunk.
5301 struct sockaddr *sa_touse;
5302 struct sockaddr *sa;
5303 struct sctp_paramhdr *phdr, params;
5304 uint16_t ptype, plen;
5306 struct sctp_nets *net;
5309 struct sockaddr_in sin4, *sa4;
5312 struct sockaddr_in6 sin6, *sa6;
5316 memset(&sin4, 0, sizeof(sin4));
5317 sin4.sin_family = AF_INET;
5318 sin4.sin_len = sizeof(sin4);
5321 memset(&sin6, 0, sizeof(sin6));
5322 sin6.sin6_family = AF_INET6;
5323 sin6.sin6_len = sizeof(sin6);
5325 /* First, what about the src address of the packet? */
5327 switch (src->sa_family) {
5330 if (asoc->scope.ipv4_addr_legal) {
5337 if (asoc->scope.ipv6_addr_legal) {
5348 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5349 sa = (struct sockaddr *)&net->ro._l_addr;
5350 if (sa->sa_family == src->sa_family) {
5352 if (sa->sa_family == AF_INET) {
5353 struct sockaddr_in *src4;
5355 sa4 = (struct sockaddr_in *)sa;
5356 src4 = (struct sockaddr_in *)src;
5357 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5364 if (sa->sa_family == AF_INET6) {
5365 struct sockaddr_in6 *src6;
5367 sa6 = (struct sockaddr_in6 *)sa;
5368 src6 = (struct sockaddr_in6 *)src;
5369 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5378 /* New address added! no need to look further. */
5382 /* Ok so far; let's munge through the rest of the packet */
5383 offset += sizeof(struct sctp_init_chunk);
5384 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5387 ptype = ntohs(phdr->param_type);
5388 plen = ntohs(phdr->param_length);
5391 case SCTP_IPV4_ADDRESS:
5393 struct sctp_ipv4addr_param *p4, p4_buf;
5395 if (plen != sizeof(struct sctp_ipv4addr_param)) {
5398 phdr = sctp_get_next_param(in_initpkt, offset,
5399 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5403 if (asoc->scope.ipv4_addr_legal) {
5404 p4 = (struct sctp_ipv4addr_param *)phdr;
5405 sin4.sin_addr.s_addr = p4->addr;
5406 sa_touse = (struct sockaddr *)&sin4;
5412 case SCTP_IPV6_ADDRESS:
5414 struct sctp_ipv6addr_param *p6, p6_buf;
5416 if (plen != sizeof(struct sctp_ipv6addr_param)) {
5419 phdr = sctp_get_next_param(in_initpkt, offset,
5420 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5424 if (asoc->scope.ipv6_addr_legal) {
5425 p6 = (struct sctp_ipv6addr_param *)phdr;
5426 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5428 sa_touse = (struct sockaddr *)&sin6;
5438 /* ok, sa_touse points to one to check */
5440 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5441 sa = (struct sockaddr *)&net->ro._l_addr;
5442 if (sa->sa_family != sa_touse->sa_family) {
5446 if (sa->sa_family == AF_INET) {
5447 sa4 = (struct sockaddr_in *)sa;
5448 if (sa4->sin_addr.s_addr ==
5449 sin4.sin_addr.s_addr) {
5456 if (sa->sa_family == AF_INET6) {
5457 sa6 = (struct sockaddr_in6 *)sa;
5458 if (SCTP6_ARE_ADDR_EQUAL(
5467 /* New addr added! no need to look further */
5471 offset += SCTP_SIZE32(plen);
5472 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params));
5478 * Given an mbuf chain that was sent to us containing an INIT, build an
5479 * INIT-ACK with a COOKIE and send it back. We assume that the in_initpkt has
5480 * done a pullup to include the IPv6/IPv4 header, the SCTP header, and the
5481 * initial part of the INIT message (i.e. the struct sctp_init_msg).
5484 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5485 struct sctp_nets *src_net, struct mbuf *init_pkt,
5486 int iphlen, int offset,
5487 struct sockaddr *src, struct sockaddr *dst,
5488 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5489 uint8_t mflowtype, uint32_t mflowid,
5490 uint32_t vrf_id, uint16_t port)
5492 struct sctp_association *asoc;
5493 struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5494 struct sctp_init_ack_chunk *initack;
5495 struct sctp_adaptation_layer_indication *ali;
5496 struct sctp_supported_chunk_types_param *pr_supported;
5497 struct sctp_paramhdr *ph;
5498 union sctp_sockstore *over_addr;
5499 struct sctp_scoping scp;
5502 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5503 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5504 struct sockaddr_in *sin;
5507 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5508 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5509 struct sockaddr_in6 *sin6;
5511 struct sockaddr *to;
5512 struct sctp_state_cookie stc;
5513 struct sctp_nets *net = NULL;
5514 uint8_t *signature = NULL;
5515 int cnt_inits_to = 0;
5516 uint16_t his_limit, i_want;
5518 int nat_friendly = 0;
5521 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5528 if ((asoc != NULL) &&
5529 (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
5530 if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
5532 * new addresses, out of here in non-cookie-wait
5535 * Send an ABORT, without the new address error
5536 * cause. This looks no different than if no
5537 * listener was present.
5539 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5541 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5542 mflowtype, mflowid, inp->fibnum,
5546 if (src_net != NULL && (src_net->port != port)) {
5548 * change of remote encapsulation port, out of here
5549 * in non-cookie-wait states
5551 * Send an ABORT, without a specific error cause.
5552 * This looks no different than if no listener was
5555 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5556 "Remote encapsulation port changed");
5557 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5558 mflowtype, mflowid, inp->fibnum,
5564 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5565 (offset + sizeof(struct sctp_init_chunk)),
5566 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5569 if (op_err == NULL) {
5570 char msg[SCTP_DIAG_INFO_LEN];
5572 snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
5573 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5576 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5577 init_chk->init.initiate_tag, op_err,
5578 mflowtype, mflowid, inp->fibnum,
5582 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5584 /* No memory, INIT timer will re-attempt. */
5586 sctp_m_freem(op_err);
5589 chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
5593 * We might not overwrite the identification[] completely and on
5594 * some platforms time_entered will contain some padding. Therefore
5595 * zero out the cookie to avoid putting uninitialized memory on the
5598 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5600 /* the time I built cookie */
5601 (void)SCTP_GETTIME_TIMEVAL(&now);
5602 stc.time_entered.tv_sec = now.tv_sec;
5603 stc.time_entered.tv_usec = now.tv_usec;
5605 /* populate any tie tags */
5607 /* unlock before tag selections */
5608 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5609 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5610 stc.cookie_life = asoc->cookie_life;
5611 net = asoc->primary_destination;
5613 stc.tie_tag_my_vtag = 0;
5614 stc.tie_tag_peer_vtag = 0;
5615 /* life I will award this cookie */
5616 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5619 /* copy in the ports for later check */
5620 stc.myport = sh->dest_port;
5621 stc.peerport = sh->src_port;
5624 * If we wanted to honor cookie life extensions, we would add to
5625 * stc.cookie_life. For now we should NOT honor any extension
5627 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5628 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5629 stc.ipv6_addr_legal = 1;
5630 if (SCTP_IPV6_V6ONLY(inp)) {
5631 stc.ipv4_addr_legal = 0;
5633 stc.ipv4_addr_legal = 1;
5636 stc.ipv6_addr_legal = 0;
5637 stc.ipv4_addr_legal = 1;
5642 switch (dst->sa_family) {
5646 /* lookup address */
5647 stc.address[0] = src4->sin_addr.s_addr;
5651 stc.addr_type = SCTP_IPV4_ADDRESS;
5652 /* local from address */
5653 stc.laddress[0] = dst4->sin_addr.s_addr;
5654 stc.laddress[1] = 0;
5655 stc.laddress[2] = 0;
5656 stc.laddress[3] = 0;
5657 stc.laddr_type = SCTP_IPV4_ADDRESS;
5658 /* scope_id is only for v6 */
5660 if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
5661 (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))) {
5664 /* Must use the address in this case */
5665 if (sctp_is_address_on_local_host(src, vrf_id)) {
5666 stc.loopback_scope = 1;
5669 stc.local_scope = 0;
5677 stc.addr_type = SCTP_IPV6_ADDRESS;
5678 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5679 stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
5680 if (sctp_is_address_on_local_host(src, vrf_id)) {
5681 stc.loopback_scope = 1;
5682 stc.local_scope = 0;
5685 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
5686 IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
5688 * If the new destination or source
5689 * is LINK_LOCAL, we must have both
5690 * site and local scope in common.
5691 * Don't set local scope, though,
5692 * since we must depend on the
5693 * source to be added implicitly. We
5694 * cannot assume, just because we
5695 * share one link, that all links are
5698 stc.local_scope = 0;
5702 * We start counting the private
5703 * addresses at 1, since the
5704 * link-local address we source from
5705 * won't show up in our scoped count.
5709 * pull out the scope_id from
5712 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
5713 IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
5715 * If the new destination or source
5716 * is SITE_LOCAL then we must have
5717 * site scope in common.
5721 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5722 stc.laddr_type = SCTP_IPV6_ADDRESS;
5732 /* set the scope per the existing tcb */
5735 struct sctp_nets *lnet;
5738 stc.loopback_scope = asoc->scope.loopback_scope;
5739 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5740 stc.site_scope = asoc->scope.site_scope;
5741 stc.local_scope = asoc->scope.local_scope;
5743 /* Why do we not consider IPv4 LL addresses? */
5744 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5745 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5746 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5748 * if we have a LL address, start
5756 /* use the net pointer */
5757 to = (struct sockaddr *)&net->ro._l_addr;
5758 switch (to->sa_family) {
5761 sin = (struct sockaddr_in *)to;
5762 stc.address[0] = sin->sin_addr.s_addr;
5766 stc.addr_type = SCTP_IPV4_ADDRESS;
5767 if (net->src_addr_selected == 0) {
5769 * strange case here, the INIT should have
5770 * done the selection.
5772 net->ro._s_addr = sctp_source_address_selection(inp,
5773 stcb, (sctp_route_t *)&net->ro,
5775 if (net->ro._s_addr == NULL)
5778 net->src_addr_selected = 1;
5781 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5782 stc.laddress[1] = 0;
5783 stc.laddress[2] = 0;
5784 stc.laddress[3] = 0;
5785 stc.laddr_type = SCTP_IPV4_ADDRESS;
5786 /* scope_id is only for v6 */
5792 sin6 = (struct sockaddr_in6 *)to;
5793 memcpy(&stc.address, &sin6->sin6_addr,
5794 sizeof(struct in6_addr));
5795 stc.addr_type = SCTP_IPV6_ADDRESS;
5796 stc.scope_id = sin6->sin6_scope_id;
5797 if (net->src_addr_selected == 0) {
5799 * strange case here, the INIT should have
5800 * done the selection.
5802 net->ro._s_addr = sctp_source_address_selection(inp,
5803 stcb, (sctp_route_t *)&net->ro,
5805 if (net->ro._s_addr == NULL)
5808 net->src_addr_selected = 1;
5810 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5811 sizeof(struct in6_addr));
5812 stc.laddr_type = SCTP_IPV6_ADDRESS;
5817 /* Now let's put the SCTP header in place */
5818 initack = mtod(m, struct sctp_init_ack_chunk *);
5819 /* Save it off for quick ref */
5820 stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
5822 memcpy(stc.identification, SCTP_VERSION_STRING,
5823 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5824 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5825 /* now the chunk header */
5826 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5827 initack->ch.chunk_flags = 0;
5828 /* fill in later from mbuf we build */
5829 initack->ch.chunk_length = 0;
5830 /* place in my tag */
5831 if ((asoc != NULL) &&
5832 ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
5833 (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) ||
5834 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) {
5835 /* re-use the v-tags and init-seq here */
5836 initack->init.initiate_tag = htonl(asoc->my_vtag);
5837 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5839 uint32_t vtag, itsn;
5842 atomic_add_int(&asoc->refcnt, 1);
5843 SCTP_TCB_UNLOCK(stcb);
5845 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5846 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5848 * Got a duplicate vtag from a peer behind a
5849 * NAT; make sure we don't use it.
5853 initack->init.initiate_tag = htonl(vtag);
5854 /* get a TSN to use too */
5855 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5856 initack->init.initial_tsn = htonl(itsn);
5857 SCTP_TCB_LOCK(stcb);
5858 atomic_add_int(&asoc->refcnt, -1);
5860 SCTP_INP_INCR_REF(inp);
5861 SCTP_INP_RUNLOCK(inp);
5862 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5863 initack->init.initiate_tag = htonl(vtag);
5864 /* get a TSN to use too */
5865 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5866 SCTP_INP_RLOCK(inp);
5867 SCTP_INP_DECR_REF(inp);
5870 /* save away my tag in the state cookie */
5871 stc.my_vtag = initack->init.initiate_tag;
5873 /* set up some of the credits. */
5874 so = inp->sctp_socket;
5876 /* memory problem */
5880 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5882 /* set what I want */
5883 his_limit = ntohs(init_chk->init.num_inbound_streams);
5884 /* choose what I want */
5886 if (asoc->streamoutcnt > asoc->pre_open_streams) {
5887 i_want = asoc->streamoutcnt;
5889 i_want = asoc->pre_open_streams;
5892 i_want = inp->sctp_ep.pre_open_stream_count;
5894 if (his_limit < i_want) {
5895 /* I Want more :< */
5896 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5898 /* I can have what I want :> */
5899 initack->init.num_outbound_streams = htons(i_want);
5901 /* tell him his limit. */
5902 initack->init.num_inbound_streams =
5903 htons(inp->sctp_ep.max_open_streams_intome);
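/*
 * Example of the stream negotiation above: if the peer's INIT offered
 * 5 inbound streams (his_limit) and we want 10 outbound, we can only
 * announce 5 outbound streams; if the peer offered 20, we announce the
 * 10 we want. The inbound stream count we advertise is simply our own
 * configured maximum, independent of the peer's wishes.
 */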
5905 /* adaptation layer indication parameter */
5906 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5907 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5908 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
5909 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5910 ali->ph.param_length = htons(parameter_len);
5911 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5912 chunk_len += parameter_len;
5916 if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
5917 ((asoc == NULL) && (inp->ecn_supported == 1))) {
5918 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5919 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5920 ph->param_type = htons(SCTP_ECN_CAPABLE);
5921 ph->param_length = htons(parameter_len);
5922 chunk_len += parameter_len;
5925 /* PR-SCTP supported parameter */
5926 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5927 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5928 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5929 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5930 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5931 ph->param_length = htons(parameter_len);
5932 chunk_len += parameter_len;
5935 /* Add NAT friendly parameter */
5937 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5938 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5939 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5940 ph->param_length = htons(parameter_len);
5941 chunk_len += parameter_len;
5944 /* And now tell the peer which extensions we support */
5946 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
5947 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5948 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5949 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5950 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5951 ((asoc == NULL) && (inp->idata_supported == 1))) {
5952 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5955 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5956 ((asoc == NULL) && (inp->auth_supported == 1))) {
5957 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5959 if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
5960 ((asoc == NULL) && (inp->asconf_supported == 1))) {
5961 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5962 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5964 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
5965 ((asoc == NULL) && (inp->reconfig_supported == 1))) {
5966 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5968 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5969 ((asoc == NULL) && (inp->idata_supported == 1))) {
5970 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5972 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
5973 ((asoc == NULL) && (inp->nrsack_supported == 1))) {
5974 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5976 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
5977 ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
5978 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5981 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5982 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5983 pr_supported->ph.param_length = htons(parameter_len);
5984 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5985 chunk_len += parameter_len;
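	/*
	 * Note the padding pattern used throughout: every parameter is
	 * 32-bit aligned, but the pad bytes for a parameter are only written
	 * (and counted into chunk_len) lazily, right before the next
	 * parameter is appended, or at the very end if nothing follows.
	 */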
5988 /* add authentication parameters */
5989 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5990 ((asoc == NULL) && (inp->auth_supported == 1))) {
5991 struct sctp_auth_random *randp;
5992 struct sctp_auth_hmac_algo *hmacs;
5993 struct sctp_auth_chunk_list *chunks;
5995 if (padding_len > 0) {
5996 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
5997 chunk_len += padding_len;
6000 /* generate and add RANDOM parameter */
6001 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
6002 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
6003 SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6004 randp->ph.param_type = htons(SCTP_RANDOM);
6005 randp->ph.param_length = htons(parameter_len);
6006 SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6007 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6008 chunk_len += parameter_len;
6010 if (padding_len > 0) {
6011 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6012 chunk_len += padding_len;
6015 /* add HMAC_ALGO parameter */
6016 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
6017 parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6018 sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6019 (uint8_t *)hmacs->hmac_ids);
6020 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6021 hmacs->ph.param_length = htons(parameter_len);
6022 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6023 chunk_len += parameter_len;
6025 if (padding_len > 0) {
6026 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6027 chunk_len += padding_len;
6030 /* add CHUNKS parameter */
6031 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
6032 parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6033 sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6034 chunks->chunk_types);
6035 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6036 chunks->ph.param_length = htons(parameter_len);
6037 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6038 chunk_len += parameter_len;
6040 SCTP_BUF_LEN(m) = chunk_len;
6042 /* now the addresses */
6044 * To optimize this we could put the scoping stuff into a structure
6045 * and remove the individual uint8's from the stc structure. Then we
6046	 * could just stuff the address into the stc.. but for now this
6047 * is a quick hack to get the address stuff teased apart.
6049 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6050 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6051 scp.loopback_scope = stc.loopback_scope;
6052 scp.ipv4_local_scope = stc.ipv4_scope;
6053 scp.local_scope = stc.local_scope;
6054 scp.site_scope = stc.site_scope;
6055 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6057 &padding_len, &chunk_len);
6058 /* padding_len can only be positive, if no addresses have been added */
6059 if (padding_len > 0) {
6060 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6061 chunk_len += padding_len;
6062 SCTP_BUF_LEN(m) += padding_len;
6066 /* tack on the operational error if present */
6069 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6070 parameter_len += SCTP_BUF_LEN(m_tmp);
6072 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6073 SCTP_BUF_NEXT(m_last) = op_err;
6074 while (SCTP_BUF_NEXT(m_last) != NULL) {
6075 m_last = SCTP_BUF_NEXT(m_last);
6077 chunk_len += parameter_len;
6079 if (padding_len > 0) {
6080 m_last = sctp_add_pad_tombuf(m_last, padding_len);
6081 if (m_last == NULL) {
6082 /* Houston we have a problem, no space */
6086 chunk_len += padding_len;
6089 /* Now we must build a cookie */
6090 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6091 if (m_cookie == NULL) {
6092 /* memory problem */
6096 /* Now append the cookie to the end and update the space/size */
6097 SCTP_BUF_NEXT(m_last) = m_cookie;
6099 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6100 parameter_len += SCTP_BUF_LEN(m_tmp);
6101 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6105 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6106 chunk_len += parameter_len;
6109	 * Place in the size, but we don't include the last pad (if any) in the chunk length.
6112 initack->ch.chunk_length = htons(chunk_len);
6115	 * Time to sign the cookie; we don't sign over the cookie signature
6116	 * itself, thus we set the trailer.
6118 (void)sctp_hmac_m(SCTP_HMAC,
6119 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6120 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6121 (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
6123	 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6124	 * here since the timer will drive a retransmission.
6126 if (padding_len > 0) {
6127 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6132 if (stc.loopback_scope) {
6133 over_addr = (union sctp_sockstore *)dst;
6138 if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6140 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6143 SCTP_SO_NOT_LOCKED))) {
6144 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6145 if (error == ENOBUFS) {
6147 asoc->ifp_had_enobuf = 1;
6149 SCTP_STAT_INCR(sctps_lowlevelerr);
6153 asoc->ifp_had_enobuf = 0;
6156 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
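/*-
 * Roughly: for a new send of 'dataout' bytes, walk the sent and send
 * queues looking for PR-SCTP chunks queued under the buffer-space policy
 * whose priority (stored in timetodrop.tv_sec) is no better than the new
 * message's, and release them until enough space has been freed.
 */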
6161 sctp_prune_prsctp(struct sctp_tcb *stcb,
6162 struct sctp_association *asoc,
6163 struct sctp_sndrcvinfo *srcv,
6167 struct sctp_tmit_chunk *chk, *nchk;
6169 SCTP_TCB_LOCK_ASSERT(stcb);
6170 if ((asoc->prsctp_supported) &&
6171 (asoc->sent_queue_cnt_removeable > 0)) {
6172 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6174 * Look for chunks marked with the PR_SCTP flag AND
6175 * the buffer space flag. If the one being sent is
6176	 * of equal or greater priority, then purge the old one
6177 * and free some space.
6179 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6181	 * This one is PR-SCTP AND buffer space limited.
6184 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6186	 * Lower numbers equate to higher
6187 * priority so if the one we are
6188 * looking at has a larger or equal
6189 * priority we want to drop the data
6190 * and NOT retransmit it.
6194 * We release the book_size
6195 * if the mbuf is here
6200 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6204 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6207 freed_spc += ret_spc;
6208 if (freed_spc >= dataout) {
6211 } /* if chunk was present */
6212 } /* if of sufficient priority */
6213 } /* if chunk has enabled */
6214 } /* tailqforeach */
6216 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6217 /* Here we must move to the sent queue and mark */
6218 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6219 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6222 * We release the book_size
6223 * if the mbuf is here
6227 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6230 freed_spc += ret_spc;
6231 if (freed_spc >= dataout) {
6234 } /* end if chk->data */
6235 } /* end if right class */
6236 } /* end if chk pr-sctp */
6237 } /* tailqforeachsafe (chk) */
6238 } /* if enabled in asoc */
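/*
 * Compute the largest DATA payload we will put in a single chunk: start
 * from the smaller of the configured fragmentation point and the smallest
 * path MTU, subtract IP/SCTP overhead (IPv6 overhead if the endpoint is
 * bound to v6) and the DATA/I-DATA chunk header, and leave room for an
 * AUTH chunk when DATA must be authenticated.
 */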
6242 sctp_get_frag_point(struct sctp_tcb *stcb,
6243 struct sctp_association *asoc)
6248 * For endpoints that have both v6 and v4 addresses we must reserve
6249 * room for the ipv6 header, for those that are only dealing with V4
6250 * we use a larger frag point.
6252 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6253 ovh = SCTP_MIN_OVERHEAD;
6255 ovh = SCTP_MIN_V4_OVERHEAD;
6257 ovh += SCTP_DATA_CHUNK_OVERHEAD(stcb);
6258 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6259 siz = asoc->smallest_mtu - ovh;
6261 siz = (stcb->asoc.sctp_frag_point - ovh);
6263 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6265 /* A data chunk MUST fit in a cluster */
6266 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6269 /* adjust for an AUTH chunk if DATA requires auth */
6270 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6271 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6274 /* make it an even word boundary please */
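/*-
 * The PR-SCTP policy decides what sp->ts means for this message: for the
 * TTL policy it becomes an absolute expiry time (now + timetolive ms),
 * for the buffer policy tv_sec carries a priority, and for the RTX policy
 * tv_sec carries the retransmission limit.
 */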
6281 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6284 * We assume that the user wants PR_SCTP_TTL if the user provides a
6285 * positive lifetime but does not specify any PR_SCTP policy.
6287 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6288 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6289 } else if (sp->timetolive > 0) {
6290 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6291 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6295 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6296 case CHUNK_FLAGS_PR_SCTP_BUF:
6298 * Time to live is a priority stored in tv_sec when doing
6299 * the buffer drop thing.
6301 sp->ts.tv_sec = sp->timetolive;
6304 case CHUNK_FLAGS_PR_SCTP_TTL:
6308 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6309 tv.tv_sec = sp->timetolive / 1000;
6310 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6312 * TODO sctp_constants.h needs alternative time
6313 * macros when _KERNEL is undefined.
6315 timevaladd(&sp->ts, &tv);
6318 case CHUNK_FLAGS_PR_SCTP_RTX:
6320	 * Time to live is the number of retransmissions stored in tv_sec.
6323 sp->ts.tv_sec = sp->timetolive;
6327 SCTPDBG(SCTP_DEBUG_USRREQ1,
6328 "Unknown PR_SCTP policy %u.\n",
6329 PR_SCTP_POLICY(sp->sinfo_flags));
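/*-
 * Take a fully formed mbuf chain from the caller and queue it as one
 * complete message on the chosen stream: validate the stream number and
 * association state, allocate a stream-queue-pending entry, fill in the
 * sndrcvinfo bits (flags, ppid, context, lifetime, auth key), account for
 * the socket-buffer space and hand it to the stream scheduler.
 */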
6335 sctp_msg_append(struct sctp_tcb *stcb,
6336 struct sctp_nets *net,
6338 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6342 struct sctp_stream_queue_pending *sp = NULL;
6343 struct sctp_stream_out *strm;
6346 * Given an mbuf chain, put it into the association send queue and
6347 * place it on the wheel
6349 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6350 /* Invalid stream number */
6351 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6355 if ((stcb->asoc.stream_locked) &&
6356 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6357 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6361 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6362 /* Now can we send this? */
6363 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
6364 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6365 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6366 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6367 /* got data while shutting down */
6368 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6372 sctp_alloc_a_strmoq(stcb, sp);
6374 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6378 sp->sinfo_flags = srcv->sinfo_flags;
6379 sp->timetolive = srcv->sinfo_timetolive;
6380 sp->ppid = srcv->sinfo_ppid;
6381 sp->context = srcv->sinfo_context;
6383 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6385 atomic_add_int(&sp->net->ref_count, 1);
6389 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6390 sp->sid = srcv->sinfo_stream;
6391 sp->msg_is_complete = 1;
6392 sp->sender_all_done = 1;
6395 sp->tail_mbuf = NULL;
6396 sctp_set_prsctp_policy(sp);
6398	 * We could in theory (for sendall) pass the length in, but we would
6399	 * still have to hunt through the chain since we need to set up the tail_mbuf.
6403 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6404 if (SCTP_BUF_NEXT(at) == NULL)
6406 sp->length += SCTP_BUF_LEN(at);
6408 if (srcv->sinfo_keynumber_valid) {
6409 sp->auth_keyid = srcv->sinfo_keynumber;
6411 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6413 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6414 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6415 sp->holds_key_ref = 1;
6417 if (hold_stcb_lock == 0) {
6418 SCTP_TCB_SEND_LOCK(stcb);
6420 sctp_snd_sb_alloc(stcb, sp->length);
6421 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6422 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6423 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6425 if (hold_stcb_lock == 0) {
6426 SCTP_TCB_SEND_UNLOCK(stcb);
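/*-
 * Append clonechain's data onto outchain, keeping *endofchain pointing at
 * the last mbuf so repeated appends stay cheap. Sketch of the three cases
 * below: take the mbufs directly when allowed, copy small amounts into the
 * trailing space of a cluster, otherwise duplicate the chain with
 * SCTP_M_COPYM and link it on the end.
 */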
6436 static struct mbuf *
6437 sctp_copy_mbufchain(struct mbuf *clonechain,
6438 struct mbuf *outchain,
6439 struct mbuf **endofchain,
6442 uint8_t copy_by_ref)
6445 struct mbuf *appendchain;
6449 if (endofchain == NULL) {
6453 sctp_m_freem(outchain);
6456 if (can_take_mbuf) {
6457 appendchain = clonechain;
6460 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6462	/* It's not in a cluster */
6463 if (*endofchain == NULL) {
6464 /* lets get a mbuf cluster */
6465 if (outchain == NULL) {
6466 /* This is the general case */
6468 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6469 if (outchain == NULL) {
6472 SCTP_BUF_LEN(outchain) = 0;
6473 *endofchain = outchain;
6474 /* get the prepend space */
6475 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6478	 * We really should not get a NULL in endofchain
6484 if (SCTP_BUF_NEXT(m) == NULL) {
6488 m = SCTP_BUF_NEXT(m);
6491 if (*endofchain == NULL) {
6493	 * huh, TSNH XXX maybe we should panic
6496 sctp_m_freem(outchain);
6500 /* get the new end of length */
6501 len = (int)M_TRAILINGSPACE(*endofchain);
6503 /* how much is left at the end? */
6504 len = (int)M_TRAILINGSPACE(*endofchain);
6506 /* Find the end of the data, for appending */
6507 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6509 /* Now lets copy it out */
6510 if (len >= sizeofcpy) {
6511 /* It all fits, copy it in */
6512 m_copydata(clonechain, 0, sizeofcpy, cp);
6513 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6515 /* fill up the end of the chain */
6517 m_copydata(clonechain, 0, len, cp);
6518 SCTP_BUF_LEN((*endofchain)) += len;
6519 /* now we need another one */
6522 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6527 SCTP_BUF_NEXT((*endofchain)) = m;
6529 cp = mtod((*endofchain), caddr_t);
6530 m_copydata(clonechain, len, sizeofcpy, cp);
6531 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6535	/* copy the old-fashioned way */
6536 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6537 #ifdef SCTP_MBUF_LOGGING
6538 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6539 sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
6544 if (appendchain == NULL) {
6547 sctp_m_freem(outchain);
6551 /* tack on to the end */
6552 if (*endofchain != NULL) {
6553 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6557 if (SCTP_BUF_NEXT(m) == NULL) {
6558 SCTP_BUF_NEXT(m) = appendchain;
6561 m = SCTP_BUF_NEXT(m);
6565 * save off the end and update the end-chain position
6569 if (SCTP_BUF_NEXT(m) == NULL) {
6573 m = SCTP_BUF_NEXT(m);
6577 /* save off the end and update the end-chain position */
6580 if (SCTP_BUF_NEXT(m) == NULL) {
6584 m = SCTP_BUF_NEXT(m);
6586 return (appendchain);
6591 sctp_med_chunk_output(struct sctp_inpcb *inp,
6592 struct sctp_tcb *stcb,
6593 struct sctp_association *asoc,
6596 int control_only, int from_where,
6597 struct timeval *now, int *now_filled, int frag_point, int so_locked
6598 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6604 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6605 uint32_t val SCTP_UNUSED)
6607 struct sctp_copy_all *ca;
6610 int added_control = 0;
6611 int un_sent, do_chunk_output = 1;
6612 struct sctp_association *asoc;
6613 struct sctp_nets *net;
6615 ca = (struct sctp_copy_all *)ptr;
6616 if (ca->m == NULL) {
6619 if (ca->inp != inp) {
6623 if (ca->sndlen > 0) {
6624 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6626 /* can't copy so we are done */
6630 #ifdef SCTP_MBUF_LOGGING
6631 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6632 sctp_log_mbc(m, SCTP_MBUF_ICOPY);
6638 SCTP_TCB_LOCK_ASSERT(stcb);
6639 if (stcb->asoc.alternate) {
6640 net = stcb->asoc.alternate;
6642 net = stcb->asoc.primary_destination;
6644 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6645 /* Abort this assoc with m as the user defined reason */
6647 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6649 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6650 0, M_NOWAIT, 1, MT_DATA);
6651 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6654 struct sctp_paramhdr *ph;
6656 ph = mtod(m, struct sctp_paramhdr *);
6657 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6658 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
6661	 * We add one here to keep the assoc from disappearing on us.
6664 atomic_add_int(&stcb->asoc.refcnt, 1);
6665 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6667	 * sctp_abort_an_association calls sctp_free_asoc(), which
6668	 * will NOT free the association since we incremented the
6669 * refcnt .. we do this to prevent it being freed and things
6670 * getting tricky since we could end up (from free_asoc)
6671 * calling inpcb_free which would get a recursive lock call
6672 * to the iterator lock.. But as a consequence of that the
6673 * stcb will return to us un-locked.. since free_asoc
6674 * returns with either no TCB or the TCB unlocked, we must
6675 * relock.. to unlock in the iterator timer :-0
6677 SCTP_TCB_LOCK(stcb);
6678 atomic_add_int(&stcb->asoc.refcnt, -1);
6679 goto no_chunk_output;
6682 ret = sctp_msg_append(stcb, net, m,
6686 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6687 /* shutdown this assoc */
6688 if (TAILQ_EMPTY(&asoc->send_queue) &&
6689 TAILQ_EMPTY(&asoc->sent_queue) &&
6690 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
6691 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6695	 * there is nothing queued to send, so I'm done.
6698 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
6699 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6700 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6702	 * only send SHUTDOWN the first time through.
6705 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
6706 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6708 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
6709 sctp_stop_timers_for_shutdown(stcb);
6710 sctp_send_shutdown(stcb, net);
6711 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6713 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6714 asoc->primary_destination);
6716 do_chunk_output = 0;
6720 * we still got (or just got) data to send,
6721 * so set SHUTDOWN_PENDING
6724 * XXX sockets draft says that SCTP_EOF
6725 * should be sent with no data. currently,
6726 * we will allow user data to be sent first
6727 * and move to SHUTDOWN-PENDING
6729 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
6730 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6731 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6732 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6733 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
6735 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
6736 if (TAILQ_EMPTY(&asoc->send_queue) &&
6737 TAILQ_EMPTY(&asoc->sent_queue) &&
6738 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6739 struct mbuf *op_err;
6740 char msg[SCTP_DIAG_INFO_LEN];
6743 snprintf(msg, sizeof(msg),
6744 "%s:%d at %s", __FILE__, __LINE__, __func__);
6745 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6747 atomic_add_int(&stcb->asoc.refcnt, 1);
6748 sctp_abort_an_association(stcb->sctp_ep, stcb,
6749 op_err, SCTP_SO_NOT_LOCKED);
6750 atomic_add_int(&stcb->asoc.refcnt, -1);
6751 goto no_chunk_output;
6753 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6754 asoc->primary_destination);
6760 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6761 (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));
6763 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6764 (stcb->asoc.total_flight > 0) &&
6765 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6766 do_chunk_output = 0;
6768 if (do_chunk_output)
6769 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6770 else if (added_control) {
6771 int num_out, reason, now_filled = 0;
6775 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6776 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6777 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6788 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6790 struct sctp_copy_all *ca;
6792 ca = (struct sctp_copy_all *)ptr;
6794 * Do a notify here? Kacheong suggests that the notify be done at
6795 * the send time.. so you would push up a notification if any send
6796 * failed. Don't know if this is feasible since the only failures we
6797	 * have are "memory" related and if you cannot get an mbuf to send
6798 * the data you surely can't get an mbuf to send up to notify the
6799 * user you can't send the data :->
6802 /* now free everything */
6803 sctp_m_freem(ca->m);
6804 SCTP_FREE(ca, SCTP_M_COPYAL);
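/*-
 * Copy 'len' bytes of user data from the uio into a fresh chain of mbufs
 * (clusters, with room saved for the data chunk header in the first one);
 * used by the sendall path so every association gets its own copy.
 */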
6807 static struct mbuf *
6808 sctp_copy_out_all(struct uio *uio, int len)
6810 struct mbuf *ret, *at;
6811 int left, willcpy, cancpy, error;
6813 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
6819 SCTP_BUF_LEN(ret) = 0;
6820 /* save space for the data chunk header */
6821 cancpy = (int)M_TRAILINGSPACE(ret);
6822 willcpy = min(cancpy, left);
6825 /* Align data to the end */
6826 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6832 SCTP_BUF_LEN(at) = willcpy;
6833 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6836 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
6837 if (SCTP_BUF_NEXT(at) == NULL) {
6840 at = SCTP_BUF_NEXT(at);
6841 SCTP_BUF_LEN(at) = 0;
6842 cancpy = (int)M_TRAILINGSPACE(at);
6843 willcpy = min(cancpy, left);
6850 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6851 struct sctp_sndrcvinfo *srcv)
6854 struct sctp_copy_all *ca;
6856 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6860 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6863 memset(ca, 0, sizeof(struct sctp_copy_all));
6867 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6870 * take off the sendall flag, it would be bad if we failed to do
6873 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6874 /* get length and mbuf chain */
6876 ca->sndlen = (int)uio->uio_resid;
6877 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6878 if (ca->m == NULL) {
6879 SCTP_FREE(ca, SCTP_M_COPYAL);
6880 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6884 /* Gather the length of the send */
6888 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6889 ca->sndlen += SCTP_BUF_LEN(mat);
6892 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6893 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6894 SCTP_ASOC_ANY_STATE,
6896 sctp_sendall_completes, inp, 1);
6898 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6899 SCTP_FREE(ca, SCTP_M_COPYAL);
6900 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6908 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6910 struct sctp_tmit_chunk *chk, *nchk;
6912 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6913 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6914 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6915 asoc->ctrl_queue_cnt--;
6917 sctp_m_freem(chk->data);
6920 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
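/*-
 * Drop ASCONF chunks from the asconf send queue once the peer has
 * acknowledged them; anything with a serial number still above
 * asconf_seq_out_acked is kept for (re)transmission.
 */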
6926 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6928 struct sctp_association *asoc;
6929 struct sctp_tmit_chunk *chk, *nchk;
6930 struct sctp_asconf_chunk *acp;
6933 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6934 /* find SCTP_ASCONF chunk in queue */
6935 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6937 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6938 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6943 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6944 asoc->ctrl_queue_cnt--;
6946 sctp_m_freem(chk->data);
6949 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
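/*-
 * Post-transmission bookkeeping for the DATA chunks just sent in one
 * packet: move them from the send queue to the sent queue in TSN order,
 * mark them SENT, bind them to the destination, bump the flight size,
 * shrink our view of the peer's rwnd and note window probes.
 */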
6956 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6957 struct sctp_association *asoc,
6958 struct sctp_tmit_chunk **data_list,
6960 struct sctp_nets *net)
6963 struct sctp_tmit_chunk *tp1;
6965 for (i = 0; i < bundle_at; i++) {
6966 /* off of the send queue */
6967 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6968 asoc->send_queue_cnt--;
6971	 * For any chunk other than 0 you zap the time; chunk 0 gets
6972	 * zapped or set based on whether an RTO measurement is needed.
6975 data_list[i]->do_rtt = 0;
6978 data_list[i]->sent_rcv_time = net->last_sent_time;
6979 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6980 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
6981 if (data_list[i]->whoTo == NULL) {
6982 data_list[i]->whoTo = net;
6983 atomic_add_int(&net->ref_count, 1);
6985 /* on to the sent queue */
6986 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6987 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
6988 struct sctp_tmit_chunk *tpp;
6990 /* need to move back */
6992 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6994 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6998 if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7001 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7003 TAILQ_INSERT_TAIL(&asoc->sent_queue,
7008 /* This does not lower until the cum-ack passes it */
7009 asoc->sent_queue_cnt++;
7010 if ((asoc->peers_rwnd <= 0) &&
7011 (asoc->total_flight == 0) &&
7013 /* Mark the chunk as being a window probe */
7014 SCTP_STAT_INCR(sctps_windowprobed);
7016 #ifdef SCTP_AUDITING_ENABLED
7017 sctp_audit_log(0xC2, 3);
7019 data_list[i]->sent = SCTP_DATAGRAM_SENT;
7020 data_list[i]->snd_count = 1;
7021 data_list[i]->rec.data.chunk_was_revoked = 0;
7022 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7023 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7024 data_list[i]->whoTo->flight_size,
7025 data_list[i]->book_size,
7026 (uint32_t)(uintptr_t)data_list[i]->whoTo,
7027 data_list[i]->rec.data.tsn);
7029 sctp_flight_size_increase(data_list[i]);
7030 sctp_total_flight_increase(stcb, data_list[i]);
7031 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7032 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7033 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7035 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7036 (uint32_t)(data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7037 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7038 /* SWS sender side engages */
7039 asoc->peers_rwnd = 0;
7042 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7043 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
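/*-
 * Sweep the control queue of one-shot chunks (SACKs, heartbeats,
 * shutdowns, errors, cookie-acks, ...) left behind after an output pass;
 * stream-reset requests get special handling since a pending request
 * must survive.
 */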
7048 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7049 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7054 struct sctp_tmit_chunk *chk, *nchk;
7056 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7057 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7058 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7059 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7060 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7061 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7062 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7063 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7064 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7065 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7066 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7067 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7068 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7069 /* Stray chunks must be cleaned up */
7071 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7072 asoc->ctrl_queue_cnt--;
7074 sctp_m_freem(chk->data);
7077 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7078 asoc->fwd_tsn_cnt--;
7080 sctp_free_a_chunk(stcb, chk, so_locked);
7081 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7082 /* special handling, we must look into the param */
7083 if (chk != asoc->str_reset) {
7084 goto clean_up_anyway;
7091 sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
7092 uint32_t space_left, uint32_t frag_point, int eeor_on)
7095 * Make a decision on if I should split a msg into multiple parts.
7096 * This is only asked of incomplete messages.
7100	 * If we are doing EEOR we need to always send it if it's the
7101	 * entire thing, since it might be all the guy is putting in the hopper.
7104 if (space_left >= length) {
7106 * If we have data outstanding,
7107 * we get another chance when the sack
7108 * arrives to transmit - wait for more data
7110 if (stcb->asoc.total_flight == 0) {
7112	 * If nothing is in flight, we zero the packet counter.
7120 /* You can fill the rest */
7121 return (space_left);
7125 * For those strange folk that make the send buffer
7126 * smaller than our fragmentation point, we can't
7127 * get a full msg in so we have to allow splitting.
7129 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7132 if ((length <= space_left) ||
7133 ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7134	/* Sub-optimal residual, don't split in non-eeor mode. */
7138 * If we reach here length is larger than the space_left. Do we wish
7139 * to split it for the sake of packet putting together?
7141 if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7142	/* It's ok to split it */
7143 return (min(space_left, frag_point));
7145 /* Nope, can't split */
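/*-
 * In outline: take the message at the head of this stream's outqueue,
 * carve off up to min(space_left, frag_point) bytes (splitting only when
 * sctp_can_we_split_this allows it), wrap the data in a DATA or I-DATA
 * chunk header, assign the TSN and MID/SSN, and append the new chunk to
 * the association's send queue.
 */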
7150 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7151 struct sctp_stream_out *strq,
7152 uint32_t space_left,
7153 uint32_t frag_point,
7158 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7163 /* Move from the stream to the send_queue keeping track of the total */
7164 struct sctp_association *asoc;
7165 struct sctp_stream_queue_pending *sp;
7166 struct sctp_tmit_chunk *chk;
7167 struct sctp_data_chunk *dchkh = NULL;
7168 struct sctp_idata_chunk *ndchkh = NULL;
7169 uint32_t to_move, length;
7171 uint8_t rcv_flags = 0;
7173 uint8_t send_lock_up = 0;
7175 SCTP_TCB_LOCK_ASSERT(stcb);
7178 /* sa_ignore FREED_MEMORY */
7179 sp = TAILQ_FIRST(&strq->outqueue);
7181 if (send_lock_up == 0) {
7182 SCTP_TCB_SEND_LOCK(stcb);
7185 sp = TAILQ_FIRST(&strq->outqueue);
7189 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7190 (stcb->asoc.idata_supported == 0) &&
7191 (strq->last_msg_incomplete)) {
7192 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7194 strq->last_msg_incomplete);
7195 strq->last_msg_incomplete = 0;
7199 SCTP_TCB_SEND_UNLOCK(stcb);
7204 if ((sp->msg_is_complete) && (sp->length == 0)) {
7205 if (sp->sender_all_done) {
7207 * We are doing deferred cleanup. Last time through
7208 * when we took all the data the sender_all_done was
7211 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7212 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7213 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7214 sp->sender_all_done,
7216 sp->msg_is_complete,
7220 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7221 SCTP_TCB_SEND_LOCK(stcb);
7224 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7225 TAILQ_REMOVE(&strq->outqueue, sp, next);
7226 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7227 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7228 (strq->chunks_on_queues == 0) &&
7229 TAILQ_EMPTY(&strq->outqueue)) {
7230 stcb->asoc.trigger_reset = 1;
7233 sctp_free_remote_addr(sp->net);
7237 sctp_m_freem(sp->data);
7240 sctp_free_a_strmoq(stcb, sp, so_locked);
7241 /* we can't be locked to it */
7243 SCTP_TCB_SEND_UNLOCK(stcb);
7246 /* back to get the next msg */
7250	 * sender just finished this but still holds a reference.
7258 /* is there some to get */
7259 if (sp->length == 0) {
7264 } else if (sp->discard_rest) {
7265 if (send_lock_up == 0) {
7266 SCTP_TCB_SEND_LOCK(stcb);
7269 /* Whack down the size */
7270 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7271 if ((stcb->sctp_socket != NULL) &&
7272 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7273 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7274 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7277 sctp_m_freem(sp->data);
7279 sp->tail_mbuf = NULL;
7288 some_taken = sp->some_taken;
7290 length = sp->length;
7291 if (sp->msg_is_complete) {
7292 /* The message is complete */
7293 to_move = min(length, frag_point);
7294 if (to_move == length) {
7295 /* All of it fits in the MTU */
7296 if (sp->some_taken) {
7297 rcv_flags |= SCTP_DATA_LAST_FRAG;
7299 rcv_flags |= SCTP_DATA_NOT_FRAG;
7301 sp->put_last_out = 1;
7302 if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
7303 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7306 /* Not all of it fits, we fragment */
7307 if (sp->some_taken == 0) {
7308 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7313 to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
7316 * We use a snapshot of length in case it
7317 * is expanding during the compare.
7322 if (to_move >= llen) {
7324 if (send_lock_up == 0) {
7326 * We are taking all of an incomplete msg
7327 * thus we need a send lock.
7329 SCTP_TCB_SEND_LOCK(stcb);
7331 if (sp->msg_is_complete) {
7333	 * the sender finished the msg.
7340 if (sp->some_taken == 0) {
7341 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7345 /* Nothing to take. */
7352 /* If we reach here, we can copy out a chunk */
7353 sctp_alloc_a_chunk(stcb, chk);
7355 /* No chunk memory */
7361 * Setup for unordered if needed by looking at the user sent info
7364 if (sp->sinfo_flags & SCTP_UNORDERED) {
7365 rcv_flags |= SCTP_DATA_UNORDERED;
7367 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7368 (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7369 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7371 /* clear out the chunk before setting up */
7372 memset(chk, 0, sizeof(*chk));
7373 chk->rec.data.rcv_flags = rcv_flags;
7375 if (to_move >= length) {
7376 /* we think we can steal the whole thing */
7377 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7378 SCTP_TCB_SEND_LOCK(stcb);
7381 if (to_move < sp->length) {
7382 /* bail, it changed */
7385 chk->data = sp->data;
7386 chk->last_mbuf = sp->tail_mbuf;
7387 /* register the stealing */
7388 sp->data = sp->tail_mbuf = NULL;
7393 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7394 chk->last_mbuf = NULL;
7395 if (chk->data == NULL) {
7396 sp->some_taken = some_taken;
7397 sctp_free_a_chunk(stcb, chk, so_locked);
7402 #ifdef SCTP_MBUF_LOGGING
7403 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7404 sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7407 /* Pull off the data */
7408 m_adj(sp->data, to_move);
7409 /* Now lets work our way down and compact it */
7411 while (m && (SCTP_BUF_LEN(m) == 0)) {
7412 sp->data = SCTP_BUF_NEXT(m);
7413 SCTP_BUF_NEXT(m) = NULL;
7414 if (sp->tail_mbuf == m) {
7416 * Freeing tail? TSNH since
7417 * we supposedly were taking less
7418 * than the sp->length.
7421	panic("Huh, freeing tail? - TSNH");
7423 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7424 sp->tail_mbuf = sp->data = NULL;
7433 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7434 chk->copy_by_ref = 1;
7436 chk->copy_by_ref = 0;
7439	 * get last_mbuf and counts of mb usage. This is ugly but hopefully
7440	 * it's only one mbuf.
7442 if (chk->last_mbuf == NULL) {
7443 chk->last_mbuf = chk->data;
7444 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7445 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7449 if (to_move > length) {
7450 /*- This should not happen either
7451 * since we always lower to_move to the size
7452	 * of sp->length if it's larger.
7455 panic("Huh, how can to_move be larger?");
7457 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7461 atomic_subtract_int(&sp->length, to_move);
7463 leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
7464 if (M_LEADINGSPACE(chk->data) < leading) {
7465 /* Not enough room for a chunk header, get some */
7468 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA);
7471 * we're in trouble here. _PREPEND below will free
7472 * all the data if there is no leading space, so we
7473 * must put the data back and restore.
7475 if (send_lock_up == 0) {
7476 SCTP_TCB_SEND_LOCK(stcb);
7479 if (sp->data == NULL) {
7480 /* unsteal the data */
7481 sp->data = chk->data;
7482 sp->tail_mbuf = chk->last_mbuf;
7486 /* reassemble the data */
7488 sp->data = chk->data;
7489 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7491 sp->some_taken = some_taken;
7492 atomic_add_int(&sp->length, to_move);
7495 sctp_free_a_chunk(stcb, chk, so_locked);
7499 SCTP_BUF_LEN(m) = 0;
7500 SCTP_BUF_NEXT(m) = chk->data;
7502 M_ALIGN(chk->data, 4);
7505 SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
7506 if (chk->data == NULL) {
7507 /* HELP, TSNH since we assured it would not above? */
7509	panic("prepend fails HELP?");
7511 SCTP_PRINTF("prepend fails HELP?\n");
7512 sctp_free_a_chunk(stcb, chk, so_locked);
7518 sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb));
7519 chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
7520 chk->book_size_scale = 0;
7521 chk->sent = SCTP_DATAGRAM_UNSENT;
7524 chk->asoc = &stcb->asoc;
7525 chk->pad_inplace = 0;
7526 chk->no_fr_allowed = 0;
7527 if (stcb->asoc.idata_supported == 0) {
7528 if (rcv_flags & SCTP_DATA_UNORDERED) {
7529 /* Just use 0. The receiver ignores the values. */
7530 chk->rec.data.mid = 0;
7532 chk->rec.data.mid = strq->next_mid_ordered;
7533 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7534 strq->next_mid_ordered++;
7538 if (rcv_flags & SCTP_DATA_UNORDERED) {
7539 chk->rec.data.mid = strq->next_mid_unordered;
7540 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7541 strq->next_mid_unordered++;
7544 chk->rec.data.mid = strq->next_mid_ordered;
7545 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7546 strq->next_mid_ordered++;
7550 chk->rec.data.sid = sp->sid;
7551 chk->rec.data.ppid = sp->ppid;
7552 chk->rec.data.context = sp->context;
7553 chk->rec.data.doing_fast_retransmit = 0;
7555 chk->rec.data.timetodrop = sp->ts;
7556 chk->flags = sp->act_flags;
7559 chk->whoTo = sp->net;
7560 atomic_add_int(&chk->whoTo->ref_count, 1);
7564 if (sp->holds_key_ref) {
7565 chk->auth_keyid = sp->auth_keyid;
7566 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7567 chk->holds_key_ref = 1;
7569 chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
7570 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7571 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7572 (uint32_t)(uintptr_t)stcb, sp->length,
7573 (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
7576 if (stcb->asoc.idata_supported == 0) {
7577 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7579 ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
7582 * Put the rest of the things in place now. Size was done earlier in
7583 * previous loop prior to padding.
7586 #ifdef SCTP_ASOCLOG_OF_TSNS
7587 SCTP_TCB_LOCK_ASSERT(stcb);
7588 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7589 asoc->tsn_out_at = 0;
7590 asoc->tsn_out_wrapped = 1;
7592 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
7593 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
7594 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
7595 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7596 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7597 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7598 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7599 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7602 if (stcb->asoc.idata_supported == 0) {
7603 dchkh->ch.chunk_type = SCTP_DATA;
7604 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7605 dchkh->dp.tsn = htonl(chk->rec.data.tsn);
7606 dchkh->dp.sid = htons(strq->sid);
7607 dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
7608 dchkh->dp.ppid = chk->rec.data.ppid;
7609 dchkh->ch.chunk_length = htons(chk->send_size);
7611 ndchkh->ch.chunk_type = SCTP_IDATA;
7612 ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7613 ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
7614 ndchkh->dp.sid = htons(strq->sid);
7615 ndchkh->dp.reserved = htons(0);
7616 ndchkh->dp.mid = htonl(chk->rec.data.mid);
7618 ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
7620 ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
7622 ndchkh->ch.chunk_length = htons(chk->send_size);
7624 /* Now advance the chk->send_size by the actual pad needed. */
7625 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7630 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7631 lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
7633 chk->last_mbuf = lm;
7634 chk->pad_inplace = 1;
7636 chk->send_size += pads;
7638 if (PR_SCTP_ENABLED(chk->flags)) {
7639 asoc->pr_sctp_cnt++;
7641 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7642 /* All done pull and kill the message */
7643 if (sp->put_last_out == 0) {
7644 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7645 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7646 sp->sender_all_done,
7648 sp->msg_is_complete,
7652 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7653 SCTP_TCB_SEND_LOCK(stcb);
7656 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7657 TAILQ_REMOVE(&strq->outqueue, sp, next);
7658 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7659 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7660 (strq->chunks_on_queues == 0) &&
7661 TAILQ_EMPTY(&strq->outqueue)) {
7662 stcb->asoc.trigger_reset = 1;
7665 sctp_free_remote_addr(sp->net);
7669 sctp_m_freem(sp->data);
7672 sctp_free_a_strmoq(stcb, sp, so_locked);
7674 asoc->chunks_on_out_queue++;
7675 strq->chunks_on_queues++;
7676 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7677 asoc->send_queue_cnt++;
7680 SCTP_TCB_SEND_UNLOCK(stcb);
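/*-
 * Fill the association send queue for one destination: start from the
 * path MTU minus IP/SCTP and data chunk overhead, then keep asking the
 * stream scheduler for a stream and moving data out of it until the space
 * budget is spent, the scheduler runs dry, or a move bails out.
 */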
7687 sctp_fill_outqueue(struct sctp_tcb *stcb,
7688 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7689 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7694 struct sctp_association *asoc;
7695 struct sctp_stream_out *strq;
7696 uint32_t space_left, moved, total_moved;
7699 SCTP_TCB_LOCK_ASSERT(stcb);
7702 switch (net->ro._l_addr.sa.sa_family) {
7705 space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
7710 space_left = net->mtu - SCTP_MIN_OVERHEAD;
7715 space_left = net->mtu;
7718 /* Need an allowance for the data chunk header too */
7719 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
7721 /* must make even word boundary */
7722 space_left &= 0xfffffffc;
7723 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7726 while ((space_left > 0) && (strq != NULL)) {
7727 moved = sctp_move_to_outqueue(stcb, strq, space_left, frag_point,
7728 &giveup, eeor_mode, &bail, so_locked);
7729 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
7730 if ((giveup != 0) || (bail != 0)) {
7733 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7734 total_moved += moved;
7735 space_left -= moved;
7736 if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
7737 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
7741 space_left &= 0xfffffffc;
7746 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7748 if (total_moved == 0) {
7749 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7750 (net == stcb->asoc.primary_destination)) {
7751 /* ran dry for primary network net */
7752 SCTP_STAT_INCR(sctps_primary_randry);
7753 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7754 /* ran dry with CMT on */
7755 SCTP_STAT_INCR(sctps_cmt_randry);
7761 sctp_fix_ecn_echo(struct sctp_association *asoc)
7763 struct sctp_tmit_chunk *chk;
7765 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7766 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7767 chk->sent = SCTP_DATAGRAM_UNSENT;
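/*-
 * Detach queued data from a destination that is going away: any
 * stream-queued or send-queued chunk still pointing at 'net' has its
 * destination reference dropped so a new one can be picked at send time.
 */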
7773 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7775 struct sctp_association *asoc;
7776 struct sctp_tmit_chunk *chk;
7777 struct sctp_stream_queue_pending *sp;
7784 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7785 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7786 if (sp->net == net) {
7787 sctp_free_remote_addr(sp->net);
7792 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7793 if (chk->whoTo == net) {
7794 sctp_free_remote_addr(chk->whoTo);
7801 sctp_med_chunk_output(struct sctp_inpcb *inp,
7802 struct sctp_tcb *stcb,
7803 struct sctp_association *asoc,
7806 int control_only, int from_where,
7807 struct timeval *now, int *now_filled, int frag_point, int so_locked
7808 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7814	 * Ok, this is the generic chunk service queue. We must do the following:
7816	 * - Service the stream queue that is next, moving any
7817	 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7818	 * LAST to the out queue in one pass) and assigning TSN's. This
7819	 * only applies though if the peer does not support NDATA. For NDATA
7820	 * chunks it's ok to not send the entire message ;-)
7821	 * - Check to see if the cwnd/rwnd allows any output, if so we go ahead and
7822	 * formulate and send the low level chunks. Making sure to combine
7823	 * any control in the control chunk queue also.
7825 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7826 struct mbuf *outchain, *endoutchain;
7827 struct sctp_tmit_chunk *chk, *nchk;
7829 /* temp arrays for unlinking */
7830 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7831 int no_fragmentflg, error;
7832 unsigned int max_rwnd_per_dest, max_send_per_dest;
7833 int one_chunk, hbflag, skip_data_for_this_net;
7834 int asconf, cookie, no_out_cnt;
7835 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7836 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7838 uint32_t auth_offset = 0;
7839 struct sctp_auth_chunk *auth = NULL;
7840 uint16_t auth_keyid;
7841 int override_ok = 1;
7842 int skip_fill_up = 0;
7843 int data_auth_reqd = 0;
7846	 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the destination.
7853 auth_keyid = stcb->asoc.authinfo.active_keyid;
7854 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7855 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7856 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7861 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7863 * First lets prime the pump. For each destination, if there is room
7864 * in the flight size, attempt to pull an MTU's worth out of the
7865 * stream queues into the general send_queue
7867 #ifdef SCTP_AUDITING_ENABLED
7868 sctp_audit_log(0xC2, 2);
7870 SCTP_TCB_LOCK_ASSERT(stcb);
7877	/* Nothing possible to send? */
7878 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7879 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7880 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7881 TAILQ_EMPTY(&asoc->send_queue) &&
7882 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
7887 if (asoc->peers_rwnd == 0) {
7888 /* No room in peers rwnd */
7890 if (asoc->total_flight > 0) {
7891 /* we are allowed one chunk in flight */
7895 if (stcb->asoc.ecn_echo_cnt_onq) {
7896 /* Record where a sack goes, if any */
7897 if (no_data_chunks &&
7898 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7899 /* Nothing but ECNe to send - we don't do that */
7900 goto nothing_to_send;
7902 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7903 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7904 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7905 sack_goes_to = chk->whoTo;
7910 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7911 if (stcb->sctp_socket)
7912 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7914 max_send_per_dest = 0;
7915 if (no_data_chunks == 0) {
7916 /* How many non-directed chunks are there? */
7917 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7918 if (chk->whoTo == NULL) {
7920 * We already have non-directed chunks on
7921 * the queue, no need to do a fill-up.
7929 if ((no_data_chunks == 0) &&
7930 (skip_fill_up == 0) &&
7931 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7932 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7934 * This for loop we are in takes in each net, if
7935	 * it's got space in cwnd and has data sent to it
7936 * (when CMT is off) then it calls
7937 * sctp_fill_outqueue for the net. This gets data on
7938 * the send queue for that network.
7940 * In sctp_fill_outqueue TSN's are assigned and data
7941 * is copied out of the stream buffers. Note mostly
7942 * copy by reference (we hope).
7944 net->window_probe = 0;
7945 if ((net != stcb->asoc.alternate) &&
7946 ((net->dest_state & SCTP_ADDR_PF) ||
7947 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7948 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7949 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7950 sctp_log_cwnd(stcb, net, 1,
7951 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7955 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7956 (net->flight_size == 0)) {
7957 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7959 if (net->flight_size >= net->cwnd) {
7960 /* skip this network, no room - can't fill */
7961 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7962 sctp_log_cwnd(stcb, net, 3,
7963 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7967 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7968 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7970 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7972 /* memory alloc failure */
7978 /* now service each destination and send out what we can for it */
7979 /* Nothing to send? */
7980 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7981 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7982 TAILQ_EMPTY(&asoc->send_queue)) {
7987 if (asoc->sctp_cmt_on_off > 0) {
7988 /* get the last start point */
7989 start_at = asoc->last_net_cmt_send_started;
7990 if (start_at == NULL) {
7991 /* null so to beginning */
7992 start_at = TAILQ_FIRST(&asoc->nets);
7994 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7995 if (start_at == NULL) {
7996 start_at = TAILQ_FIRST(&asoc->nets);
7999 asoc->last_net_cmt_send_started = start_at;
8001 start_at = TAILQ_FIRST(&asoc->nets);
8003 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8004 if (chk->whoTo == NULL) {
8005 if (asoc->alternate) {
8006 chk->whoTo = asoc->alternate;
8008 chk->whoTo = asoc->primary_destination;
8010 atomic_add_int(&chk->whoTo->ref_count, 1);
8013 old_start_at = NULL;
8014 again_one_more_time:
8015 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8016 /* how much can we send? */
8017 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8018 if (old_start_at && (old_start_at == net)) {
8019	/* through list completely. */
8023 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8024 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8025 (net->flight_size >= net->cwnd)) {
8027 * Nothing on control or asconf and flight is full,
8028 * we can skip even in the CMT case.
8033 endoutchain = outchain = NULL;
8036 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8037 skip_data_for_this_net = 1;
8039 skip_data_for_this_net = 0;
8041 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8044 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8049 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8059 if (mtu > asoc->peers_rwnd) {
8060 if (asoc->total_flight > 0) {
8061 /* We have a packet in flight somewhere */
8062 r_mtu = asoc->peers_rwnd;
8064 /* We are always allowed to send one MTU out */
8072 /************************/
8073 /* ASCONF transmission */
8074 /************************/
8075 /* Now first lets go through the asconf queue */
8076 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8077 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8080 if (chk->whoTo == NULL) {
8081 if (asoc->alternate == NULL) {
8082 if (asoc->primary_destination != net) {
8086 if (asoc->alternate != net) {
8091 if (chk->whoTo != net) {
8095 if (chk->data == NULL) {
8098 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8099 chk->sent != SCTP_DATAGRAM_RESEND) {
8103 * if no AUTH is yet included and this chunk
8104 * requires it, make sure to account for it. We
8105 * don't apply the size until the AUTH chunk is
8106 * actually added below in case there is no room for
8107	 * this chunk. NOTE: we overload the use of "omtu" here.
8110 if ((auth == NULL) &&
8111 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8112 stcb->asoc.peer_auth_chunks)) {
8113 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8116 /* Here we do NOT factor the r_mtu */
8117 if ((chk->send_size < (int)(mtu - omtu)) ||
8118 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8120 * We probably should glom the mbuf chain
8121 * from the chk->data for control but the
8122 * problem is it becomes yet one more level
8123 * of tracking to do if for some reason
8124 * output fails. Then I have got to
8125 * reconstruct the merged control chain.. el
8126	 * yucko.. for now we take the easy way and do the copy.
8130 * Add an AUTH chunk, if chunk requires it
8131 * save the offset into the chain for AUTH
8133 if ((auth == NULL) &&
8134 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8135 stcb->asoc.peer_auth_chunks))) {
8136 outchain = sctp_add_auth_chunk(outchain,
8141 chk->rec.chunk_id.id);
8142 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8144 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8145 (int)chk->rec.chunk_id.can_take_data,
8146 chk->send_size, chk->copy_by_ref);
8147 if (outchain == NULL) {
8149 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8152 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8153 /* update our MTU size */
8154 if (mtu > (chk->send_size + omtu))
8155 mtu -= (chk->send_size + omtu);
8158 to_out += (chk->send_size + omtu);
8159 /* Do clear IP_DF ? */
8160 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8163 if (chk->rec.chunk_id.can_take_data)
8166	 * set hb flag since we can use these for RTO.
8172 * should sysctl this: don't bundle data
8173 * with ASCONF since it requires AUTH
8176 chk->sent = SCTP_DATAGRAM_SENT;
8177 if (chk->whoTo == NULL) {
8179 atomic_add_int(&net->ref_count, 1);
8184 * Ok we are out of room but we can
8185	 * output without affecting the
8186 * flight size since this little guy
8187 * is a control only packet.
8189 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8191 * do NOT clear the asconf flag as
8192 * it is used to do appropriate
8193 * source address selection.
8195 if (*now_filled == 0) {
8196 (void)SCTP_GETTIME_TIMEVAL(now);
8199 net->last_sent_time = *now;
8201 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8202 (struct sockaddr *)&net->ro._l_addr,
8203 outchain, auth_offset, auth,
8204 stcb->asoc.authinfo.active_keyid,
8205 no_fragmentflg, 0, asconf,
8206 inp->sctp_lport, stcb->rport,
8207 htonl(stcb->asoc.peer_vtag),
8212	 * error, we could not output
8215 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8216 if (from_where == 0) {
8217 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8219 if (error == ENOBUFS) {
8220 asoc->ifp_had_enobuf = 1;
8221 SCTP_STAT_INCR(sctps_lowlevelerr);
8223 /* error, could not output */
8224 if (error == EHOSTUNREACH) {
8230 sctp_move_chunks_from_net(stcb, net);
8235 asoc->ifp_had_enobuf = 0;
8238 * increase the number we sent, if a
8239 * cookie is sent we don't tell them
8242 outchain = endoutchain = NULL;
8246 *num_out += ctl_cnt;
8247 /* recalc a clean slate and setup */
8248 switch (net->ro._l_addr.sa.sa_family) {
8251 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8256 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8273 /************************/
8274 /* Control transmission */
8275 /************************/
8276 /* Now first lets go through the control queue */
8277 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8278 if ((sack_goes_to) &&
8279 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8280 (chk->whoTo != sack_goes_to)) {
8282 * if we have a sack in queue, and we are
8283 * looking at an ecn echo that is NOT queued
8284 * to where the sack is going..
8286 if (chk->whoTo == net) {
8288 * Don't transmit it to where its
8289 * going (current net)
8292 } else if (sack_goes_to == net) {
8294 * But do transmit it to this
8297 goto skip_net_check;
8300 if (chk->whoTo == NULL) {
8301 if (asoc->alternate == NULL) {
8302 if (asoc->primary_destination != net) {
8306 if (asoc->alternate != net) {
8311 if (chk->whoTo != net) {
8316 if (chk->data == NULL) {
8319 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8321 * It must be unsent. Cookies and ASCONF's
8322 * hang around but their timers will force
8323 * when marked for resend.
8328 * if no AUTH is yet included and this chunk
8329 * requires it, make sure to account for it. We
8330 * don't apply the size until the AUTH chunk is
8331 * actually added below in case there is no room for
8332 * this chunk. NOTE: we overload the use of "omtu"
8335 if ((auth == NULL) &&
8336 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8337 stcb->asoc.peer_auth_chunks)) {
8338 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8341 /* Here we do NOT factor the r_mtu */
8342 if ((chk->send_size <= (int)(mtu - omtu)) ||
8343 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8345 * We probably should glom the mbuf chain
8346 * from the chk->data for control but the
8347 * problem is it becomes yet one more level
8348 * of tracking to do if for some reason
8349 * output fails. Then I have got to
8350 * reconstruct the merged control chain.. el
8351 * yucko.. for now we take the easy way and
8355 * Add an AUTH chunk, if chunk requires it
8356 * save the offset into the chain for AUTH
8358 if ((auth == NULL) &&
8359 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8360 stcb->asoc.peer_auth_chunks))) {
8361 outchain = sctp_add_auth_chunk(outchain,
8366 chk->rec.chunk_id.id);
8367 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
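/*
 * sctp_add_auth_chunk() prepends an AUTH chunk to the outgoing chain and
 * remembers where it sits via auth/auth_offset; the HMAC is presumably
 * filled in by sctp_lowlevel_chunk_output() once the whole packet has
 * been assembled, which is why both values are handed down to that call
 * below.
 */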
8369 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8370 (int)chk->rec.chunk_id.can_take_data,
8371 chk->send_size, chk->copy_by_ref);
8372 if (outchain == NULL) {
8374 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8377 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8378 /* update our MTU size */
8379 if (mtu > (chk->send_size + omtu))
8380 mtu -= (chk->send_size + omtu);
8383 to_out += (chk->send_size + omtu);
8384 /* Do clear IP_DF ? */
8385 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8388 if (chk->rec.chunk_id.can_take_data)
8390 /* Mark things to be removed, if needed */
8391 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8392 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8393 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8394 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8395 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8396 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8397 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8398 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8399 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8400 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8401 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8402 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8405 /* remove these chunks at the end */
8406 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8407 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8408 /* turn off the timer */
8409 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8410 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8412 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8418 * Other chunks, since they have
8419 * timers running (i.e. COOKIE) we
8420 * just "trust" that it gets sent or
8424 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8427 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8429 * Increment ecne send count
8430 * here; this means we may be
8431 * over-zealous in our
8432 * counting if the send
8433 * fails, but it's the best
8434 * place to do it (we used
8435 * to do it in the queue of
8436 * the chunk, but that did
8437 * not tell how many times
8440 SCTP_STAT_INCR(sctps_sendecne);
8442 chk->sent = SCTP_DATAGRAM_SENT;
8443 if (chk->whoTo == NULL) {
8445 atomic_add_int(&net->ref_count, 1);
8451 * Ok we are out of room but we can
8452 * output without affecting the
8453 * flight size since this little guy
8454 * is a control only packet.
8457 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8459 * do NOT clear the asconf
8460 * flag as it is used to do
8461 * appropriate source
8462 * address selection.
8466 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8469 /* Only HB or ASCONF advances time */
8471 if (*now_filled == 0) {
8472 (void)SCTP_GETTIME_TIMEVAL(now);
8475 net->last_sent_time = *now;
8478 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8479 (struct sockaddr *)&net->ro._l_addr,
8482 stcb->asoc.authinfo.active_keyid,
8483 no_fragmentflg, 0, asconf,
8484 inp->sctp_lport, stcb->rport,
8485 htonl(stcb->asoc.peer_vtag),
8490 * error, we could not
8493 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8494 if (from_where == 0) {
8495 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8497 if (error == ENOBUFS) {
8498 asoc->ifp_had_enobuf = 1;
8499 SCTP_STAT_INCR(sctps_lowlevelerr);
8501 if (error == EHOSTUNREACH) {
8507 sctp_move_chunks_from_net(stcb, net);
8512 asoc->ifp_had_enobuf = 0;
8515 * increase the number we sent, if a
8516 * cookie is sent we don't tell them
8519 outchain = endoutchain = NULL;
8523 *num_out += ctl_cnt;
8524 /* recalc a clean slate and setup */
8525 switch (net->ro._l_addr.sa.sa_family) {
8528 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8533 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8550 /* JRI: if dest is in PF state, do not send data to it */
8551 if ((asoc->sctp_cmt_on_off > 0) &&
8552 (net != stcb->asoc.alternate) &&
8553 (net->dest_state & SCTP_ADDR_PF)) {
8556 if (net->flight_size >= net->cwnd) {
8559 if ((asoc->sctp_cmt_on_off > 0) &&
8560 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8561 (net->flight_size > max_rwnd_per_dest)) {
8565 * We need a specific accounting for the usage of the send
8566 * buffer. We also need to check the number of messages per
8567 * net. For now, this is better than nothing and it is disabled
8570 if ((asoc->sctp_cmt_on_off > 0) &&
8571 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8572 (max_send_per_dest > 0) &&
8573 (net->flight_size > max_send_per_dest)) {
8576 /*********************/
8577 /* Data transmission */
8578 /*********************/
8580 * if AUTH for DATA is required and no AUTH has been added
8581 * yet, account for this in the mtu now... if no data can be
8582 * bundled, this adjustment won't matter anyway since the
8583 * packet will be going out...
8585 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8586 stcb->asoc.peer_auth_chunks);
8587 if (data_auth_reqd && (auth == NULL)) {
8588 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8590 /* now lets add any data within the MTU constraints */
8591 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8594 if (net->mtu > SCTP_MIN_V4_OVERHEAD)
8595 omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8602 if (net->mtu > SCTP_MIN_OVERHEAD)
8603 omtu = net->mtu - SCTP_MIN_OVERHEAD;
8613 if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
8614 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
8615 (skip_data_for_this_net == 0)) ||
8617 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8618 if (no_data_chunks) {
8619 /* let only control go out */
8623 if (net->flight_size >= net->cwnd) {
8624 /* skip this net, no room for data */
8628 if ((chk->whoTo != NULL) &&
8629 (chk->whoTo != net)) {
8630 /* Don't send the chunk on this net */
8634 if (asoc->sctp_cmt_on_off == 0) {
8635 if ((asoc->alternate) &&
8636 (asoc->alternate != net) &&
8637 (chk->whoTo == NULL)) {
8639 } else if ((net != asoc->primary_destination) &&
8640 (asoc->alternate == NULL) &&
8641 (chk->whoTo == NULL)) {
8645 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8647 * strange, we have a chunk that is
8648 * too big for its destination and
8649 * yet no fragment ok flag.
8650 * Something went wrong when the
8651 * PMTU changed...we did not mark
8652 * this chunk for some reason?? I
8653 * will fix it here by letting IP
8654 * fragment it for now and printing
8655 * a warning. This really should not
8658 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8659 chk->send_size, mtu);
8660 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8662 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8663 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
8664 struct sctp_data_chunk *dchkh;
8666 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8667 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8669 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8670 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8671 /* ok we will add this one */
8674 * Add an AUTH chunk, if chunk
8675 * requires it, save the offset into
8676 * the chain for AUTH
8678 if (data_auth_reqd) {
8680 outchain = sctp_add_auth_chunk(outchain,
8686 auth_keyid = chk->auth_keyid;
8688 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8689 } else if (override_ok) {
8694 auth_keyid = chk->auth_keyid;
8696 } else if (auth_keyid != chk->auth_keyid) {
8704 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8705 chk->send_size, chk->copy_by_ref);
8706 if (outchain == NULL) {
8707 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8708 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8709 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8712 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8715 /* update our MTU size */
8716 /* Do clear IP_DF ? */
8717 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8720 /* unsigned subtraction of mtu */
8721 if (mtu > chk->send_size)
8722 mtu -= chk->send_size;
8725 /* unsigned subtraction of r_mtu */
8726 if (r_mtu > chk->send_size)
8727 r_mtu -= chk->send_size;
8731 to_out += chk->send_size;
8732 if ((to_out > mx_mtu) && no_fragmentflg) {
8734 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8736 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8740 chk->window_probe = 0;
8741 data_list[bundle_at++] = chk;
8742 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8745 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8746 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8747 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8749 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8751 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8752 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8762 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8764 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8765 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8766 data_list[0]->window_probe = 1;
8767 net->window_probe = 1;
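/*
 * With nothing at all in flight, the single chunk being bundled here is
 * marked as a window probe, so the peer is still prodded for an rwnd
 * update even though its advertised window cannot really hold it.
 */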
8773 * Must be sent in order of the
8774 * TSN's (on a network)
8778 } /* for (chunk gather loop for this net) */
8779 } /* if asoc.state OPEN */
8781 /* Is there something to send for this destination? */
8783 /* We may need to start a control timer or two */
8785 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8788 * do NOT clear the asconf flag as it is
8789 * used to do appropriate source address
8794 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8797 /* must start a send timer if data is being sent */
8798 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8800 * no timer running on this destination
8803 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8805 if (bundle_at || hbflag) {
8806 /* For data/asconf and hb set time */
8807 if (*now_filled == 0) {
8808 (void)SCTP_GETTIME_TIMEVAL(now);
8811 net->last_sent_time = *now;
8813 /* Now send it, if there is anything to send :> */
8814 if ((error = sctp_lowlevel_chunk_output(inp,
8817 (struct sockaddr *)&net->ro._l_addr,
8825 inp->sctp_lport, stcb->rport,
8826 htonl(stcb->asoc.peer_vtag),
8830 /* error, we could not output */
8831 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8832 if (from_where == 0) {
8833 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8835 if (error == ENOBUFS) {
8836 asoc->ifp_had_enobuf = 1;
8837 SCTP_STAT_INCR(sctps_lowlevelerr);
8839 if (error == EHOSTUNREACH) {
8841 * Destination went unreachable
8844 sctp_move_chunks_from_net(stcb, net);
8848 * I add this line to be paranoid. As far as
8849 * I can tell the continue takes us back to
8850 * the top of the for, but just to make sure
8851 * I will reset these again here.
8853 ctl_cnt = bundle_at = 0;
8854 continue; /* This takes us back to the
8855 * for() for the nets. */
8857 asoc->ifp_had_enobuf = 0;
8863 *num_out += (ctl_cnt + bundle_at);
8866 /* setup for a RTO measurement */
8867 tsns_sent = data_list[0]->rec.data.tsn;
8868 /* fill time if not already filled */
8869 if (*now_filled == 0) {
8870 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8872 *now = asoc->time_last_sent;
8874 asoc->time_last_sent = *now;
8876 if (net->rto_needed) {
8877 data_list[0]->do_rtt = 1;
8878 net->rto_needed = 0;
8880 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8881 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8887 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8888 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8891 if (old_start_at == NULL) {
8892 old_start_at = start_at;
8893 start_at = TAILQ_FIRST(&asoc->nets);
8895 goto again_one_more_time;
8899 * At the end there should be no NON timed chunks hanging on this
8902 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8903 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8905 if ((*num_out == 0) && (*reason_code == 0)) {
8910 sctp_clean_up_ctl(stcb, asoc, so_locked);
8915 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8918 * Prepend an OPERATIONAL_ERROR chunk header and put it on the end of
8919 * the control chunk queue.
8921 struct sctp_chunkhdr *hdr;
8922 struct sctp_tmit_chunk *chk;
8923 struct mbuf *mat, *last_mbuf;
8924 uint32_t chunk_length;
8925 uint16_t padding_length;
8927 SCTP_TCB_LOCK_ASSERT(stcb);
8928 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
8929 if (op_err == NULL) {
8934 for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
8935 chunk_length += SCTP_BUF_LEN(mat);
8936 if (SCTP_BUF_NEXT(mat) == NULL) {
8940 if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
8941 sctp_m_freem(op_err);
8944 padding_length = chunk_length % 4;
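/*
 * Chunks must be padded out to a multiple of 4 bytes on the wire; e.g.
 * a chunk_length of 10 leaves a remainder of 2, so 2 pad bytes are
 * appended below via sctp_add_pad_tombuf().
 */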
8945 if (padding_length != 0) {
8946 padding_length = 4 - padding_length;
8948 if (padding_length != 0) {
8949 if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
8950 sctp_m_freem(op_err);
8954 sctp_alloc_a_chunk(stcb, chk);
8957 sctp_m_freem(op_err);
8960 chk->copy_by_ref = 0;
8961 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8962 chk->rec.chunk_id.can_take_data = 0;
8964 chk->send_size = (uint16_t)chunk_length;
8965 chk->sent = SCTP_DATAGRAM_UNSENT;
8967 chk->asoc = &stcb->asoc;
8970 hdr = mtod(op_err, struct sctp_chunkhdr *);
8971 hdr->chunk_type = SCTP_OPERATION_ERROR;
8972 hdr->chunk_flags = 0;
8973 hdr->chunk_length = htons(chk->send_size);
8974 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8975 chk->asoc->ctrl_queue_cnt++;
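/*
 * Typical (illustrative) usage: build the cause mbuf first, for example
 * with sctp_generate_cause(), then hand it off here; the ERROR chunk is
 * then bundled on the next output pass:
 *
 *	op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
 *	if (op_err != NULL)
 *		sctp_queue_op_err(stcb, op_err);
 */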
8979 sctp_send_cookie_echo(struct mbuf *m,
8981 struct sctp_tcb *stcb,
8982 struct sctp_nets *net)
8985 * pull out the cookie and put it at the front of the control chunk
8989 struct mbuf *cookie;
8990 struct sctp_paramhdr param, *phdr;
8991 struct sctp_chunkhdr *hdr;
8992 struct sctp_tmit_chunk *chk;
8993 uint16_t ptype, plen;
8995 SCTP_TCB_LOCK_ASSERT(stcb);
8996 /* First find the cookie in the param area */
8998 at = offset + sizeof(struct sctp_init_chunk);
9000 phdr = sctp_get_next_param(m, at, ¶m, sizeof(param));
9004 ptype = ntohs(phdr->param_type);
9005 plen = ntohs(phdr->param_length);
9006 if (ptype == SCTP_STATE_COOKIE) {
9009 /* found the cookie */
9010 if ((pad = (plen % 4))) {
9013 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9014 if (cookie == NULL) {
9018 #ifdef SCTP_MBUF_LOGGING
9019 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9020 sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9025 at += SCTP_SIZE32(plen);
9027 /* ok, we got the cookie lets change it into a cookie echo chunk */
9028 /* first the change from param to cookie */
9029 hdr = mtod(cookie, struct sctp_chunkhdr *);
9030 hdr->chunk_type = SCTP_COOKIE_ECHO;
9031 hdr->chunk_flags = 0;
9032 /* get the chunk stuff now and place it in the FRONT of the queue */
9033 sctp_alloc_a_chunk(stcb, chk);
9036 sctp_m_freem(cookie);
9039 chk->copy_by_ref = 0;
9040 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9041 chk->rec.chunk_id.can_take_data = 0;
9042 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9043 chk->send_size = plen;
9044 chk->sent = SCTP_DATAGRAM_UNSENT;
9046 chk->asoc = &stcb->asoc;
9049 atomic_add_int(&chk->whoTo->ref_count, 1);
9050 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9051 chk->asoc->ctrl_queue_cnt++;
9056 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9060 struct sctp_nets *net)
9063 * take a HB request and make it into a HB ack and send it.
9065 struct mbuf *outchain;
9066 struct sctp_chunkhdr *chdr;
9067 struct sctp_tmit_chunk *chk;
9071 /* must have a net pointer */
9074 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9075 if (outchain == NULL) {
9076 /* gak out of memory */
9079 #ifdef SCTP_MBUF_LOGGING
9080 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9081 sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9084 chdr = mtod(outchain, struct sctp_chunkhdr *);
9085 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9086 chdr->chunk_flags = 0;
9087 if (chk_length % 4) {
9089 uint32_t cpthis = 0;
9092 padlen = 4 - (chk_length % 4);
9093 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
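/*
 * m_copyback() appends padlen bytes taken from the zeroed cpthis word,
 * rounding the copied HEARTBEAT up to a 4-byte boundary before it is
 * re-labelled and queued as a HEARTBEAT-ACK.
 */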
9095 sctp_alloc_a_chunk(stcb, chk);
9098 sctp_m_freem(outchain);
9101 chk->copy_by_ref = 0;
9102 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9103 chk->rec.chunk_id.can_take_data = 1;
9105 chk->send_size = chk_length;
9106 chk->sent = SCTP_DATAGRAM_UNSENT;
9108 chk->asoc = &stcb->asoc;
9109 chk->data = outchain;
9111 atomic_add_int(&chk->whoTo->ref_count, 1);
9112 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9113 chk->asoc->ctrl_queue_cnt++;
9117 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9119 /* formulate and queue a cookie-ack back to sender */
9120 struct mbuf *cookie_ack;
9121 struct sctp_chunkhdr *hdr;
9122 struct sctp_tmit_chunk *chk;
9124 SCTP_TCB_LOCK_ASSERT(stcb);
9126 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9127 if (cookie_ack == NULL) {
9131 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9132 sctp_alloc_a_chunk(stcb, chk);
9135 sctp_m_freem(cookie_ack);
9138 chk->copy_by_ref = 0;
9139 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9140 chk->rec.chunk_id.can_take_data = 1;
9142 chk->send_size = sizeof(struct sctp_chunkhdr);
9143 chk->sent = SCTP_DATAGRAM_UNSENT;
9145 chk->asoc = &stcb->asoc;
9146 chk->data = cookie_ack;
9147 if (chk->asoc->last_control_chunk_from != NULL) {
9148 chk->whoTo = chk->asoc->last_control_chunk_from;
9149 atomic_add_int(&chk->whoTo->ref_count, 1);
9153 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9154 hdr->chunk_type = SCTP_COOKIE_ACK;
9155 hdr->chunk_flags = 0;
9156 hdr->chunk_length = htons(chk->send_size);
9157 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9158 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9159 chk->asoc->ctrl_queue_cnt++;
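/*
 * The COOKIE-ACK is aimed at last_control_chunk_from when it is known,
 * i.e. the net the COOKIE-ECHO arrived on; otherwise whoTo is presumably
 * left unset so the output path picks a destination.
 */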
9165 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9167 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9168 struct mbuf *m_shutdown_ack;
9169 struct sctp_shutdown_ack_chunk *ack_cp;
9170 struct sctp_tmit_chunk *chk;
9172 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9173 if (m_shutdown_ack == NULL) {
9177 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9178 sctp_alloc_a_chunk(stcb, chk);
9181 sctp_m_freem(m_shutdown_ack);
9184 chk->copy_by_ref = 0;
9185 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9186 chk->rec.chunk_id.can_take_data = 1;
9188 chk->send_size = sizeof(struct sctp_chunkhdr);
9189 chk->sent = SCTP_DATAGRAM_UNSENT;
9191 chk->asoc = &stcb->asoc;
9192 chk->data = m_shutdown_ack;
9195 atomic_add_int(&chk->whoTo->ref_count, 1);
9197 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9198 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9199 ack_cp->ch.chunk_flags = 0;
9200 ack_cp->ch.chunk_length = htons(chk->send_size);
9201 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9202 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9203 chk->asoc->ctrl_queue_cnt++;
9208 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9210 /* formulate and queue a SHUTDOWN to the sender */
9211 struct mbuf *m_shutdown;
9212 struct sctp_shutdown_chunk *shutdown_cp;
9213 struct sctp_tmit_chunk *chk;
9215 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9216 if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9217 /* We already have a SHUTDOWN queued. Reuse it. */
9219 sctp_free_remote_addr(chk->whoTo);
9226 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9227 if (m_shutdown == NULL) {
9231 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9232 sctp_alloc_a_chunk(stcb, chk);
9235 sctp_m_freem(m_shutdown);
9238 chk->copy_by_ref = 0;
9239 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9240 chk->rec.chunk_id.can_take_data = 1;
9242 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9243 chk->sent = SCTP_DATAGRAM_UNSENT;
9245 chk->asoc = &stcb->asoc;
9246 chk->data = m_shutdown;
9249 atomic_add_int(&chk->whoTo->ref_count, 1);
9251 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9252 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9253 shutdown_cp->ch.chunk_flags = 0;
9254 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9255 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9256 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9257 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9258 chk->asoc->ctrl_queue_cnt++;
9260 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9263 atomic_add_int(&chk->whoTo->ref_count, 1);
9265 shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9266 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9267 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
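/*
 * Reuse path: the SHUTDOWN already on the queue keeps its mbuf, has its
 * old destination reference dropped above, gets a fresh cumulative TSN
 * ack, and is moved to the tail of the control queue so it goes out on
 * the next pass.
 */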
9273 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9276 * formulate and queue an ASCONF to the peer. ASCONF parameters
9277 * should be queued on the assoc queue.
9279 struct sctp_tmit_chunk *chk;
9280 struct mbuf *m_asconf;
9283 SCTP_TCB_LOCK_ASSERT(stcb);
9285 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9286 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9287 /* can't send a new one if there is one in flight already */
9291 /* compose an ASCONF chunk, maximum length is PMTU */
9292 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9293 if (m_asconf == NULL) {
9297 sctp_alloc_a_chunk(stcb, chk);
9300 sctp_m_freem(m_asconf);
9304 chk->copy_by_ref = 0;
9305 chk->rec.chunk_id.id = SCTP_ASCONF;
9306 chk->rec.chunk_id.can_take_data = 0;
9307 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9308 chk->data = m_asconf;
9309 chk->send_size = len;
9310 chk->sent = SCTP_DATAGRAM_UNSENT;
9312 chk->asoc = &stcb->asoc;
9315 atomic_add_int(&chk->whoTo->ref_count, 1);
9317 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9318 chk->asoc->ctrl_queue_cnt++;
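/*
 * ASCONFs live on their own asconf_send_queue rather than the general
 * control queue, and unless SCTP_PCB_FLAGS_MULTIPLE_ASCONFS is enabled
 * only one may be outstanding at a time (see the check above).
 */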
9323 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9326 * formulate and queue an asconf back to the peer. ASCONF parameters
9327 * must be stored in the tcb.
9329 struct sctp_tmit_chunk *chk;
9330 struct sctp_asconf_ack *ack, *latest_ack;
9332 struct sctp_nets *net = NULL;
9334 SCTP_TCB_LOCK_ASSERT(stcb);
9335 /* Get the latest ASCONF-ACK */
9336 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9337 if (latest_ack == NULL) {
9340 if (latest_ack->last_sent_to != NULL &&
9341 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9342 /* we're doing a retransmission */
9343 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9346 if (stcb->asoc.last_control_chunk_from == NULL) {
9347 if (stcb->asoc.alternate) {
9348 net = stcb->asoc.alternate;
9350 net = stcb->asoc.primary_destination;
9353 net = stcb->asoc.last_control_chunk_from;
9358 if (stcb->asoc.last_control_chunk_from == NULL) {
9359 if (stcb->asoc.alternate) {
9360 net = stcb->asoc.alternate;
9362 net = stcb->asoc.primary_destination;
9365 net = stcb->asoc.last_control_chunk_from;
9368 latest_ack->last_sent_to = net;
9370 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9371 if (ack->data == NULL) {
9375 /* copy the asconf_ack */
9376 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9377 if (m_ack == NULL) {
9378 /* couldn't copy it */
9381 #ifdef SCTP_MBUF_LOGGING
9382 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9383 sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9387 sctp_alloc_a_chunk(stcb, chk);
9391 sctp_m_freem(m_ack);
9394 chk->copy_by_ref = 0;
9395 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9396 chk->rec.chunk_id.can_take_data = 1;
9397 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9400 atomic_add_int(&chk->whoTo->ref_count, 1);
9403 chk->send_size = ack->len;
9404 chk->sent = SCTP_DATAGRAM_UNSENT;
9406 chk->asoc = &stcb->asoc;
9408 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9409 chk->asoc->ctrl_queue_cnt++;
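/*
 * Every cached ASCONF-ACK on asconf_ack_sent is copied and queued here,
 * so a retransmitted ASCONF from the peer gets all still-relevant acks
 * again, each aimed at the net chosen above.
 */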
9416 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9417 struct sctp_tcb *stcb,
9418 struct sctp_association *asoc,
9419 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9420 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9426 * send out one MTU of retransmission. If fast_retransmit is
9427 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9428 * rwnd. For a Cookie or Asconf in the control chunk queue we
9429 * retransmit them by themselves.
9431 * For data chunks we will pick out the lowest TSN's in the sent_queue
9432 * marked for resend and bundle them all together (up to a MTU of
9433 * destination). The address to send to should have been
9434 * selected/changed where the retransmission was marked (i.e. in FR
9435 * or t3-timeout routines).
9437 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9438 struct sctp_tmit_chunk *chk, *fwd;
9439 struct mbuf *m, *endofchain;
9440 struct sctp_nets *net = NULL;
9441 uint32_t tsns_sent = 0;
9442 int no_fragmentflg, bundle_at, cnt_thru;
9444 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9445 struct sctp_auth_chunk *auth = NULL;
9446 uint32_t auth_offset = 0;
9447 uint16_t auth_keyid;
9448 int override_ok = 1;
9449 int data_auth_reqd = 0;
9452 SCTP_TCB_LOCK_ASSERT(stcb);
9453 tmr_started = ctl_cnt = bundle_at = error = 0;
9458 endofchain = m = NULL;
9459 auth_keyid = stcb->asoc.authinfo.active_keyid;
9460 #ifdef SCTP_AUDITING_ENABLED
9461 sctp_audit_log(0xC3, 1);
9463 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9464 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9465 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9466 asoc->sent_queue_retran_cnt);
9467 asoc->sent_queue_cnt = 0;
9468 asoc->sent_queue_cnt_removeable = 0;
9469 /* send back 0/0 so we enter normal transmission */
9473 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9474 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9475 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9476 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9477 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9480 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9481 if (chk != asoc->str_reset) {
9483 * not eligible for retran if its
9490 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9494 * Add an AUTH chunk, if chunk requires it save the
9495 * offset into the chain for AUTH
9497 if ((auth == NULL) &&
9498 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9499 stcb->asoc.peer_auth_chunks))) {
9500 m = sctp_add_auth_chunk(m, &endofchain,
9501 &auth, &auth_offset,
9503 chk->rec.chunk_id.id);
9504 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9506 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9512 /* do we have control chunks to retransmit? */
9514 /* Start a timer no matter if we succeed or fail */
9515 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9516 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9517 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9518 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9519 chk->snd_count++; /* update our count */
9520 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9521 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9522 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9523 no_fragmentflg, 0, 0,
9524 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9525 chk->whoTo->port, NULL,
9528 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9529 if (error == ENOBUFS) {
9530 asoc->ifp_had_enobuf = 1;
9531 SCTP_STAT_INCR(sctps_lowlevelerr);
9535 asoc->ifp_had_enobuf = 0;
9541 * We don't want to mark the net->sent time here since
9542 * we use this for HB and retrans cannot measure RTT
9544 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9546 chk->sent = SCTP_DATAGRAM_SENT;
9547 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9551 /* Clean up the fwd-tsn list */
9552 sctp_clean_up_ctl(stcb, asoc, so_locked);
9557 * Ok, it is just data retransmission we need to do or that and a
9558 * fwd-tsn with it all.
9560 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9561 return (SCTP_RETRAN_DONE);
9563 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) ||
9564 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) {
9565 /* not yet open, resend the cookie and that is it */
9568 #ifdef SCTP_AUDITING_ENABLED
9569 sctp_auditing(20, inp, stcb, NULL);
9571 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9572 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9573 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9574 /* No, not sent to this net or not ready for rtx */
9577 if (chk->data == NULL) {
9578 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9579 chk->rec.data.tsn, chk->snd_count, chk->sent);
9582 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9583 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9584 struct mbuf *op_err;
9585 char msg[SCTP_DIAG_INFO_LEN];
9587 snprintf(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
9588 chk->rec.data.tsn, chk->snd_count);
9589 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
9591 atomic_add_int(&stcb->asoc.refcnt, 1);
9592 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
9594 SCTP_TCB_LOCK(stcb);
9595 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9596 return (SCTP_RETRAN_EXIT);
9598 /* pick up the net */
9600 switch (net->ro._l_addr.sa.sa_family) {
9603 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9608 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9617 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9618 /* No room in peers rwnd */
9621 tsn = asoc->last_acked_seq + 1;
9622 if (tsn == chk->rec.data.tsn) {
9624 * we make a special exception for this
9625 * case. The peer has no rwnd but is missing
9626 * the lowest chunk.. which is probably what
9627 * is holding up the rwnd.
9629 goto one_chunk_around;
9634 if (asoc->peers_rwnd < mtu) {
9636 if ((asoc->peers_rwnd == 0) &&
9637 (asoc->total_flight == 0)) {
9638 chk->window_probe = 1;
9639 chk->whoTo->window_probe = 1;
9642 #ifdef SCTP_AUDITING_ENABLED
9643 sctp_audit_log(0xC3, 2);
9647 net->fast_retran_ip = 0;
9648 if (chk->rec.data.doing_fast_retransmit == 0) {
9650 * if no FR in progress skip destination that have
9651 * flight_size > cwnd.
9653 if (net->flight_size >= net->cwnd) {
9658 * Mark the destination net to have FR recovery
9662 net->fast_retran_ip = 1;
9666 * if no AUTH is yet included and this chunk requires it,
9667 * make sure to account for it. We don't apply the size
9668 * until the AUTH chunk is actually added below in case
9669 * there is no room for this chunk.
9671 if (data_auth_reqd && (auth == NULL)) {
9672 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
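/*
 * As with "omtu" for control chunks above, dmtu carries the AUTH chunk
 * length so the size check below accounts for an AUTH chunk that has
 * not actually been added to the chain yet.
 */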
9676 if ((chk->send_size <= (mtu - dmtu)) ||
9677 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9678 /* ok we will add this one */
9679 if (data_auth_reqd) {
9681 m = sctp_add_auth_chunk(m,
9687 auth_keyid = chk->auth_keyid;
9689 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9690 } else if (override_ok) {
9691 auth_keyid = chk->auth_keyid;
9693 } else if (chk->auth_keyid != auth_keyid) {
9694 /* different keyid, so done bundling */
9698 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9700 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9703 /* Do clear IP_DF ? */
9704 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9707 /* update our MTU size */
9708 if (mtu > (chk->send_size + dmtu))
9709 mtu -= (chk->send_size + dmtu);
9712 data_list[bundle_at++] = chk;
9713 if (one_chunk && (asoc->total_flight <= 0)) {
9714 SCTP_STAT_INCR(sctps_windowprobed);
9717 if (one_chunk == 0) {
9719 * now, are there any more forward from chk to pick
9722 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9723 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9724 /* Nope, not for retran */
9727 if (fwd->whoTo != net) {
9728 /* Nope, not the net in question */
9731 if (data_auth_reqd && (auth == NULL)) {
9732 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9735 if (fwd->send_size <= (mtu - dmtu)) {
9736 if (data_auth_reqd) {
9738 m = sctp_add_auth_chunk(m,
9744 auth_keyid = fwd->auth_keyid;
9746 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9747 } else if (override_ok) {
9748 auth_keyid = fwd->auth_keyid;
9750 } else if (fwd->auth_keyid != auth_keyid) {
9758 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9760 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9763 /* Do clear IP_DF ? */
9764 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9767 /* update our MTU size */
9768 if (mtu > (fwd->send_size + dmtu))
9769 mtu -= (fwd->send_size + dmtu);
9772 data_list[bundle_at++] = fwd;
9773 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9777 /* can't fit so we are done */
9782 /* Is there something to send for this destination? */
9785 * No matter if we fail or succeed, we should start a
9786 * timer. A failure is like a lost IP packet :-)
9788 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9790 * no timer running on this destination
9793 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9796 /* Now lets send it, if there is anything to send :> */
9797 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9798 (struct sockaddr *)&net->ro._l_addr, m,
9799 auth_offset, auth, auth_keyid,
9800 no_fragmentflg, 0, 0,
9801 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9805 /* error, we could not output */
9806 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9807 if (error == ENOBUFS) {
9808 asoc->ifp_had_enobuf = 1;
9809 SCTP_STAT_INCR(sctps_lowlevelerr);
9813 asoc->ifp_had_enobuf = 0;
9820 * We don't want to mark the net->sent time here
9821 * since we use this for HB and retrans cannot
9824 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9826 /* For auto-close */
9828 if (*now_filled == 0) {
9829 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9830 *now = asoc->time_last_sent;
9833 asoc->time_last_sent = *now;
9835 *cnt_out += bundle_at;
9836 #ifdef SCTP_AUDITING_ENABLED
9837 sctp_audit_log(0xC4, bundle_at);
9840 tsns_sent = data_list[0]->rec.data.tsn;
9842 for (i = 0; i < bundle_at; i++) {
9843 SCTP_STAT_INCR(sctps_sendretransdata);
9844 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9846 * When we have revoked data and we
9847 * retransmit it, we clear the revoked
9848 * flag, since this flag dictates whether
9849 * we subtracted from the flight size
9851 if (data_list[i]->rec.data.chunk_was_revoked) {
9852 /* Deflate the cwnd */
9853 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9854 data_list[i]->rec.data.chunk_was_revoked = 0;
9856 data_list[i]->snd_count++;
9857 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9858 /* record the time */
9859 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9860 if (data_list[i]->book_size_scale) {
9862 * need to double the book size on
9865 data_list[i]->book_size_scale = 0;
9867 * Since we double the booksize, we
9868 * must also double the output queue
9869 * size, since this gets shrunk when
9870 * we free by this amount.
9872 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9873 data_list[i]->book_size *= 2;
9877 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9878 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9879 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9881 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9882 (uint32_t)(data_list[i]->send_size +
9883 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9885 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9886 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9887 data_list[i]->whoTo->flight_size,
9888 data_list[i]->book_size,
9889 (uint32_t)(uintptr_t)data_list[i]->whoTo,
9890 data_list[i]->rec.data.tsn);
9892 sctp_flight_size_increase(data_list[i]);
9893 sctp_total_flight_increase(stcb, data_list[i]);
9894 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9895 /* SWS sender side engages */
9896 asoc->peers_rwnd = 0;
9899 (data_list[i]->rec.data.doing_fast_retransmit)) {
9900 SCTP_STAT_INCR(sctps_sendfastretrans);
9901 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9902 (tmr_started == 0)) {
9904 * ok we just fast-retrans'd
9905 * the lowest TSN, i.e the
9906 * first on the list. In
9907 * this case we want to give
9908 * some more time to get a
9909 * SACK back without a
9912 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9913 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
9914 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9918 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9919 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9921 #ifdef SCTP_AUDITING_ENABLED
9922 sctp_auditing(21, inp, stcb, NULL);
9928 if (asoc->sent_queue_retran_cnt <= 0) {
9929 /* all done we have no more to retran */
9930 asoc->sent_queue_retran_cnt = 0;
9934 /* No more room in rwnd */
9937 /* stop the for loop here. we sent out a packet */
9944 sctp_timer_validation(struct sctp_inpcb *inp,
9945 struct sctp_tcb *stcb,
9946 struct sctp_association *asoc)
9948 struct sctp_nets *net;
9950 /* Validate that a timer is running somewhere */
9951 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9952 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9953 /* Here is a timer */
9957 SCTP_TCB_LOCK_ASSERT(stcb);
9958 /* Gak, we did not have a timer somewhere */
9959 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9960 if (asoc->alternate) {
9961 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9963 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
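/*
 * Fallback order: prefer the alternate net if one is set, otherwise the
 * primary destination, so that at least one T3-rtx timer is armed after
 * a retransmission pass.
 */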
9969 sctp_chunk_output(struct sctp_inpcb *inp,
9970 struct sctp_tcb *stcb,
9973 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9979 * Ok this is the generic chunk service queue. we must do the
9981 * - See if there are retransmits pending, if so we must
9983 * - Service the stream queue that is next, moving any
9984 * message (note I must get a complete message i.e.
9985 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9987 * - Check to see if the cwnd/rwnd allows any output, if so we
9988 * go ahead and formulate and send the low level chunks. Making sure
9989 * to combine any control in the control chunk queue also.
9991 struct sctp_association *asoc;
9992 struct sctp_nets *net;
9993 int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
9994 unsigned int burst_cnt = 0;
9998 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10001 unsigned int tot_frs = 0;
10003 asoc = &stcb->asoc;
10005 /* The Nagle algorithm is only applied when handling a send call. */
10006 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10007 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10015 SCTP_TCB_LOCK_ASSERT(stcb);
10017 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10019 if ((un_sent <= 0) &&
10020 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10021 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10022 (asoc->sent_queue_retran_cnt == 0) &&
10023 (asoc->trigger_reset == 0)) {
10024 /* Nothing to do unless there is something to be sent left */
10028 * Do we have something to send, data or control AND a sack timer
10029 * running, if so piggy-back the sack.
10031 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10032 sctp_send_sack(stcb, so_locked);
10033 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
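/*
 * A pending delayed-ack timer means a SACK is owed; sending it now lets
 * it be bundled with whatever control/data goes out below, and the
 * timer can be stopped since the ack has been generated.
 */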
10035 while (asoc->sent_queue_retran_cnt) {
10037 * Ok, it is retransmission time only, we send out only ONE
10038 * packet with a single call off to the retran code.
10040 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10042 * Special hook for handling cookies discarded
10043 * by the peer that carried data. Send cookie-ack only
10044 * and then the next call will get the retrans.
10046 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10048 &now, &now_filled, frag_point, so_locked);
10050 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10051 /* if its not from a HB then do it */
10053 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10059 * it's from any other place, we don't allow retran
10060 * output (only control)
10065 /* Can't send anymore */
10067 * now lets push out control by calling med-level
10068 * output once. this assures that we WILL send HB's
10071 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10073 &now, &now_filled, frag_point, so_locked);
10074 #ifdef SCTP_AUDITING_ENABLED
10075 sctp_auditing(8, inp, stcb, NULL);
10077 sctp_timer_validation(inp, stcb, asoc);
10082 * The count was off.. retran is not happening so do
10083 * the normal retransmission.
10085 #ifdef SCTP_AUDITING_ENABLED
10086 sctp_auditing(9, inp, stcb, NULL);
10088 if (ret == SCTP_RETRAN_EXIT) {
10093 if (from_where == SCTP_OUTPUT_FROM_T3) {
10094 /* Only one transmission allowed out of a timeout */
10095 #ifdef SCTP_AUDITING_ENABLED
10096 sctp_auditing(10, inp, stcb, NULL);
10098 /* Push out any control */
10099 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10100 &now, &now_filled, frag_point, so_locked);
10103 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10104 /* Hit FR burst limit */
10107 if ((num_out == 0) && (ret == 0)) {
10108 /* No more retrans to send */
10112 #ifdef SCTP_AUDITING_ENABLED
10113 sctp_auditing(12, inp, stcb, NULL);
10115 /* Check for bad destinations, if they exist move chunks around. */
10116 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10117 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10119 * if possible, move things off of this address. We
10120 * still may send below due to the dormant state, but
10121 * we try to find an alternate address to send to,
10122 * and if we have one we move all queued data on the
10123 * out wheel to this alternate address.
10125 if (net->ref_count > 1)
10126 sctp_move_chunks_from_net(stcb, net);
10129 * if ((asoc->sat_network) || (net->addr_is_local))
10130 * { burst_limit = asoc->max_burst *
10131 * SCTP_SAT_NETWORK_BURST_INCR; }
10133 if (asoc->max_burst > 0) {
10134 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10135 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10137 * JRS - Use the congestion
10138 * control given in the
10139 * congestion control module
10141 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10142 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10143 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10145 SCTP_STAT_INCR(sctps_maxburstqueued);
10147 net->fast_retran_ip = 0;
10149 if (net->flight_size == 0) {
10151 * Should be decaying the
10163 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10164 &reason_code, 0, from_where,
10165 &now, &now_filled, frag_point, so_locked);
10167 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10168 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10169 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10171 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10172 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10173 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10177 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10179 tot_out += num_out;
10181 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10182 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10183 if (num_out == 0) {
10184 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10189 * When the Nagle algorithm is used, look at how
10190 * much is unsent, then if it is smaller than an MTU
10191 * and we have data in flight we stop, except if we
10192 * are handling a fragmented user message.
10194 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
10195 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10196 (stcb->asoc.total_flight > 0)) {
10197 /* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10201 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10202 TAILQ_EMPTY(&asoc->send_queue) &&
10203 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10204 /* Nothing left to send */
10207 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10208 /* Nothing left to send */
10211 } while (num_out &&
10212 ((asoc->max_burst == 0) ||
10213 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10214 (burst_cnt < asoc->max_burst)));
10216 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10217 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10218 SCTP_STAT_INCR(sctps_maxburstqueued);
10219 asoc->burst_limit_applied = 1;
10220 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10221 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10224 asoc->burst_limit_applied = 0;
10227 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10228 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10230 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10234 * Now we need to clean up the control chunk chain if an ECNE is on
10235 * it. It must be marked as UNSENT again so the next call will continue
10236 * to send it until such time that we get a CWR, to remove it.
10238 if (stcb->asoc.ecn_echo_cnt_onq)
10239 sctp_fix_ecn_echo(asoc);
10241 if (stcb->asoc.trigger_reset) {
10242 if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) {
10252 struct sctp_inpcb *inp,
10254 struct sockaddr *addr,
10255 struct mbuf *control,
10260 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10264 if (inp->sctp_socket == NULL) {
10265 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10268 return (sctp_sosend(inp->sctp_socket,
10270 (struct uio *)NULL,
10278 send_forward_tsn(struct sctp_tcb *stcb,
10279 struct sctp_association *asoc)
10281 struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10282 struct sctp_forward_tsn_chunk *fwdtsn;
10283 struct sctp_strseq *strseq;
10284 struct sctp_strseq_mid *strseq_m;
10285 uint32_t advance_peer_ack_point;
10286 unsigned int cnt_of_space, i, ovh;
10287 unsigned int space_needed;
10288 unsigned int cnt_of_skipped = 0;
10290 SCTP_TCB_LOCK_ASSERT(stcb);
10291 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10292 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10293 /* mark it to unsent */
10294 chk->sent = SCTP_DATAGRAM_UNSENT;
10295 chk->snd_count = 0;
10296 /* Do we correct its output location? */
10298 sctp_free_remote_addr(chk->whoTo);
10301 goto sctp_fill_in_rest;
10304 /* Ok if we reach here we must build one */
10305 sctp_alloc_a_chunk(stcb, chk);
10309 asoc->fwd_tsn_cnt++;
10310 chk->copy_by_ref = 0;
10312 * We don't do the old thing here since this is used not for on-wire
10313 * but to tell if we are sending a fwd-tsn by the stack during
10314 * output. And if it's an IFORWARD or a FORWARD it is a fwd-tsn.
10316 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10317 chk->rec.chunk_id.can_take_data = 0;
10321 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10322 if (chk->data == NULL) {
10323 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10326 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10327 chk->sent = SCTP_DATAGRAM_UNSENT;
10328 chk->snd_count = 0;
10329 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10330 asoc->ctrl_queue_cnt++;
10333 * Here we go through and fill out the part that deals with
10334 * stream/seq of the ones we skip.
10336 SCTP_BUF_LEN(chk->data) = 0;
10337 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10338 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10339 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10340 /* no more to look at */
10343 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10344 /* We don't report these */
10349 if (asoc->idata_supported) {
10350 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10351 (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10353 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10354 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10356 cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
10358 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10359 ovh = SCTP_MIN_OVERHEAD;
10361 ovh = SCTP_MIN_V4_OVERHEAD;
10363 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10364 /* trim to a mtu size */
10365 cnt_of_space = asoc->smallest_mtu - ovh;
10367 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10368 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10369 0xff, 0, cnt_of_skipped,
10370 asoc->advanced_peer_ack_point);
10372 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10373 if (cnt_of_space < space_needed) {
10375 * ok we must trim down the chunk by lowering the
10376 * advance peer ack point.
10378 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10379 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10380 0xff, 0xff, cnt_of_space,
10383 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10384 if (asoc->idata_supported) {
10385 cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10387 cnt_of_skipped /= sizeof(struct sctp_strseq);
10390 * Go through and find the TSN that will be the one
10393 at = TAILQ_FIRST(&asoc->sent_queue);
10395 for (i = 0; i < cnt_of_skipped; i++) {
10396 tp1 = TAILQ_NEXT(at, sctp_next);
10403 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10404 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10405 0xff, cnt_of_skipped, at->rec.data.tsn,
10406 asoc->advanced_peer_ack_point);
10410 * last now points to last one I can report, update
10414 advance_peer_ack_point = last->rec.data.tsn;
10416 if (asoc->idata_supported) {
10417 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10418 cnt_of_skipped * sizeof(struct sctp_strseq_mid);
10420 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10421 cnt_of_skipped * sizeof(struct sctp_strseq);
10424 chk->send_size = space_needed;
10425 /* Setup the chunk */
10426 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10427 fwdtsn->ch.chunk_length = htons(chk->send_size);
10428 fwdtsn->ch.chunk_flags = 0;
10429 if (asoc->idata_supported) {
10430 fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
10432 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10434 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10435 SCTP_BUF_LEN(chk->data) = chk->send_size;
10438 * Move pointer to after the fwdtsn and transfer to the
10441 if (asoc->idata_supported) {
10442 strseq_m = (struct sctp_strseq_mid *)fwdtsn;
10445 strseq = (struct sctp_strseq *)fwdtsn;
10449 * Now populate the strseq list. This is done blindly
10450 * without pulling out duplicate stream info. This is
10451 * inefficient but won't harm the process since the peer will
10452 * look at these in sequence and will thus release anything.
10453 * It could mean we exceed the PMTU and chop off some that
10454 * we could have included.. but this is unlikely (aka 1432/4
10455 * would mean 300+ stream seq's would have to be reported in
10456 * one FWD-TSN). With a bit of work we can later FIX this to
10457 * optimize and pull out duplicates.. but it does add more
10458 * overhead. So for now... not!
10461 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10462 if (i >= cnt_of_skipped) {
10465 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10466 /* We don't report these */
10469 if (at->rec.data.tsn == advance_peer_ack_point) {
10470 at->rec.data.fwd_tsn_cnt = 0;
10472 if (asoc->idata_supported) {
10473 strseq_m->sid = htons(at->rec.data.sid);
10474 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10475 strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
10477 strseq_m->flags = 0;
10479 strseq_m->mid = htonl(at->rec.data.mid);
10482 strseq->sid = htons(at->rec.data.sid);
10483 strseq->ssn = htons((uint16_t)at->rec.data.mid);
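/*
 * Two on-wire report formats: interleaving associations use
 * sctp_strseq_mid (sid, flags, 32-bit mid) for I-FORWARD-TSN, while the
 * classic FORWARD-TSN uses sctp_strseq with the 16-bit ssn, which is
 * why the mid is truncated to uint16_t here.
 */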
10492 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10493 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10499 * Queue up a SACK or NR-SACK in the control queue.
10500 * We must first check to see if a SACK or NR-SACK is
10501 * somehow on the control queue.
10502 * If so, we will take and remove the old one.
10504 struct sctp_association *asoc;
10505 struct sctp_tmit_chunk *chk, *a_chk;
10506 struct sctp_sack_chunk *sack;
10507 struct sctp_nr_sack_chunk *nr_sack;
10508 struct sctp_gap_ack_block *gap_descriptor;
10509 const struct sack_track *selector;
10514 int limit_reached = 0;
10515 unsigned int i, siz, j;
10516 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10519 uint32_t highest_tsn;
10524 if (stcb->asoc.nrsack_supported == 1) {
10525 type = SCTP_NR_SELECTIVE_ACK;
10527 type = SCTP_SELECTIVE_ACK;
10530 asoc = &stcb->asoc;
10531 SCTP_TCB_LOCK_ASSERT(stcb);
10532 if (asoc->last_data_chunk_from == NULL) {
10533 /* Hmm we never received anything */
10536 sctp_slide_mapping_arrays(stcb);
10537 sctp_set_rwnd(stcb, asoc);
10538 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10539 if (chk->rec.chunk_id.id == type) {
10540 /* Hmm, found a sack already on queue, remove it */
10541 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10542 asoc->ctrl_queue_cnt--;
10545 sctp_m_freem(a_chk->data);
10546 a_chk->data = NULL;
10548 if (a_chk->whoTo) {
10549 sctp_free_remote_addr(a_chk->whoTo);
10550 a_chk->whoTo = NULL;
10555 if (a_chk == NULL) {
10556 sctp_alloc_a_chunk(stcb, a_chk);
10557 if (a_chk == NULL) {
10558 /* No memory so we drop the idea, and set a timer */
10559 if (stcb->asoc.delayed_ack) {
10560 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10561 stcb->sctp_ep, stcb, NULL,
10562 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
10563 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10564 stcb->sctp_ep, stcb, NULL);
10566 stcb->asoc.send_sack = 1;
10570 a_chk->copy_by_ref = 0;
10571 a_chk->rec.chunk_id.id = type;
10572 a_chk->rec.chunk_id.can_take_data = 1;
10574 /* Clear our pkt counts */
10575 asoc->data_pkts_seen = 0;
10578 a_chk->asoc = asoc;
10579 a_chk->snd_count = 0;
10580 a_chk->send_size = 0; /* fill in later */
10581 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10582 a_chk->whoTo = NULL;
10584 if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
10586 * Ok, the destination for the SACK is unreachable, lets see if
10587 * we can select an alternate to asoc->last_data_chunk_from
10589 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10590 if (a_chk->whoTo == NULL) {
10591 /* Nope, no alternate */
10592 a_chk->whoTo = asoc->last_data_chunk_from;
10595 a_chk->whoTo = asoc->last_data_chunk_from;
10597 if (a_chk->whoTo) {
10598 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10600 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10601 highest_tsn = asoc->highest_tsn_inside_map;
10603 highest_tsn = asoc->highest_tsn_inside_nr_map;
10605 if (highest_tsn == asoc->cumulative_tsn) {
10607 if (type == SCTP_SELECTIVE_ACK) {
10608 space_req = sizeof(struct sctp_sack_chunk);
10610 space_req = sizeof(struct sctp_nr_sack_chunk);
10613 /* gaps get a cluster */
10614 space_req = MCLBYTES;
10616 /* Ok now let's formulate an mbuf with our sack */
10617 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
10618 if ((a_chk->data == NULL) ||
10619 (a_chk->whoTo == NULL)) {
10620 /* rats, no mbuf memory */
10622 /* was a problem with the destination */
10623 sctp_m_freem(a_chk->data);
10624 a_chk->data = NULL;
10626 sctp_free_a_chunk(stcb, a_chk, so_locked);
10627 /* sa_ignore NO_NULL_CHK */
10628 if (stcb->asoc.delayed_ack) {
10629 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10630 stcb->sctp_ep, stcb, NULL,
10631 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
10632 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10633 stcb->sctp_ep, stcb, NULL);
10635 stcb->asoc.send_sack = 1;
10639 /* ok, let's go through and fill it in */
10640 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10641 space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
10642 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10643 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
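/*
 * Note: the SACK being built is capped to what fits in one packet toward
 * the chosen destination (its MTU minus the minimal overhead reserved
 * above); gap blocks and duplicate TSNs beyond that limit simply are not
 * reported in this SACK.
 */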
10645 limit = mtod(a_chk->data, caddr_t);
10650 if ((asoc->sctp_cmt_on_off > 0) &&
10651 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10653 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
10654 * received, then set the high bit to 1, else 0. Reset pkts_rcvd.
10657 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10658 asoc->cmt_dac_pkts_rcvd = 0;
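/*
 * Note: the two high-order bits of the SACK chunk flags carry the
 * delayed-ack packet count (cmt_dac_pkts_rcvd shifted left by 6 above),
 * which the peer's CMT DAC logic inspects when deciding how to ack.
 */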
10660 #ifdef SCTP_ASOCLOG_OF_TSNS
10661 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10662 stcb->asoc.cumack_log_atsnt++;
10663 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10664 stcb->asoc.cumack_log_atsnt = 0;
10667 /* reset the reader's interpretation */
10668 stcb->freed_by_sorcv_sincelast = 0;
10670 if (type == SCTP_SELECTIVE_ACK) {
10671 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10673 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10674 if (highest_tsn > asoc->mapping_array_base_tsn) {
10675 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10677 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10681 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10682 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10683 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10684 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10686 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10690 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10693 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10695 if (((type == SCTP_SELECTIVE_ACK) &&
10696 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10697 ((type == SCTP_NR_SELECTIVE_ACK) &&
10698 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10699 /* we have a gap .. maybe */
10700 for (i = 0; i < siz; i++) {
10701 tsn_map = asoc->mapping_array[i];
10702 if (type == SCTP_SELECTIVE_ACK) {
10703 tsn_map |= asoc->nr_mapping_array[i];
10707 * Clear all bits corresponding to TSNs
10708 * smaller than or equal to the cumulative TSN.
10710 tsn_map &= (~0U << (1 - offset));
10712 selector = &sack_array[tsn_map];
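/*
 * Roughly: sack_array[] is a 256-entry lookup table indexed by one byte
 * of the receive bitmap; each entry holds the precomputed gap runs inside
 * that byte plus flags saying whether a run touches the byte's left or
 * right edge (and so may merge with the neighboring byte's runs).
 */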
10713 if (mergeable && selector->right_edge) {
10715 * Backup, left and right edges were ok to merge.
10721 if (selector->num_entries == 0)
10724 for (j = 0; j < selector->num_entries; j++) {
10725 if (mergeable && selector->right_edge) {
10727 * do a merge by NOT setting the gap start
10733 * no merge, set the gap start (left edge)
10737 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10739 gap_descriptor->end = htons((selector->gaps[j].end + offset));
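/*
 * Per RFC 4960, gap ack block start/end values are offsets relative to
 * the cumulative TSN ack; the "+ offset" above converts bit positions in
 * the mapping array into such offsets.
 */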
10742 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10748 if (selector->left_edge) {
10752 if (limit_reached) {
10753 /* Reached the limit, stop */
10759 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10760 (limit_reached == 0)) {
10764 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10765 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10767 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10770 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10773 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10775 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10776 /* we have a gap .. maybe */
10777 for (i = 0; i < siz; i++) {
10778 tsn_map = asoc->nr_mapping_array[i];
10781 * Clear all bits corresponding to
10782 * TSNs smaller than or equal to the cumulative TSN.
10785 tsn_map &= (~0U << (1 - offset));
10787 selector = &sack_array[tsn_map];
10788 if (mergeable && selector->right_edge) {
10790 * Backup, left and right edges were ok to merge.
10793 num_nr_gap_blocks--;
10796 if (selector->num_entries == 0)
10799 for (j = 0; j < selector->num_entries; j++) {
10800 if (mergeable && selector->right_edge) {
10802 * do a merge by NOT setting the gap start
10809 * no merge, set the gap start (left edge)
10813 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10815 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10816 num_nr_gap_blocks++;
10818 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10824 if (selector->left_edge) {
10828 if (limit_reached) {
10829 /* Reached the limit, stop */
10836 /* now we must add any dups we are going to report. */
10837 if ((limit_reached == 0) && (asoc->numduptsns)) {
10838 dup = (uint32_t *)gap_descriptor;
10839 for (i = 0; i < asoc->numduptsns; i++) {
10840 *dup = htonl(asoc->dup_tsns[i]);
10843 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10848 asoc->numduptsns = 0;
10851 * now that the chunk is prepared, queue it to the control chunk queue
10854 if (type == SCTP_SELECTIVE_ACK) {
10855 a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
10856 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10857 num_dups * sizeof(int32_t));
10858 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10859 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10860 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10861 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10862 sack->sack.num_dup_tsns = htons(num_dups);
10863 sack->ch.chunk_type = type;
10864 sack->ch.chunk_flags = flags;
10865 sack->ch.chunk_length = htons(a_chk->send_size);
10867 a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
10868 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10869 num_dups * sizeof(int32_t));
10870 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10871 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10872 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10873 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10874 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10875 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10876 nr_sack->nr_sack.reserved = 0;
10877 nr_sack->ch.chunk_type = type;
10878 nr_sack->ch.chunk_flags = flags;
10879 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10881 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10882 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10883 asoc->ctrl_queue_cnt++;
10884 asoc->send_sack = 0;
10885 SCTP_STAT_INCR(sctps_sendsacks);
10890 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10891 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10896 struct mbuf *m_abort, *m, *m_last;
10897 struct mbuf *m_out, *m_end = NULL;
10898 struct sctp_abort_chunk *abort;
10899 struct sctp_auth_chunk *auth = NULL;
10900 struct sctp_nets *net;
10902 uint32_t auth_offset = 0;
10904 uint16_t cause_len, chunk_len, padding_len;
10906 SCTP_TCB_LOCK_ASSERT(stcb);
10908 * Add an AUTH chunk, if chunk requires it, and save the offset into
10909 * the chain for AUTH
10911 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10912 stcb->asoc.peer_auth_chunks)) {
10913 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10914 stcb, SCTP_ABORT_ASSOCIATION);
10915 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10919 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
10920 if (m_abort == NULL) {
10922 sctp_m_freem(m_out);
10925 sctp_m_freem(operr);
10929 /* link in any error */
10930 SCTP_BUF_NEXT(m_abort) = operr;
10933 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10934 cause_len += (uint16_t)SCTP_BUF_LEN(m);
10935 if (SCTP_BUF_NEXT(m) == NULL) {
10939 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10940 chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
10941 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
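/*
 * SCTP_SIZE32() rounds up to a multiple of 4, so padding_len is the
 * number of pad bytes needed to 32-bit-align the chunk; e.g. a chunk_len
 * of 18 gives SCTP_SIZE32(18) == 20 and padding_len == 2.
 */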
10942 if (m_out == NULL) {
10943 /* NO Auth chunk prepended, so reserve space in front */
10944 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10947 /* Put AUTH chunk at the front of the chain */
10948 SCTP_BUF_NEXT(m_end) = m_abort;
10950 if (stcb->asoc.alternate) {
10951 net = stcb->asoc.alternate;
10953 net = stcb->asoc.primary_destination;
10955 /* Fill in the ABORT chunk header. */
10956 abort = mtod(m_abort, struct sctp_abort_chunk *);
10957 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10958 if (stcb->asoc.peer_vtag == 0) {
10959 /* This happens iff the assoc is in COOKIE-WAIT state. */
10960 vtag = stcb->asoc.my_vtag;
10961 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
10963 vtag = stcb->asoc.peer_vtag;
10964 abort->ch.chunk_flags = 0;
10966 abort->ch.chunk_length = htons(chunk_len);
10967 /* Add padding, if necessary. */
10968 if (padding_len > 0) {
10969 if ((m_last == NULL) ||
10970 (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
10971 sctp_m_freem(m_out);
10975 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10976 (struct sockaddr *)&net->ro._l_addr,
10977 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10978 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
10979 stcb->asoc.primary_destination->port, NULL,
10982 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10983 if (error == ENOBUFS) {
10984 stcb->asoc.ifp_had_enobuf = 1;
10985 SCTP_STAT_INCR(sctps_lowlevelerr);
10988 stcb->asoc.ifp_had_enobuf = 0;
10990 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10994 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10995 struct sctp_nets *net,
10998 /* formulate and SEND a SHUTDOWN-COMPLETE */
10999 struct mbuf *m_shutdown_comp;
11000 struct sctp_shutdown_complete_chunk *shutdown_complete;
11005 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11006 if (m_shutdown_comp == NULL) {
11010 if (reflect_vtag) {
11011 flags = SCTP_HAD_NO_TCB;
11012 vtag = stcb->asoc.my_vtag;
11015 vtag = stcb->asoc.peer_vtag;
11017 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11018 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11019 shutdown_complete->ch.chunk_flags = flags;
11020 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11021 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11022 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11023 (struct sockaddr *)&net->ro._l_addr,
11024 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11025 stcb->sctp_ep->sctp_lport, stcb->rport,
11029 SCTP_SO_NOT_LOCKED))) {
11030 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11031 if (error == ENOBUFS) {
11032 stcb->asoc.ifp_had_enobuf = 1;
11033 SCTP_STAT_INCR(sctps_lowlevelerr);
11036 stcb->asoc.ifp_had_enobuf = 0;
11038 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11043 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11044 struct sctphdr *sh, uint32_t vtag,
11045 uint8_t type, struct mbuf *cause,
11046 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11047 uint32_t vrf_id, uint16_t port)
11049 struct mbuf *o_pak;
11051 struct sctphdr *shout;
11052 struct sctp_chunkhdr *ch;
11053 #if defined(INET) || defined(INET6)
11054 struct udphdr *udp;
11056 int ret, len, cause_len, padding_len;
11058 struct sockaddr_in *src_sin, *dst_sin;
11062 struct sockaddr_in6 *src_sin6, *dst_sin6;
11063 struct ip6_hdr *ip6;
11066 /* Compute the length of the cause and add final padding. */
11068 if (cause != NULL) {
11069 struct mbuf *m_at, *m_last = NULL;
11071 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11072 if (SCTP_BUF_NEXT(m_at) == NULL)
11074 cause_len += SCTP_BUF_LEN(m_at);
11076 padding_len = cause_len % 4;
11077 if (padding_len != 0) {
11078 padding_len = 4 - padding_len;
11080 if (padding_len != 0) {
11081 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11082 sctp_m_freem(cause);
11089 /* Get an mbuf for the header. */
11090 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11091 switch (dst->sa_family) {
11094 len += sizeof(struct ip);
11099 len += sizeof(struct ip6_hdr);
11105 #if defined(INET) || defined(INET6)
11107 len += sizeof(struct udphdr);
11110 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11111 if (mout == NULL) {
11113 sctp_m_freem(cause);
11117 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11118 SCTP_BUF_LEN(mout) = len;
11119 SCTP_BUF_NEXT(mout) = cause;
11120 M_SETFIB(mout, fibnum);
11121 mout->m_pkthdr.flowid = mflowid;
11122 M_HASHTYPE_SET(mout, mflowtype);
11129 switch (dst->sa_family) {
11132 src_sin = (struct sockaddr_in *)src;
11133 dst_sin = (struct sockaddr_in *)dst;
11134 ip = mtod(mout, struct ip *);
11135 ip->ip_v = IPVERSION;
11136 ip->ip_hl = (sizeof(struct ip) >> 2);
11138 ip->ip_off = htons(IP_DF);
11140 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11142 ip->ip_p = IPPROTO_UDP;
11144 ip->ip_p = IPPROTO_SCTP;
11146 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11147 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
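/*
 * The reply goes back to the originator of the packet we are responding
 * to, so ip_src/ip_dst above are taken from the received packet's
 * destination and source addresses respectively (i.e., swapped).
 */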
11149 len = sizeof(struct ip);
11150 shout = (struct sctphdr *)((caddr_t)ip + len);
11155 src_sin6 = (struct sockaddr_in6 *)src;
11156 dst_sin6 = (struct sockaddr_in6 *)dst;
11157 ip6 = mtod(mout, struct ip6_hdr *);
11158 ip6->ip6_flow = htonl(0x60000000);
11159 if (V_ip6_auto_flowlabel) {
11160 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11162 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11164 ip6->ip6_nxt = IPPROTO_UDP;
11166 ip6->ip6_nxt = IPPROTO_SCTP;
11168 ip6->ip6_src = dst_sin6->sin6_addr;
11169 ip6->ip6_dst = src_sin6->sin6_addr;
11170 len = sizeof(struct ip6_hdr);
11171 shout = (struct sctphdr *)((caddr_t)ip6 + len);
11176 shout = mtod(mout, struct sctphdr *);
11179 #if defined(INET) || defined(INET6)
11181 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11182 sctp_m_freem(mout);
11185 udp = (struct udphdr *)shout;
11186 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11187 udp->uh_dport = port;
11189 udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
11190 sizeof(struct sctphdr) +
11191 sizeof(struct sctp_chunkhdr) +
11192 cause_len + padding_len));
11193 len += sizeof(struct udphdr);
11194 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
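/*
 * UDP encapsulation: when a tunneling port is in use, a UDP header sits
 * between the IP header and the SCTP common header. uh_ulen covers the
 * UDP header, SCTP header, chunk header and the (padded) cause, and shout
 * is advanced so the SCTP header is written after the UDP header.
 */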
11199 shout->src_port = sh->dest_port;
11200 shout->dest_port = sh->src_port;
11201 shout->checksum = 0;
11203 shout->v_tag = htonl(vtag);
11205 shout->v_tag = sh->v_tag;
11207 len += sizeof(struct sctphdr);
11208 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11209 ch->chunk_type = type;
11211 ch->chunk_flags = 0;
11213 ch->chunk_flags = SCTP_HAD_NO_TCB;
11215 ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
11216 len += sizeof(struct sctp_chunkhdr);
11217 len += cause_len + padding_len;
11219 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11220 sctp_m_freem(mout);
11223 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11224 switch (dst->sa_family) {
11229 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11234 ip->ip_len = htons(len);
11236 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11237 SCTP_STAT_INCR(sctps_sendswcrc);
11239 SCTP_ENABLE_UDP_CSUM(o_pak);
11242 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11243 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11244 SCTP_STAT_INCR(sctps_sendhwcrc);
11246 #ifdef SCTP_PACKET_LOGGING
11247 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11248 sctp_packet_log(o_pak);
11251 SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout);
11252 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11257 ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
11259 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11260 SCTP_STAT_INCR(sctps_sendswcrc);
11261 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11262 udp->uh_sum = 0xffff;
11265 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11266 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11267 SCTP_STAT_INCR(sctps_sendhwcrc);
11269 #ifdef SCTP_PACKET_LOGGING
11270 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11271 sctp_packet_log(o_pak);
11274 SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout);
11275 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11279 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11281 sctp_m_freem(mout);
11282 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11285 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
11287 UDPSTAT_INC(udps_opackets);
11289 SCTP_STAT_INCR(sctps_sendpackets);
11290 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11291 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11293 SCTP_STAT_INCR(sctps_senderrors);
11299 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11300 struct sctphdr *sh,
11301 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11302 uint32_t vrf_id, uint16_t port)
11304 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11305 mflowtype, mflowid, fibnum,
11310 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11311 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11316 struct sctp_tmit_chunk *chk;
11317 struct sctp_heartbeat_chunk *hb;
11318 struct timeval now;
11320 SCTP_TCB_LOCK_ASSERT(stcb);
11324 (void)SCTP_GETTIME_TIMEVAL(&now);
11325 switch (net->ro._l_addr.sa.sa_family) {
11337 sctp_alloc_a_chunk(stcb, chk);
11339 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11343 chk->copy_by_ref = 0;
11344 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11345 chk->rec.chunk_id.can_take_data = 1;
11347 chk->asoc = &stcb->asoc;
11348 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11350 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11351 if (chk->data == NULL) {
11352 sctp_free_a_chunk(stcb, chk, so_locked);
11355 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11356 SCTP_BUF_LEN(chk->data) = chk->send_size;
11357 chk->sent = SCTP_DATAGRAM_UNSENT;
11358 chk->snd_count = 0;
11360 atomic_add_int(&chk->whoTo->ref_count, 1);
11361 /* Now we have a mbuf that we can fill in with the details */
11362 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11363 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11364 /* fill out chunk header */
11365 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11366 hb->ch.chunk_flags = 0;
11367 hb->ch.chunk_length = htons(chk->send_size);
11368 /* Fill out hb parameter */
11369 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11370 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11371 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11372 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
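/*
 * The send time is carried inside the heartbeat info parameter so that,
 * when the peer echoes it back in the HEARTBEAT-ACK, an RTT sample for
 * this path can be computed.
 */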
11373 /* Did our user request this one? Put it in */
11374 hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
11375 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11376 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11378 * we only take from the entropy pool if the address is not confirmed.
11381 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11382 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11384 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11385 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11387 switch (net->ro._l_addr.sa.sa_family) {
11390 memcpy(hb->heartbeat.hb_info.address,
11391 &net->ro._l_addr.sin.sin_addr,
11392 sizeof(net->ro._l_addr.sin.sin_addr));
11397 memcpy(hb->heartbeat.hb_info.address,
11398 &net->ro._l_addr.sin6.sin6_addr,
11399 sizeof(net->ro._l_addr.sin6.sin6_addr));
11404 sctp_m_freem(chk->data);
11407 sctp_free_a_chunk(stcb, chk, so_locked);
11411 net->hb_responded = 0;
11412 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11413 stcb->asoc.ctrl_queue_cnt++;
11414 SCTP_STAT_INCR(sctps_sendheartbeat);
11419 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11422 struct sctp_association *asoc;
11423 struct sctp_ecne_chunk *ecne;
11424 struct sctp_tmit_chunk *chk;
11429 asoc = &stcb->asoc;
11430 SCTP_TCB_LOCK_ASSERT(stcb);
11431 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11432 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11433 /* found a previous ECN_ECHO, update it if needed */
11434 uint32_t cnt, ctsn;
11436 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11437 ctsn = ntohl(ecne->tsn);
11438 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11439 ecne->tsn = htonl(high_tsn);
11440 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11442 cnt = ntohl(ecne->num_pkts_since_cwr);
11444 ecne->num_pkts_since_cwr = htonl(cnt);
11448 /* nope could not find one to update so we must build one */
11449 sctp_alloc_a_chunk(stcb, chk);
11453 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11454 chk->copy_by_ref = 0;
11455 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11456 chk->rec.chunk_id.can_take_data = 0;
11458 chk->asoc = &stcb->asoc;
11459 chk->send_size = sizeof(struct sctp_ecne_chunk);
11460 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11461 if (chk->data == NULL) {
11462 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11465 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11466 SCTP_BUF_LEN(chk->data) = chk->send_size;
11467 chk->sent = SCTP_DATAGRAM_UNSENT;
11468 chk->snd_count = 0;
11470 atomic_add_int(&chk->whoTo->ref_count, 1);
11472 stcb->asoc.ecn_echo_cnt_onq++;
11473 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11474 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11475 ecne->ch.chunk_flags = 0;
11476 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11477 ecne->tsn = htonl(high_tsn);
11478 ecne->num_pkts_since_cwr = htonl(1);
11479 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11480 asoc->ctrl_queue_cnt++;
11484 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11485 struct mbuf *m, int len, int iphlen, int bad_crc)
11487 struct sctp_association *asoc;
11488 struct sctp_pktdrop_chunk *drp;
11489 struct sctp_tmit_chunk *chk;
11495 struct sctp_chunkhdr *ch, chunk_buf;
11496 unsigned int chk_length;
11501 asoc = &stcb->asoc;
11502 SCTP_TCB_LOCK_ASSERT(stcb);
11503 if (asoc->pktdrop_supported == 0) {
11505 * peer must declare support before I send one.
11509 if (stcb->sctp_socket == NULL) {
11512 sctp_alloc_a_chunk(stcb, chk);
11516 chk->copy_by_ref = 0;
11517 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11518 chk->rec.chunk_id.can_take_data = 1;
11521 chk->send_size = len;
11522 /* Validate that we do not have an ABORT in here. */
11523 offset = iphlen + sizeof(struct sctphdr);
11524 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11525 sizeof(*ch), (uint8_t *)&chunk_buf);
11526 while (ch != NULL) {
11527 chk_length = ntohs(ch->chunk_length);
11528 if (chk_length < sizeof(*ch)) {
11529 /* break to abort land */
11532 switch (ch->chunk_type) {
11533 case SCTP_PACKET_DROPPED:
11534 case SCTP_ABORT_ASSOCIATION:
11535 case SCTP_INITIATION_ACK:
11537 * We don't respond with a PKT-DROP to an ABORT
11538 * or PKT-DROP. We also do not respond to an
11539 * INIT-ACK, because we can't know if the initiation
11540 * tag is correct or not.
11542 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11547 offset += SCTP_SIZE32(chk_length);
11548 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11549 sizeof(*ch), (uint8_t *)&chunk_buf);
11552 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11553 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11555 * only send 1 mtu worth, trim off the excess on the end.
11558 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11561 chk->asoc = &stcb->asoc;
11562 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11563 if (chk->data == NULL) {
11565 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11568 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11569 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11571 sctp_m_freem(chk->data);
11575 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11576 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11577 chk->book_size_scale = 0;
11579 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11580 drp->trunc_len = htons(fullsz);
11582 * Len is already adjusted to size minus overhead above; take
11583 * the pkt_drop chunk itself out of it.
11585 chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
11586 len = chk->send_size;
11588 /* no truncation needed */
11589 drp->ch.chunk_flags = 0;
11590 drp->trunc_len = htons(0);
11593 drp->ch.chunk_flags |= SCTP_BADCRC;
11595 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11596 SCTP_BUF_LEN(chk->data) = chk->send_size;
11597 chk->sent = SCTP_DATAGRAM_UNSENT;
11598 chk->snd_count = 0;
11600 /* we should hit here */
11602 atomic_add_int(&chk->whoTo->ref_count, 1);
11606 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11607 drp->ch.chunk_length = htons(chk->send_size);
11608 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11612 drp->bottle_bw = htonl(spc);
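/*
 * Note: the bottleneck-bandwidth field is filled with our socket receive
 * buffer limit, and current_onq (set below) reports how much of that
 * space is currently in use, so the peer can gauge how full our receive
 * side is.
 */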
11613 if (asoc->my_rwnd) {
11614 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11615 asoc->size_on_all_streams +
11616 asoc->my_rwnd_control_len +
11617 stcb->sctp_socket->so_rcv.sb_cc);
11620 * If my rwnd is 0, possibly from mbuf depletion as well as
11621 * space used, tell the peer there is NO space aka onq == bw
11623 drp->current_onq = htonl(spc);
11627 m_copydata(m, iphlen, len, (caddr_t)datap);
11628 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11629 asoc->ctrl_queue_cnt++;
11633 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11635 struct sctp_association *asoc;
11636 struct sctp_cwr_chunk *cwr;
11637 struct sctp_tmit_chunk *chk;
11639 SCTP_TCB_LOCK_ASSERT(stcb);
11643 asoc = &stcb->asoc;
11644 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11645 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11647 * found a previous CWR queued to the same destination,
11648 * update it if needed
11652 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11653 ctsn = ntohl(cwr->tsn);
11654 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11655 cwr->tsn = htonl(high_tsn);
11657 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11658 /* Make sure override is carried */
11659 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11664 sctp_alloc_a_chunk(stcb, chk);
11668 chk->copy_by_ref = 0;
11669 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11670 chk->rec.chunk_id.can_take_data = 1;
11672 chk->asoc = &stcb->asoc;
11673 chk->send_size = sizeof(struct sctp_cwr_chunk);
11674 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11675 if (chk->data == NULL) {
11676 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11679 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11680 SCTP_BUF_LEN(chk->data) = chk->send_size;
11681 chk->sent = SCTP_DATAGRAM_UNSENT;
11682 chk->snd_count = 0;
11684 atomic_add_int(&chk->whoTo->ref_count, 1);
11685 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11686 cwr->ch.chunk_type = SCTP_ECN_CWR;
11687 cwr->ch.chunk_flags = override;
11688 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11689 cwr->tsn = htonl(high_tsn);
11690 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11691 asoc->ctrl_queue_cnt++;
11695 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
11696 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11698 uint16_t len, old_len, i;
11699 struct sctp_stream_reset_out_request *req_out;
11700 struct sctp_chunkhdr *ch;
11702 int number_entries = 0;
11704 ch = mtod(chk->data, struct sctp_chunkhdr *);
11705 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11706 /* get to new offset for the param. */
11707 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11708 /* now how long will this param be? */
11709 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11710 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11711 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11712 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11716 if (number_entries == 0) {
11719 if (number_entries == stcb->asoc.streamoutcnt) {
11720 number_entries = 0;
11722 if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
11723 number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
11725 len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11726 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11727 req_out->ph.param_length = htons(len);
11728 req_out->request_seq = htonl(seq);
11729 req_out->response_seq = htonl(resp_seq);
11730 req_out->send_reset_at_tsn = htonl(last_sent);
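/*
 * Outgoing SSN reset request (RFC 6525): request_seq is this end's
 * re-configuration request sequence number, response_seq acknowledges the
 * peer's requests seen so far, and send_reset_at_tsn carries the sender's
 * last assigned TSN, i.e. the last TSN sent before the reset takes effect.
 */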
11732 if (number_entries) {
11733 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11734 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11735 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11736 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11737 req_out->list_of_streams[at] = htons(i);
11739 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11740 if (at >= number_entries) {
11746 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11747 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11750 if (SCTP_SIZE32(len) > len) {
11752 * Need to worry about the pad we may end up adding to the
11753 * end. This is easy since the struct is either aligned to 4
11754 * bytes or 2 bytes off.
11756 req_out->list_of_streams[number_entries] = 0;
11758 /* now fix the chunk length */
11759 ch->chunk_length = htons(len + old_len);
11760 chk->book_size = len + old_len;
11761 chk->book_size_scale = 0;
11762 chk->send_size = SCTP_SIZE32(chk->book_size);
11763 SCTP_BUF_LEN(chk->data) = chk->send_size;
11768 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11769 int number_entries, uint16_t *list,
11772 uint16_t len, old_len, i;
11773 struct sctp_stream_reset_in_request *req_in;
11774 struct sctp_chunkhdr *ch;
11776 ch = mtod(chk->data, struct sctp_chunkhdr *);
11777 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11779 /* get to new offset for the param. */
11780 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11781 /* now how long will this param be? */
11782 len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11783 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11784 req_in->ph.param_length = htons(len);
11785 req_in->request_seq = htonl(seq);
11786 if (number_entries) {
11787 for (i = 0; i < number_entries; i++) {
11788 req_in->list_of_streams[i] = htons(list[i]);
11791 if (SCTP_SIZE32(len) > len) {
11793 * Need to worry about the pad we may end up adding to the
11794 * end. This is easy since the struct is either aligned to 4
11795 * bytes or 2 bytes off.
11797 req_in->list_of_streams[number_entries] = 0;
11799 /* now fix the chunk length */
11800 ch->chunk_length = htons(len + old_len);
11801 chk->book_size = len + old_len;
11802 chk->book_size_scale = 0;
11803 chk->send_size = SCTP_SIZE32(chk->book_size);
11804 SCTP_BUF_LEN(chk->data) = chk->send_size;
11809 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11812 uint16_t len, old_len;
11813 struct sctp_stream_reset_tsn_request *req_tsn;
11814 struct sctp_chunkhdr *ch;
11816 ch = mtod(chk->data, struct sctp_chunkhdr *);
11817 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11819 /* get to new offset for the param. */
11820 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11821 /* now how long will this param be? */
11822 len = sizeof(struct sctp_stream_reset_tsn_request);
11823 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11824 req_tsn->ph.param_length = htons(len);
11825 req_tsn->request_seq = htonl(seq);
11827 /* now fix the chunk length */
11828 ch->chunk_length = htons(len + old_len);
11829 chk->send_size = len + old_len;
11830 chk->book_size = SCTP_SIZE32(chk->send_size);
11831 chk->book_size_scale = 0;
11832 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11837 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11838 uint32_t resp_seq, uint32_t result)
11840 uint16_t len, old_len;
11841 struct sctp_stream_reset_response *resp;
11842 struct sctp_chunkhdr *ch;
11844 ch = mtod(chk->data, struct sctp_chunkhdr *);
11845 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11847 /* get to new offset for the param. */
11848 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11849 /* now how long will this param be? */
11850 len = sizeof(struct sctp_stream_reset_response);
11851 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11852 resp->ph.param_length = htons(len);
11853 resp->response_seq = htonl(resp_seq);
11854 resp->result = htonl(result);
11856 /* now fix the chunk length */
11857 ch->chunk_length = htons(len + old_len);
11858 chk->book_size = len + old_len;
11859 chk->book_size_scale = 0;
11860 chk->send_size = SCTP_SIZE32(chk->book_size);
11861 SCTP_BUF_LEN(chk->data) = chk->send_size;
11866 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
11867 struct sctp_stream_reset_list *ent,
11870 struct sctp_association *asoc;
11871 struct sctp_tmit_chunk *chk;
11872 struct sctp_chunkhdr *ch;
11874 asoc = &stcb->asoc;
11877 * Reset our last reset action to the new one IP -> response
11878 * (PERFORMED probably). This assures that if we fail to send, a
11879 * retran from the peer will get the new response.
11881 asoc->last_reset_action[0] = response;
11882 if (asoc->stream_reset_outstanding) {
11885 sctp_alloc_a_chunk(stcb, chk);
11887 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11890 chk->copy_by_ref = 0;
11891 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11892 chk->rec.chunk_id.can_take_data = 0;
11894 chk->asoc = &stcb->asoc;
11895 chk->book_size = sizeof(struct sctp_chunkhdr);
11896 chk->send_size = SCTP_SIZE32(chk->book_size);
11897 chk->book_size_scale = 0;
11898 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11899 if (chk->data == NULL) {
11900 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11901 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11904 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11905 /* setup chunk parameters */
11906 chk->sent = SCTP_DATAGRAM_UNSENT;
11907 chk->snd_count = 0;
11908 if (stcb->asoc.alternate) {
11909 chk->whoTo = stcb->asoc.alternate;
11911 chk->whoTo = stcb->asoc.primary_destination;
11913 ch = mtod(chk->data, struct sctp_chunkhdr *);
11914 ch->chunk_type = SCTP_STREAM_RESET;
11915 ch->chunk_flags = 0;
11916 ch->chunk_length = htons(chk->book_size);
11917 atomic_add_int(&chk->whoTo->ref_count, 1);
11918 SCTP_BUF_LEN(chk->data) = chk->send_size;
11919 sctp_add_stream_reset_result(chk, ent->seq, response);
11920 /* insert the chunk for sending */
11921 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11924 asoc->ctrl_queue_cnt++;
11928 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11929 uint32_t resp_seq, uint32_t result,
11930 uint32_t send_una, uint32_t recv_next)
11932 uint16_t len, old_len;
11933 struct sctp_stream_reset_response_tsn *resp;
11934 struct sctp_chunkhdr *ch;
11936 ch = mtod(chk->data, struct sctp_chunkhdr *);
11937 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11939 /* get to new offset for the param. */
11940 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11941 /* now how long will this param be? */
11942 len = sizeof(struct sctp_stream_reset_response_tsn);
11943 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11944 resp->ph.param_length = htons(len);
11945 resp->response_seq = htonl(resp_seq);
11946 resp->result = htonl(result);
11947 resp->senders_next_tsn = htonl(send_una);
11948 resp->receivers_next_tsn = htonl(recv_next);
11950 /* now fix the chunk length */
11951 ch->chunk_length = htons(len + old_len);
11952 chk->book_size = len + old_len;
11953 chk->send_size = SCTP_SIZE32(chk->book_size);
11954 chk->book_size_scale = 0;
11955 SCTP_BUF_LEN(chk->data) = chk->send_size;
11960 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11964 uint16_t len, old_len;
11965 struct sctp_chunkhdr *ch;
11966 struct sctp_stream_reset_add_strm *addstr;
11968 ch = mtod(chk->data, struct sctp_chunkhdr *);
11969 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11971 /* get to new offset for the param. */
11972 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11973 /* now how long will this param be? */
11974 len = sizeof(struct sctp_stream_reset_add_strm);
11977 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11978 addstr->ph.param_length = htons(len);
11979 addstr->request_seq = htonl(seq);
11980 addstr->number_of_streams = htons(adding);
11981 addstr->reserved = 0;
11983 /* now fix the chunk length */
11984 ch->chunk_length = htons(len + old_len);
11985 chk->send_size = len + old_len;
11986 chk->book_size = SCTP_SIZE32(chk->send_size);
11987 chk->book_size_scale = 0;
11988 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11993 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
11997 uint16_t len, old_len;
11998 struct sctp_chunkhdr *ch;
11999 struct sctp_stream_reset_add_strm *addstr;
12001 ch = mtod(chk->data, struct sctp_chunkhdr *);
12002 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12004 /* get to new offset for the param. */
12005 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12006 /* now how long will this param be? */
12007 len = sizeof(struct sctp_stream_reset_add_strm);
12009 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12010 addstr->ph.param_length = htons(len);
12011 addstr->request_seq = htonl(seq);
12012 addstr->number_of_streams = htons(adding);
12013 addstr->reserved = 0;
12015 /* now fix the chunk length */
12016 ch->chunk_length = htons(len + old_len);
12017 chk->send_size = len + old_len;
12018 chk->book_size = SCTP_SIZE32(chk->send_size);
12019 chk->book_size_scale = 0;
12020 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12025 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
12027 struct sctp_association *asoc;
12028 struct sctp_tmit_chunk *chk;
12029 struct sctp_chunkhdr *ch;
12032 asoc = &stcb->asoc;
12033 asoc->trigger_reset = 0;
12034 if (asoc->stream_reset_outstanding) {
12037 sctp_alloc_a_chunk(stcb, chk);
12039 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12042 chk->copy_by_ref = 0;
12043 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12044 chk->rec.chunk_id.can_take_data = 0;
12046 chk->asoc = &stcb->asoc;
12047 chk->book_size = sizeof(struct sctp_chunkhdr);
12048 chk->send_size = SCTP_SIZE32(chk->book_size);
12049 chk->book_size_scale = 0;
12050 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12051 if (chk->data == NULL) {
12052 sctp_free_a_chunk(stcb, chk, so_locked);
12053 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12056 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12058 /* setup chunk parameters */
12059 chk->sent = SCTP_DATAGRAM_UNSENT;
12060 chk->snd_count = 0;
12061 if (stcb->asoc.alternate) {
12062 chk->whoTo = stcb->asoc.alternate;
12064 chk->whoTo = stcb->asoc.primary_destination;
12066 ch = mtod(chk->data, struct sctp_chunkhdr *);
12067 ch->chunk_type = SCTP_STREAM_RESET;
12068 ch->chunk_flags = 0;
12069 ch->chunk_length = htons(chk->book_size);
12070 atomic_add_int(&chk->whoTo->ref_count, 1);
12071 SCTP_BUF_LEN(chk->data) = chk->send_size;
12072 seq = stcb->asoc.str_reset_seq_out;
12073 if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12075 asoc->stream_reset_outstanding++;
12077 m_freem(chk->data);
12079 sctp_free_a_chunk(stcb, chk, so_locked);
12082 asoc->str_reset = chk;
12083 /* insert the chunk for sending */
12084 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12087 asoc->ctrl_queue_cnt++;
12089 if (stcb->asoc.send_sack) {
12090 sctp_send_sack(stcb, so_locked);
12092 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12097 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12098 uint16_t number_entries, uint16_t *list,
12099 uint8_t send_in_req,
12100 uint8_t send_tsn_req,
12101 uint8_t add_stream,
12103 uint16_t adding_i, uint8_t peer_asked)
12105 struct sctp_association *asoc;
12106 struct sctp_tmit_chunk *chk;
12107 struct sctp_chunkhdr *ch;
12108 int can_send_out_req = 0;
12111 asoc = &stcb->asoc;
12112 if (asoc->stream_reset_outstanding) {
12114 * Already one pending, must get ACK back to clear the flag.
12116 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12119 if ((send_in_req == 0) && (send_tsn_req == 0) &&
12120 (add_stream == 0)) {
12121 /* nothing to do */
12122 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12125 if (send_tsn_req && send_in_req) {
12126 /* error, can't do that */
12127 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12129 } else if (send_in_req) {
12130 can_send_out_req = 1;
12132 if (number_entries > (MCLBYTES -
12133 SCTP_MIN_OVERHEAD -
12134 sizeof(struct sctp_chunkhdr) -
12135 sizeof(struct sctp_stream_reset_out_request)) /
12136 sizeof(uint16_t)) {
12137 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12140 sctp_alloc_a_chunk(stcb, chk);
12142 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12145 chk->copy_by_ref = 0;
12146 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12147 chk->rec.chunk_id.can_take_data = 0;
12149 chk->asoc = &stcb->asoc;
12150 chk->book_size = sizeof(struct sctp_chunkhdr);
12151 chk->send_size = SCTP_SIZE32(chk->book_size);
12152 chk->book_size_scale = 0;
12153 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12154 if (chk->data == NULL) {
12155 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12156 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12159 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12161 /* setup chunk parameters */
12162 chk->sent = SCTP_DATAGRAM_UNSENT;
12163 chk->snd_count = 0;
12164 if (stcb->asoc.alternate) {
12165 chk->whoTo = stcb->asoc.alternate;
12167 chk->whoTo = stcb->asoc.primary_destination;
12169 atomic_add_int(&chk->whoTo->ref_count, 1);
12170 ch = mtod(chk->data, struct sctp_chunkhdr *);
12171 ch->chunk_type = SCTP_STREAM_RESET;
12172 ch->chunk_flags = 0;
12173 ch->chunk_length = htons(chk->book_size);
12174 SCTP_BUF_LEN(chk->data) = chk->send_size;
12176 seq = stcb->asoc.str_reset_seq_out;
12177 if (can_send_out_req) {
12180 ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12183 asoc->stream_reset_outstanding++;
12186 if ((add_stream & 1) &&
12187 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12188 /* Need to allocate more */
12189 struct sctp_stream_out *oldstream;
12190 struct sctp_stream_queue_pending *sp, *nsp;
12192 #if defined(SCTP_DETAILED_STR_STATS)
12196 oldstream = stcb->asoc.strmout;
12197 /* get some more */
12198 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12199 (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12201 if (stcb->asoc.strmout == NULL) {
12204 stcb->asoc.strmout = oldstream;
12205 /* Turn off the bit */
12206 x = add_stream & 0xfe;
12211 * Ok now we proceed with copying the old out stuff and
12212 * initializing the new stuff.
12214 SCTP_TCB_SEND_LOCK(stcb);
12215 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12216 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12217 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12218 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12219 stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
12220 stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
12221 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12222 stcb->asoc.strmout[i].sid = i;
12223 stcb->asoc.strmout[i].state = oldstream[i].state;
12224 /* FIX ME FIX ME */
12226 * This should be a SS_COPY operation FIX ME STREAM
12229 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
12230 /* now anything on those queues? */
12231 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12232 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12233 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12237 /* now the new streams */
12238 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
12239 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12240 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12241 stcb->asoc.strmout[i].chunks_on_queues = 0;
12242 #if defined(SCTP_DETAILED_STR_STATS)
12243 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12244 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12245 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12248 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12249 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12251 stcb->asoc.strmout[i].next_mid_ordered = 0;
12252 stcb->asoc.strmout[i].next_mid_unordered = 0;
12253 stcb->asoc.strmout[i].sid = i;
12254 stcb->asoc.strmout[i].last_msg_incomplete = 0;
12255 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
12256 stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
12258 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
12259 SCTP_FREE(oldstream, SCTP_M_STRMO);
12260 SCTP_TCB_SEND_UNLOCK(stcb);
12263 if ((add_stream & 1) && (adding_o > 0)) {
12264 asoc->strm_pending_add_size = adding_o;
12265 asoc->peer_req_out = peer_asked;
12266 sctp_add_an_out_stream(chk, seq, adding_o);
12268 asoc->stream_reset_outstanding++;
12270 if ((add_stream & 2) && (adding_i > 0)) {
12271 sctp_add_an_in_stream(chk, seq, adding_i);
12273 asoc->stream_reset_outstanding++;
12276 sctp_add_stream_reset_in(chk, number_entries, list, seq);
12278 asoc->stream_reset_outstanding++;
12280 if (send_tsn_req) {
12281 sctp_add_stream_reset_tsn(chk, seq);
12282 asoc->stream_reset_outstanding++;
12284 asoc->str_reset = chk;
12285 /* insert the chunk for sending */
12286 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12289 asoc->ctrl_queue_cnt++;
12290 if (stcb->asoc.send_sack) {
12291 sctp_send_sack(stcb, SCTP_SO_LOCKED);
12293 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12298 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
12299 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12300 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12301 uint32_t vrf_id, uint16_t port)
12303 /* Don't respond to an ABORT with an ABORT. */
12304 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
12306 sctp_m_freem(cause);
12309 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
12310 mflowtype, mflowid, fibnum,
12316 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
12317 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12318 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12319 uint32_t vrf_id, uint16_t port)
12321 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
12322 mflowtype, mflowid, fibnum,
12327 static struct mbuf *
12328 sctp_copy_resume(struct uio *uio,
12330 int user_marks_eor,
12333 struct mbuf **new_tail)
12337 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12338 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12340 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12343 *sndout = m_length(m, NULL);
12344 *new_tail = m_last(m);
12350 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12354 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12356 if (sp->data == NULL) {
12357 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12361 sp->tail_mbuf = m_last(sp->data);
12367 static struct sctp_stream_queue_pending *
12368 sctp_copy_it_in(struct sctp_tcb *stcb,
12369 struct sctp_association *asoc,
12370 struct sctp_sndrcvinfo *srcv,
12372 struct sctp_nets *net,
12374 int user_marks_eor,
12379 * This routine must be very careful in its work. Protocol
12380 * processing is up and running so care must be taken to spl...()
12381 * when you need to do something that may affect the stcb/asoc. The
12382 * sb is locked however. When data is copied the protocol processing
12383 * should be enabled since this is a slower operation...
12385 struct sctp_stream_queue_pending *sp = NULL;
12389 /* Now can we send this? */
12390 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
12391 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12392 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12393 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12394 /* got data while shutting down */
12395 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12396 *error = ECONNRESET;
12399 sctp_alloc_a_strmoq(stcb, sp);
12401 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12406 sp->sender_all_done = 0;
12407 sp->sinfo_flags = srcv->sinfo_flags;
12408 sp->timetolive = srcv->sinfo_timetolive;
12409 sp->ppid = srcv->sinfo_ppid;
12410 sp->context = srcv->sinfo_context;
12412 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12414 sp->sid = srcv->sinfo_stream;
12415 sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
12416 if ((sp->length == (uint32_t)uio->uio_resid) &&
12417 ((user_marks_eor == 0) ||
12418 (srcv->sinfo_flags & SCTP_EOF) ||
12419 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12420 sp->msg_is_complete = 1;
12422 sp->msg_is_complete = 0;
12424 sp->sender_all_done = 0;
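/*
 * A queued send is only marked complete when this copy consumed the whole
 * user buffer and, if explicit EOR mode is on, the user also flagged the
 * end of the record (or EOF); otherwise more data for the same message is
 * expected in later calls.
 */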
12425 sp->some_taken = 0;
12426 sp->put_last_out = 0;
12427 resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
12428 sp->data = sp->tail_mbuf = NULL;
12429 if (sp->length == 0) {
12432 if (srcv->sinfo_keynumber_valid) {
12433 sp->auth_keyid = srcv->sinfo_keynumber;
12435 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12437 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12438 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12439 sp->holds_key_ref = 1;
12441 *error = sctp_copy_one(sp, uio, resv_in_first);
12444 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12447 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12449 atomic_add_int(&sp->net->ref_count, 1);
12453 sctp_set_prsctp_policy(sp);
12461 sctp_sosend(struct socket *so,
12462 struct sockaddr *addr,
12465 struct mbuf *control,
12470 int error, use_sndinfo = 0;
12471 struct sctp_sndrcvinfo sndrcvninfo;
12472 struct sockaddr *addr_to_use;
12473 #if defined(INET) && defined(INET6)
12474 struct sockaddr_in sin;
12478 /* process cmsg snd/rcv info (maybe an assoc-id) */
12479 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12480 sizeof(sndrcvninfo))) {
12485 addr_to_use = addr;
12486 #if defined(INET) && defined(INET6)
12487 if ((addr) && (addr->sa_family == AF_INET6)) {
12488 struct sockaddr_in6 *sin6;
12490 sin6 = (struct sockaddr_in6 *)addr;
12491 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12492 in6_sin6_2_sin(&sin, sin6);
12493 addr_to_use = (struct sockaddr *)&sin;
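/*
 * An IPv4-mapped IPv6 destination is converted to a plain sockaddr_in
 * here, so the lower send path only ever sees a native IPv4 address for
 * such peers.
 */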
12497 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12500 use_sndinfo ? &sndrcvninfo : NULL
12508 sctp_lower_sosend(struct socket *so,
12509 struct sockaddr *addr,
12511 struct mbuf *i_pak,
12512 struct mbuf *control,
12514 struct sctp_sndrcvinfo *srcv
12519 unsigned int sndlen = 0, max_len;
12521 struct mbuf *top = NULL;
12522 int queue_only = 0, queue_only_for_init = 0;
12523 int free_cnt_applied = 0;
12525 int now_filled = 0;
12526 unsigned int inqueue_bytes = 0;
12527 struct sctp_block_entry be;
12528 struct sctp_inpcb *inp;
12529 struct sctp_tcb *stcb = NULL;
12530 struct timeval now;
12531 struct sctp_nets *net;
12532 struct sctp_association *asoc;
12533 struct sctp_inpcb *t_inp;
12534 int user_marks_eor;
12535 int create_lock_applied = 0;
12536 int nagle_applies = 0;
12537 int some_on_control = 0;
12538 int got_all_of_the_send = 0;
12539 int hold_tcblock = 0;
12540 int non_blocking = 0;
12541 uint32_t local_add_more, local_soresv = 0;
12543 uint16_t sinfo_flags;
12544 sctp_assoc_t sinfo_assoc_id;
12551 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12553 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12556 SCTP_RELEASE_PKT(i_pak);
12560 if ((uio == NULL) && (i_pak == NULL)) {
12561 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12564 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12565 atomic_add_int(&inp->total_sends, 1);
12567 if (uio->uio_resid < 0) {
12568 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12571 sndlen = (unsigned int)uio->uio_resid;
12573 top = SCTP_HEADER_TO_CHAIN(i_pak);
12574 sndlen = SCTP_HEADER_LEN(i_pak);
12576 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12579 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12580 SCTP_IS_LISTENING(inp)) {
12581 /* The listener can NOT send */
12582 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12587 * Pre-screen address; if one is given, the sin-len
12588 * must be set correctly!
12591 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12593 switch (raddr->sa.sa_family) {
12596 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12597 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12601 port = raddr->sin.sin_port;
12606 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12607 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12611 port = raddr->sin6.sin6_port;
12615 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12616 error = EAFNOSUPPORT;
12623 sinfo_flags = srcv->sinfo_flags;
12624 sinfo_assoc_id = srcv->sinfo_assoc_id;
12625 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12626 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12627 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12631 if (srcv->sinfo_flags)
12632 SCTP_STAT_INCR(sctps_sends_with_flags);
12634 sinfo_flags = inp->def_send.sinfo_flags;
12635 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12637 if (sinfo_flags & SCTP_SENDALL) {
12638 /* it's a sendall */
12639 error = sctp_sendall(inp, uio, top, srcv);
12643 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12644 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12648 /* now we must find the assoc */
12649 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12650 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12651 SCTP_INP_RLOCK(inp);
12652 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12654 SCTP_TCB_LOCK(stcb);
12657 SCTP_INP_RUNLOCK(inp);
12658 } else if (sinfo_assoc_id) {
12659 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
12660 if (stcb != NULL) {
12665 * Since we did not use findep we must
12666 * increment it, and if we don't find a tcb, decrement it.
12669 SCTP_INP_WLOCK(inp);
12670 SCTP_INP_INCR_REF(inp);
12671 SCTP_INP_WUNLOCK(inp);
12672 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12673 if (stcb == NULL) {
12674 SCTP_INP_WLOCK(inp);
12675 SCTP_INP_DECR_REF(inp);
12676 SCTP_INP_WUNLOCK(inp);
12681 if ((stcb == NULL) && (addr)) {
12682 /* Possible implicit send? */
12683 SCTP_ASOC_CREATE_LOCK(inp);
12684 create_lock_applied = 1;
12685 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12686 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12687 /* Should I really unlock ? */
12688 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12693 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12694 (addr->sa_family == AF_INET6)) {
12695 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12699 SCTP_INP_WLOCK(inp);
12700 SCTP_INP_INCR_REF(inp);
12701 SCTP_INP_WUNLOCK(inp);
12702 /* With the lock applied look again */
12703 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12704 #if defined(INET) || defined(INET6)
12705 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12706 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12709 if (stcb == NULL) {
12710 SCTP_INP_WLOCK(inp);
12711 SCTP_INP_DECR_REF(inp);
12712 SCTP_INP_WUNLOCK(inp);
12719 if (t_inp != inp) {
12720 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12725 if (stcb == NULL) {
12726 if (addr == NULL) {
12727 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12731 /* We must go ahead and start the INIT process */
12734 if ((sinfo_flags & SCTP_ABORT) ||
12735 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12737 * User asks to abort a non-existent assoc,
12738 * or EOF a non-existent assoc with no data
12740 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12744 /* get an asoc/stcb struct */
12745 vrf_id = inp->def_vrf_id;
12747 if (create_lock_applied == 0) {
12748 panic("Error, should hold create lock and I don't?");
12751 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12752 inp->sctp_ep.pre_open_stream_count,
12755 if (stcb == NULL) {
12756 /* Error is setup for us in the call */
12759 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12760 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12762 * Set the connected flag so we can queue
12765 soisconnecting(so);
12768 if (create_lock_applied) {
12769 SCTP_ASOC_CREATE_UNLOCK(inp);
12770 create_lock_applied = 0;
12772 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12775 * Turn on queue only flag to prevent data from
12779 asoc = &stcb->asoc;
12780 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
12781 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12783 /* initialize authentication params for the assoc */
12784 sctp_initialize_auth_params(inp, stcb);
12787 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12788 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
12789 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
12795 /* out with the INIT */
12796 queue_only_for_init = 1;
12798 * we may want to dig in after this call and adjust the MTU
12799 * value. It defaulted to 1500 (constant) but the ro
12800 * structure may now have an update and thus we may need to
12801 * change it BEFORE we append the message.
12805 asoc = &stcb->asoc;
12807 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12808 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12810 net = sctp_findnet(stcb, addr);
12813 if ((net == NULL) ||
12814 ((port != 0) && (port != stcb->rport))) {
12815 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12820 if (stcb->asoc.alternate) {
12821 net = stcb->asoc.alternate;
12823 net = stcb->asoc.primary_destination;
12826 atomic_add_int(&stcb->total_sends, 1);
12827 /* Keep the stcb from being freed under our feet */
12828 atomic_add_int(&asoc->refcnt, 1);
12829 free_cnt_applied = 1;
12831 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12832 if (sndlen > asoc->smallest_mtu) {
12833 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12838 if (SCTP_SO_IS_NBIO(so)
12839 || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0
12843 /* would we block? */
12844 if (non_blocking) {
12847 if (hold_tcblock == 0) {
12848 SCTP_TCB_LOCK(stcb);
12851 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
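/*
 * inqueue_bytes approximates the user data already charged against
 * the send buffer: total_output_queue_size minus the per-chunk
 * accounting overhead (SCTP_DATA_CHUNK_OVERHEAD per queued chunk).
 */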
12852 if (user_marks_eor == 0) {
12857 if ((SCTP_SB_LIMIT_SND(so) < (amount + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12858 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12859 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12860 if (sndlen > SCTP_SB_LIMIT_SND(so))
12863 error = EWOULDBLOCK;
12866 stcb->asoc.sb_send_resv += sndlen;
12867 SCTP_TCB_UNLOCK(stcb);
12870 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12872 local_soresv = sndlen;
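/*
 * local_soresv remembers how much send-buffer space (sb_send_resv)
 * was reserved by this call, so the error paths at the end of the
 * function can give it back.
 */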
12873 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12874 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12875 error = ECONNRESET;
12878 if (create_lock_applied) {
12879 SCTP_ASOC_CREATE_UNLOCK(inp);
12880 create_lock_applied = 0;
12882 /* Is the stream no. valid? */
12883 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12884 /* Invalid stream number */
12885 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12889 if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
12890 (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
12892 * Can't queue any data while stream reset is underway.
12894 if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
12899 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
12902 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
12903 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
12906 /* we are now done with all control */
12908 sctp_m_freem(control);
12911 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
12912 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12913 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12914 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12915 if (srcv->sinfo_flags & SCTP_ABORT) {
12918 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12919 error = ECONNRESET;
12923 /* Ok, we will attempt a msgsnd :> */
12925 p->td_ru.ru_msgsnd++;
12927 /* Are we aborting? */
12928 if (srcv->sinfo_flags & SCTP_ABORT) {
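/*
 * SCTP_ABORT path: build a User-Initiated Abort error cause from the
 * caller's data (which must fit within one MTU) and tear the
 * association down instead of queueing the data.  Roughly, an
 * application reaches this path with something like (illustration
 * only, not taken from this file):
 *
 *   sctp_sendmsg(sd, reason, reason_len, (struct sockaddr *)&to,
 *                tolen, 0, SCTP_ABORT, 0, 0, 0);
 */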
12930 int tot_demand, tot_out = 0, max_out;
12932 SCTP_STAT_INCR(sctps_sends_with_abort);
12933 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
12934 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
12935 /* It has to be up before we abort */
12936 /* how big is the user initiated abort? */
12937 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12941 if (hold_tcblock) {
12942 SCTP_TCB_UNLOCK(stcb);
12946 struct mbuf *cntm = NULL;
12948 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
12950 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12951 tot_out += SCTP_BUF_LEN(cntm);
12955 /* Must fit in an MTU */
12957 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12958 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12960 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12964 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
12967 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12971 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12972 max_out -= sizeof(struct sctp_abort_msg);
12973 if (tot_out > max_out) {
12977 struct sctp_paramhdr *ph;
12979 /* now move forward the data pointer */
12980 ph = mtod(mm, struct sctp_paramhdr *);
12981 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12982 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
12984 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12986 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12989 * Here, if we can't get the user's data we
12990 * still abort; we just don't get to
12991 * send the user's note :-0
12998 SCTP_BUF_NEXT(mm) = top;
13002 if (hold_tcblock == 0) {
13003 SCTP_TCB_LOCK(stcb);
13005 atomic_add_int(&stcb->asoc.refcnt, -1);
13006 free_cnt_applied = 0;
13007 /* release this lock, otherwise we hang on ourselves */
13008 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
13009 /* now relock the stcb so everything is sane */
13013 * In this case top is already chained to mm, so avoid a double
13014 * free: we free it below if top != NULL, and the driver
13015 * would free it after sending the packet out.
13022 /* Calculate the maximum we can send */
13023 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13024 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13025 if (non_blocking) {
13026 /* we already checked for non-blocking above. */
13029 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13034 if (hold_tcblock) {
13035 SCTP_TCB_UNLOCK(stcb);
13038 if (asoc->strmout == NULL) {
13039 /* huh? software error */
13040 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13045 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
13046 if ((user_marks_eor == 0) &&
13047 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13048 /* It will NEVER fit */
13049 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13053 if ((uio == NULL) && user_marks_eor) {
13055 * We do not support eeor mode for
13056 * sending with mbuf chains (like sendfile).
13058 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13063 if (user_marks_eor) {
13064 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13067 * For non-eeor the whole message must fit in
13068 * the socket send buffer.
13070 local_add_more = sndlen;
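/*
 * Pre-block check: local_add_more is the amount of free send-buffer
 * space wanted before proceeding.  If that much room (or chunk
 * quota) is not available and the socket is blocking, the loop
 * below sleeps in sbwait() until SACKs free up space.
 */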
13073 if (non_blocking) {
13074 goto skip_preblock;
13076 if (((max_len <= local_add_more) &&
13077 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13079 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13080 /* No room right now ! */
13081 SOCKBUF_LOCK(&so->so_snd);
13082 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13083 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13084 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13085 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
13086 (unsigned int)SCTP_SB_LIMIT_SND(so),
13089 stcb->asoc.stream_queue_cnt,
13090 stcb->asoc.chunks_on_out_queue,
13091 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13092 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13093 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
13096 stcb->block_entry = &be;
13097 error = sbwait(&so->so_snd);
13098 stcb->block_entry = NULL;
13099 if (error || so->so_error || be.error) {
13102 error = so->so_error;
13107 SOCKBUF_UNLOCK(&so->so_snd);
13110 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13111 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13112 asoc, stcb->asoc.total_output_queue_size);
13114 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13115 SOCKBUF_UNLOCK(&so->so_snd);
13118 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13120 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13121 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13125 SOCKBUF_UNLOCK(&so->so_snd);
13129 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13133 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
13134 * case. NOTE: uio will be NULL when top/mbuf is passed.
13137 if (srcv->sinfo_flags & SCTP_EOF) {
13138 got_all_of_the_send = 1;
13141 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13147 struct sctp_stream_queue_pending *sp;
13148 struct sctp_stream_out *strm;
13151 SCTP_TCB_SEND_LOCK(stcb);
13152 if ((asoc->stream_locked) &&
13153 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13154 SCTP_TCB_SEND_UNLOCK(stcb);
13155 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13159 SCTP_TCB_SEND_UNLOCK(stcb);
13161 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
13162 if (strm->last_msg_incomplete == 0) {
13164 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13168 SCTP_TCB_SEND_LOCK(stcb);
13169 if (sp->msg_is_complete) {
13170 strm->last_msg_incomplete = 0;
13171 asoc->stream_locked = 0;
13174 * Just got locked to this guy in case of an
13177 strm->last_msg_incomplete = 1;
13178 if (stcb->asoc.idata_supported == 0) {
13179 asoc->stream_locked = 1;
13180 asoc->stream_locked_on = srcv->sinfo_stream;
13182 sp->sender_all_done = 0;
13184 sctp_snd_sb_alloc(stcb, sp->length);
13185 atomic_add_int(&asoc->stream_queue_cnt, 1);
13186 if (srcv->sinfo_flags & SCTP_UNORDERED) {
13187 SCTP_STAT_INCR(sctps_sends_with_unord);
13189 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13190 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13191 SCTP_TCB_SEND_UNLOCK(stcb);
13193 SCTP_TCB_SEND_LOCK(stcb);
13194 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13195 SCTP_TCB_SEND_UNLOCK(stcb);
13197 /* ???? Huh ??? last msg is gone */
13199 panic("Warning: Last msg marked incomplete, yet nothing left?");
13201 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13202 strm->last_msg_incomplete = 0;
13208 while (uio->uio_resid > 0) {
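/*
 * Copy loop: each pass moves at most max_len bytes from the user's
 * uio into an mbuf chain (sctp_copy_resume) and tacks it onto the
 * pending stream queue entry (sp), blocking for send-buffer space
 * between passes when necessary.
 */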
13209 /* How much room do we have? */
13210 struct mbuf *new_tail, *mm;
13212 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13213 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
13214 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13218 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13219 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13220 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13223 if (hold_tcblock) {
13224 SCTP_TCB_UNLOCK(stcb);
13227 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
13228 if ((mm == NULL) || error) {
13234 /* Update the mbuf and count */
13235 SCTP_TCB_SEND_LOCK(stcb);
13236 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13238 * we need to get out. Peer probably aborted.
13242 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
13243 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13244 error = ECONNRESET;
13246 SCTP_TCB_SEND_UNLOCK(stcb);
13249 if (sp->tail_mbuf) {
13250 /* tack it to the end */
13251 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13252 sp->tail_mbuf = new_tail;
13254 /* A stolen mbuf */
13256 sp->tail_mbuf = new_tail;
13258 sctp_snd_sb_alloc(stcb, sndout);
13259 atomic_add_int(&sp->length, sndout);
13261 if (srcv->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
13262 sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
13265 /* Did we reach EOR? */
13266 if ((uio->uio_resid == 0) &&
13267 ((user_marks_eor == 0) ||
13268 (srcv->sinfo_flags & SCTP_EOF) ||
13269 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13270 sp->msg_is_complete = 1;
13272 sp->msg_is_complete = 0;
13274 SCTP_TCB_SEND_UNLOCK(stcb);
13276 if (uio->uio_resid == 0) {
13281 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
13283 * This is ugly but we must assure locking
13286 if (hold_tcblock == 0) {
13287 SCTP_TCB_LOCK(stcb);
13290 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
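/*
 * After sctp_prune_prsctp() has dropped whatever PR-SCTP chunks its
 * policies allow, recompute how much send-buffer room is available
 * before deciding whether to block.
 */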
13291 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13292 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
13293 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13299 SCTP_TCB_UNLOCK(stcb);
13302 /* wait for space now */
13303 if (non_blocking) {
13304 /* Non-blocking io in place out */
13307 /* What about the INIT, send it maybe */
13308 if (queue_only_for_init) {
13309 if (hold_tcblock == 0) {
13310 SCTP_TCB_LOCK(stcb);
13313 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13314 /* a collision took us forward? */
13317 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13318 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13322 if ((net->flight_size > net->cwnd) &&
13323 (asoc->sctp_cmt_on_off == 0)) {
13324 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13326 } else if (asoc->ifp_had_enobuf) {
13327 SCTP_STAT_INCR(sctps_ifnomemqueued);
13328 if (net->flight_size > (2 * net->mtu)) {
13331 asoc->ifp_had_enobuf = 0;
13333 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
13334 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13335 (stcb->asoc.total_flight > 0) &&
13336 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13337 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13340 * Ok, Nagle is set on and we have data outstanding.
13341 * Don't send anything and let SACKs drive out the
13342 * data unless we have a "full" segment to send.
13344 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13345 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13347 SCTP_STAT_INCR(sctps_naglequeued);
13350 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13351 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13352 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13354 SCTP_STAT_INCR(sctps_naglesent);
13357 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13359 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13360 nagle_applies, un_sent);
13361 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13362 stcb->asoc.total_flight,
13363 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13365 if (queue_only_for_init)
13366 queue_only_for_init = 0;
13367 if ((queue_only == 0) && (nagle_applies == 0)) {
13369 * need to start chunk output
13370 * before blocking.. note that if
13371 * a lock is already applied, then
13372 * the input via the net is happening
13373 * and I don't need to start output :-D
13375 if (hold_tcblock == 0) {
13376 if (SCTP_TCB_TRYLOCK(stcb)) {
13378 sctp_chunk_output(inp,
13380 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13383 sctp_chunk_output(inp,
13385 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13387 if (hold_tcblock == 1) {
13388 SCTP_TCB_UNLOCK(stcb);
13392 SOCKBUF_LOCK(&so->so_snd);
13394 * This is a bit strange, but I think it will
13395 * work. The total_output_queue_size is locked and
13396 * protected by the TCB_LOCK, which we just released.
13397 * There is a race that can occur between releasing it
13398 * above, and me getting the socket lock, where sacks
13399 * come in but we have not put the SB_WAIT on the
13400 * so_snd buffer to get the wakeup. After the LOCK
13401 * is applied the sack_processing will also need to
13402 * LOCK the so->so_snd to do the actual sowwakeup(). So
13403 * once we have the socket buffer lock if we recheck the
13404 * size we KNOW we will get to sleep safely with the
13405 * wakeup flag in place.
13407 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13408 if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
13409 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13410 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13411 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13412 asoc, (size_t)uio->uio_resid);
13415 stcb->block_entry = &be;
13416 error = sbwait(&so->so_snd);
13417 stcb->block_entry = NULL;
13419 if (error || so->so_error || be.error) {
13422 error = so->so_error;
13427 SOCKBUF_UNLOCK(&so->so_snd);
13431 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13432 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13433 asoc, stcb->asoc.total_output_queue_size);
13436 SOCKBUF_UNLOCK(&so->so_snd);
13437 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13441 SCTP_TCB_SEND_LOCK(stcb);
13442 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13443 SCTP_TCB_SEND_UNLOCK(stcb);
13447 if (sp->msg_is_complete == 0) {
13448 strm->last_msg_incomplete = 1;
13449 if (stcb->asoc.idata_supported == 0) {
13450 asoc->stream_locked = 1;
13451 asoc->stream_locked_on = srcv->sinfo_stream;
13454 sp->sender_all_done = 1;
13455 strm->last_msg_incomplete = 0;
13456 asoc->stream_locked = 0;
13459 SCTP_PRINTF("Huh no sp TSNH?\n");
13460 strm->last_msg_incomplete = 0;
13461 asoc->stream_locked = 0;
13463 SCTP_TCB_SEND_UNLOCK(stcb);
13464 if (uio->uio_resid == 0) {
13465 got_all_of_the_send = 1;
13468 /* We send in a 0, since we do NOT have any locks */
13469 error = sctp_msg_append(stcb, net, top, srcv, 0);
13471 if (srcv->sinfo_flags & SCTP_EOF) {
13473 * This should only happen for Panda for the mbuf
13474 * send case, which does NOT yet support EEOR mode.
13475 * Thus, we can just set this flag to do the proper
13478 got_all_of_the_send = 1;
13486 if ((srcv->sinfo_flags & SCTP_EOF) &&
13487 (got_all_of_the_send == 1)) {
13488 SCTP_STAT_INCR(sctps_sends_with_eof);
13490 if (hold_tcblock == 0) {
13491 SCTP_TCB_LOCK(stcb);
13494 if (TAILQ_EMPTY(&asoc->send_queue) &&
13495 TAILQ_EMPTY(&asoc->sent_queue) &&
13496 sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
13497 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13500 /* there is nothing queued to send, so I'm done... */
13501 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
13502 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13503 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13504 struct sctp_nets *netp;
13506 /* only send SHUTDOWN the first time through */
13507 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13508 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13510 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
13511 sctp_stop_timers_for_shutdown(stcb);
13512 if (stcb->asoc.alternate) {
13513 netp = stcb->asoc.alternate;
13515 netp = stcb->asoc.primary_destination;
13517 sctp_send_shutdown(stcb, netp);
13518 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13520 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13521 asoc->primary_destination);
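/*
 * SCTP_EOF with all data handed off and nothing left queued: the
 * association moves to SHUTDOWN-SENT, a SHUTDOWN chunk is sent to
 * the alternate (or primary) destination, and the shutdown and
 * shutdown-guard timers are started.
 */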
13525 * we still got (or just got) data to send, so set
13529 * XXX sockets draft says that SCTP_EOF should be
13530 * sent with no data. currently, we will allow user
13531 * data to be sent first and move to
13534 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
13535 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13536 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13537 if (hold_tcblock == 0) {
13538 SCTP_TCB_LOCK(stcb);
13541 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13542 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
13544 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
13545 if (TAILQ_EMPTY(&asoc->send_queue) &&
13546 TAILQ_EMPTY(&asoc->sent_queue) &&
13547 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13548 struct mbuf *op_err;
13549 char msg[SCTP_DIAG_INFO_LEN];
13552 if (free_cnt_applied) {
13553 atomic_add_int(&stcb->asoc.refcnt, -1);
13554 free_cnt_applied = 0;
13556 snprintf(msg, sizeof(msg),
13557 "%s:%d at %s", __FILE__, __LINE__, __func__);
13558 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
13560 sctp_abort_an_association(stcb->sctp_ep, stcb,
13561 op_err, SCTP_SO_LOCKED);
13563 * now relock the stcb so everything
13570 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13571 asoc->primary_destination);
13572 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13577 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13578 some_on_control = 1;
13580 if (queue_only_for_init) {
13581 if (hold_tcblock == 0) {
13582 SCTP_TCB_LOCK(stcb);
13585 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13586 /* a collision took us forward? */
13589 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13590 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13594 if ((net->flight_size > net->cwnd) &&
13595 (stcb->asoc.sctp_cmt_on_off == 0)) {
13596 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13598 } else if (asoc->ifp_had_enobuf) {
13599 SCTP_STAT_INCR(sctps_ifnomemqueued);
13600 if (net->flight_size > (2 * net->mtu)) {
13603 asoc->ifp_had_enobuf = 0;
13605 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
13606 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13607 (stcb->asoc.total_flight > 0) &&
13608 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13609 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13611 * Ok, Nagle is set on and we have data outstanding.
13612 * Don't send anything and let SACKs drive out the
13613 * data unless we have a "full" segment to send.
13615 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13616 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13618 SCTP_STAT_INCR(sctps_naglequeued);
13621 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13622 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13623 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13625 SCTP_STAT_INCR(sctps_naglesent);
13628 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13629 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13630 nagle_applies, un_sent);
13631 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13632 stcb->asoc.total_flight,
13633 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13635 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13636 /* we can attempt to send too. */
13637 if (hold_tcblock == 0) {
13639 * If there is activity recv'ing sacks no need to
13642 if (SCTP_TCB_TRYLOCK(stcb)) {
13643 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13647 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13649 } else if ((queue_only == 0) &&
13650 (stcb->asoc.peers_rwnd == 0) &&
13651 (stcb->asoc.total_flight == 0)) {
13652 /* We get to have a probe outstanding */
13653 if (hold_tcblock == 0) {
13655 SCTP_TCB_LOCK(stcb);
13657 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13658 } else if (some_on_control) {
13659 int num_out, reason, frag_point;
13661 /* Here we do control only */
13662 if (hold_tcblock == 0) {
13664 SCTP_TCB_LOCK(stcb);
13666 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13667 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13668 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13670 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13671 queue_only, stcb->asoc.peers_rwnd, un_sent,
13672 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13673 stcb->asoc.total_output_queue_size, error);
13678 if (local_soresv && stcb) {
13679 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13681 if (create_lock_applied) {
13682 SCTP_ASOC_CREATE_UNLOCK(inp);
13684 if ((stcb) && hold_tcblock) {
13685 SCTP_TCB_UNLOCK(stcb);
13687 if (stcb && free_cnt_applied) {
13688 atomic_add_int(&stcb->asoc.refcnt, -1);
13692 if (mtx_owned(&stcb->tcb_mtx)) {
13693 panic("Leaving with tcb mtx owned?");
13695 if (mtx_owned(&stcb->tcb_send_mtx)) {
13696 panic("Leaving with tcb send mtx owned?");
13704 sctp_m_freem(control);
13711 * generate an AUTHentication chunk, if required
13714 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13715 struct sctp_auth_chunk **auth_ret, uint32_t *offset,
13716 struct sctp_tcb *stcb, uint8_t chunk)
13718 struct mbuf *m_auth;
13719 struct sctp_auth_chunk *auth;
13723 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13727 if (stcb->asoc.auth_supported == 0) {
13730 /* does the requested chunk require auth? */
13731 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13734 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
13735 if (m_auth == NULL) {
13739 /* reserve some space if this will be the first mbuf */
13741 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13742 /* fill in the AUTH chunk details */
13743 auth = mtod(m_auth, struct sctp_auth_chunk *);
13744 memset(auth, 0, sizeof(*auth));
13745 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13746 auth->ch.chunk_flags = 0;
13747 chunk_len = sizeof(*auth) +
13748 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13749 auth->ch.chunk_length = htons(chunk_len);
13750 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13751 /* key id and hmac digest will be computed and filled in upon send */
13753 /* save the offset where the auth was inserted into the chain */
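/*
 * Per RFC 4895 the HMAC covers the AUTH chunk itself (with a zeroed
 * HMAC field) and every chunk placed after it in the packet, so the
 * byte offset of the AUTH chunk within the outgoing chain is
 * recorded for the digest computation done at send time.
 */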
13755 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13756 *offset += SCTP_BUF_LEN(cn);
13759 /* update length and return pointer to the auth chunk */
13760 SCTP_BUF_LEN(m_auth) = chunk_len;
13761 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13762 if (auth_ret != NULL)
13770 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
13772 struct nd_prefix *pfx = NULL;
13773 struct nd_pfxrouter *pfxrtr = NULL;
13774 struct sockaddr_in6 gw6;
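/*
 * Purpose: return non-zero only when the gateway of the cached route
 * matches one of the advertising routers of the ND prefix covering
 * src6, i.e. the route's next hop was learned from a router that
 * advertises the prefix of the chosen source address.
 */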
13776 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13779 /* get prefix entry of address */
13781 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13782 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13784 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13785 &src6->sin6_addr, &pfx->ndpr_mask))
13788 /* no prefix entry in the prefix list */
13791 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13792 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13796 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13797 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13799 /* search installed gateway from prefix entry */
13800 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13801 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13802 gw6.sin6_family = AF_INET6;
13803 gw6.sin6_len = sizeof(struct sockaddr_in6);
13804 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13805 sizeof(struct in6_addr));
13806 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13807 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13808 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13809 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13810 if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
13812 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13817 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
13823 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
13826 struct sockaddr_in *sin, *mask;
13827 struct ifaddr *ifa;
13828 struct in_addr srcnetaddr, gwnetaddr;
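/*
 * Purpose: report a match only when the candidate IPv4 source
 * address and the route's gateway fall in the same subnet, both
 * masked with the source interface's netmask.
 */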
13830 if (ro == NULL || ro->ro_rt == NULL ||
13831 sifa->address.sa.sa_family != AF_INET) {
13834 ifa = (struct ifaddr *)sifa->ifa;
13835 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13836 sin = &sifa->address.sin;
13837 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13838 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13839 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13840 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13842 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13843 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13844 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13845 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13846 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13847 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {