2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctputil.h>
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_timer.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_indata.h>
52 #include <netinet/sctp_bsd_addr.h>
53 #include <netinet/sctp_input.h>
54 #include <netinet/sctp_crc32.h>
55 #if defined(INET) || defined(INET6)
56 #include <netinet/udp.h>
58 #include <netinet/udp_var.h>
59 #include <machine/in_cksum.h>
60 #include <netinet/in_kdtrace.h>
64 #define SCTP_MAX_GAPS_INARRAY 4
66 uint8_t right_edge; /* mergeable on the right edge */
67 uint8_t left_edge; /* mergeable on the left edge */
70 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
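/*
 * Lookup table used when building SACKs: indexed by one byte of the
 * association's mapping array, each entry precomputes whether the low
 * and high bits are set (so gaps can be merged with the neighboring
 * bytes), how many gap ack blocks the byte contains, and the gap ack
 * block offsets themselves.
 */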
73 const struct sack_track sack_array[256] = {
74 {0, 0, 0, 0, /* 0x00 */
81 {1, 0, 1, 0, /* 0x01 */
88 {0, 0, 1, 0, /* 0x02 */
95 {1, 0, 1, 0, /* 0x03 */
102 {0, 0, 1, 0, /* 0x04 */
109 {1, 0, 2, 0, /* 0x05 */
116 {0, 0, 1, 0, /* 0x06 */
123 {1, 0, 1, 0, /* 0x07 */
130 {0, 0, 1, 0, /* 0x08 */
137 {1, 0, 2, 0, /* 0x09 */
144 {0, 0, 2, 0, /* 0x0a */
151 {1, 0, 2, 0, /* 0x0b */
158 {0, 0, 1, 0, /* 0x0c */
165 {1, 0, 2, 0, /* 0x0d */
172 {0, 0, 1, 0, /* 0x0e */
179 {1, 0, 1, 0, /* 0x0f */
186 {0, 0, 1, 0, /* 0x10 */
193 {1, 0, 2, 0, /* 0x11 */
200 {0, 0, 2, 0, /* 0x12 */
207 {1, 0, 2, 0, /* 0x13 */
214 {0, 0, 2, 0, /* 0x14 */
221 {1, 0, 3, 0, /* 0x15 */
228 {0, 0, 2, 0, /* 0x16 */
235 {1, 0, 2, 0, /* 0x17 */
242 {0, 0, 1, 0, /* 0x18 */
249 {1, 0, 2, 0, /* 0x19 */
256 {0, 0, 2, 0, /* 0x1a */
263 {1, 0, 2, 0, /* 0x1b */
270 {0, 0, 1, 0, /* 0x1c */
277 {1, 0, 2, 0, /* 0x1d */
284 {0, 0, 1, 0, /* 0x1e */
291 {1, 0, 1, 0, /* 0x1f */
298 {0, 0, 1, 0, /* 0x20 */
305 {1, 0, 2, 0, /* 0x21 */
312 {0, 0, 2, 0, /* 0x22 */
319 {1, 0, 2, 0, /* 0x23 */
326 {0, 0, 2, 0, /* 0x24 */
333 {1, 0, 3, 0, /* 0x25 */
340 {0, 0, 2, 0, /* 0x26 */
347 {1, 0, 2, 0, /* 0x27 */
354 {0, 0, 2, 0, /* 0x28 */
361 {1, 0, 3, 0, /* 0x29 */
368 {0, 0, 3, 0, /* 0x2a */
375 {1, 0, 3, 0, /* 0x2b */
382 {0, 0, 2, 0, /* 0x2c */
389 {1, 0, 3, 0, /* 0x2d */
396 {0, 0, 2, 0, /* 0x2e */
403 {1, 0, 2, 0, /* 0x2f */
410 {0, 0, 1, 0, /* 0x30 */
417 {1, 0, 2, 0, /* 0x31 */
424 {0, 0, 2, 0, /* 0x32 */
431 {1, 0, 2, 0, /* 0x33 */
438 {0, 0, 2, 0, /* 0x34 */
445 {1, 0, 3, 0, /* 0x35 */
452 {0, 0, 2, 0, /* 0x36 */
459 {1, 0, 2, 0, /* 0x37 */
466 {0, 0, 1, 0, /* 0x38 */
473 {1, 0, 2, 0, /* 0x39 */
480 {0, 0, 2, 0, /* 0x3a */
487 {1, 0, 2, 0, /* 0x3b */
494 {0, 0, 1, 0, /* 0x3c */
501 {1, 0, 2, 0, /* 0x3d */
508 {0, 0, 1, 0, /* 0x3e */
515 {1, 0, 1, 0, /* 0x3f */
522 {0, 0, 1, 0, /* 0x40 */
529 {1, 0, 2, 0, /* 0x41 */
536 {0, 0, 2, 0, /* 0x42 */
543 {1, 0, 2, 0, /* 0x43 */
550 {0, 0, 2, 0, /* 0x44 */
557 {1, 0, 3, 0, /* 0x45 */
564 {0, 0, 2, 0, /* 0x46 */
571 {1, 0, 2, 0, /* 0x47 */
578 {0, 0, 2, 0, /* 0x48 */
585 {1, 0, 3, 0, /* 0x49 */
592 {0, 0, 3, 0, /* 0x4a */
599 {1, 0, 3, 0, /* 0x4b */
606 {0, 0, 2, 0, /* 0x4c */
613 {1, 0, 3, 0, /* 0x4d */
620 {0, 0, 2, 0, /* 0x4e */
627 {1, 0, 2, 0, /* 0x4f */
634 {0, 0, 2, 0, /* 0x50 */
641 {1, 0, 3, 0, /* 0x51 */
648 {0, 0, 3, 0, /* 0x52 */
655 {1, 0, 3, 0, /* 0x53 */
662 {0, 0, 3, 0, /* 0x54 */
669 {1, 0, 4, 0, /* 0x55 */
676 {0, 0, 3, 0, /* 0x56 */
683 {1, 0, 3, 0, /* 0x57 */
690 {0, 0, 2, 0, /* 0x58 */
697 {1, 0, 3, 0, /* 0x59 */
704 {0, 0, 3, 0, /* 0x5a */
711 {1, 0, 3, 0, /* 0x5b */
718 {0, 0, 2, 0, /* 0x5c */
725 {1, 0, 3, 0, /* 0x5d */
732 {0, 0, 2, 0, /* 0x5e */
739 {1, 0, 2, 0, /* 0x5f */
746 {0, 0, 1, 0, /* 0x60 */
753 {1, 0, 2, 0, /* 0x61 */
760 {0, 0, 2, 0, /* 0x62 */
767 {1, 0, 2, 0, /* 0x63 */
774 {0, 0, 2, 0, /* 0x64 */
781 {1, 0, 3, 0, /* 0x65 */
788 {0, 0, 2, 0, /* 0x66 */
795 {1, 0, 2, 0, /* 0x67 */
802 {0, 0, 2, 0, /* 0x68 */
809 {1, 0, 3, 0, /* 0x69 */
816 {0, 0, 3, 0, /* 0x6a */
823 {1, 0, 3, 0, /* 0x6b */
830 {0, 0, 2, 0, /* 0x6c */
837 {1, 0, 3, 0, /* 0x6d */
844 {0, 0, 2, 0, /* 0x6e */
851 {1, 0, 2, 0, /* 0x6f */
858 {0, 0, 1, 0, /* 0x70 */
865 {1, 0, 2, 0, /* 0x71 */
872 {0, 0, 2, 0, /* 0x72 */
879 {1, 0, 2, 0, /* 0x73 */
886 {0, 0, 2, 0, /* 0x74 */
893 {1, 0, 3, 0, /* 0x75 */
900 {0, 0, 2, 0, /* 0x76 */
907 {1, 0, 2, 0, /* 0x77 */
914 {0, 0, 1, 0, /* 0x78 */
921 {1, 0, 2, 0, /* 0x79 */
928 {0, 0, 2, 0, /* 0x7a */
935 {1, 0, 2, 0, /* 0x7b */
942 {0, 0, 1, 0, /* 0x7c */
949 {1, 0, 2, 0, /* 0x7d */
956 {0, 0, 1, 0, /* 0x7e */
963 {1, 0, 1, 0, /* 0x7f */
970 {0, 1, 1, 0, /* 0x80 */
977 {1, 1, 2, 0, /* 0x81 */
984 {0, 1, 2, 0, /* 0x82 */
991 {1, 1, 2, 0, /* 0x83 */
998 {0, 1, 2, 0, /* 0x84 */
1005 {1, 1, 3, 0, /* 0x85 */
1012 {0, 1, 2, 0, /* 0x86 */
1019 {1, 1, 2, 0, /* 0x87 */
1026 {0, 1, 2, 0, /* 0x88 */
1033 {1, 1, 3, 0, /* 0x89 */
1040 {0, 1, 3, 0, /* 0x8a */
1047 {1, 1, 3, 0, /* 0x8b */
1054 {0, 1, 2, 0, /* 0x8c */
1061 {1, 1, 3, 0, /* 0x8d */
1068 {0, 1, 2, 0, /* 0x8e */
1075 {1, 1, 2, 0, /* 0x8f */
1082 {0, 1, 2, 0, /* 0x90 */
1089 {1, 1, 3, 0, /* 0x91 */
1096 {0, 1, 3, 0, /* 0x92 */
1103 {1, 1, 3, 0, /* 0x93 */
1110 {0, 1, 3, 0, /* 0x94 */
1117 {1, 1, 4, 0, /* 0x95 */
1124 {0, 1, 3, 0, /* 0x96 */
1131 {1, 1, 3, 0, /* 0x97 */
1138 {0, 1, 2, 0, /* 0x98 */
1145 {1, 1, 3, 0, /* 0x99 */
1152 {0, 1, 3, 0, /* 0x9a */
1159 {1, 1, 3, 0, /* 0x9b */
1166 {0, 1, 2, 0, /* 0x9c */
1173 {1, 1, 3, 0, /* 0x9d */
1180 {0, 1, 2, 0, /* 0x9e */
1187 {1, 1, 2, 0, /* 0x9f */
1194 {0, 1, 2, 0, /* 0xa0 */
1201 {1, 1, 3, 0, /* 0xa1 */
1208 {0, 1, 3, 0, /* 0xa2 */
1215 {1, 1, 3, 0, /* 0xa3 */
1222 {0, 1, 3, 0, /* 0xa4 */
1229 {1, 1, 4, 0, /* 0xa5 */
1236 {0, 1, 3, 0, /* 0xa6 */
1243 {1, 1, 3, 0, /* 0xa7 */
1250 {0, 1, 3, 0, /* 0xa8 */
1257 {1, 1, 4, 0, /* 0xa9 */
1264 {0, 1, 4, 0, /* 0xaa */
1271 {1, 1, 4, 0, /* 0xab */
1278 {0, 1, 3, 0, /* 0xac */
1285 {1, 1, 4, 0, /* 0xad */
1292 {0, 1, 3, 0, /* 0xae */
1299 {1, 1, 3, 0, /* 0xaf */
1306 {0, 1, 2, 0, /* 0xb0 */
1313 {1, 1, 3, 0, /* 0xb1 */
1320 {0, 1, 3, 0, /* 0xb2 */
1327 {1, 1, 3, 0, /* 0xb3 */
1334 {0, 1, 3, 0, /* 0xb4 */
1341 {1, 1, 4, 0, /* 0xb5 */
1348 {0, 1, 3, 0, /* 0xb6 */
1355 {1, 1, 3, 0, /* 0xb7 */
1362 {0, 1, 2, 0, /* 0xb8 */
1369 {1, 1, 3, 0, /* 0xb9 */
1376 {0, 1, 3, 0, /* 0xba */
1383 {1, 1, 3, 0, /* 0xbb */
1390 {0, 1, 2, 0, /* 0xbc */
1397 {1, 1, 3, 0, /* 0xbd */
1404 {0, 1, 2, 0, /* 0xbe */
1411 {1, 1, 2, 0, /* 0xbf */
1418 {0, 1, 1, 0, /* 0xc0 */
1425 {1, 1, 2, 0, /* 0xc1 */
1432 {0, 1, 2, 0, /* 0xc2 */
1439 {1, 1, 2, 0, /* 0xc3 */
1446 {0, 1, 2, 0, /* 0xc4 */
1453 {1, 1, 3, 0, /* 0xc5 */
1460 {0, 1, 2, 0, /* 0xc6 */
1467 {1, 1, 2, 0, /* 0xc7 */
1474 {0, 1, 2, 0, /* 0xc8 */
1481 {1, 1, 3, 0, /* 0xc9 */
1488 {0, 1, 3, 0, /* 0xca */
1495 {1, 1, 3, 0, /* 0xcb */
1502 {0, 1, 2, 0, /* 0xcc */
1509 {1, 1, 3, 0, /* 0xcd */
1516 {0, 1, 2, 0, /* 0xce */
1523 {1, 1, 2, 0, /* 0xcf */
1530 {0, 1, 2, 0, /* 0xd0 */
1537 {1, 1, 3, 0, /* 0xd1 */
1544 {0, 1, 3, 0, /* 0xd2 */
1551 {1, 1, 3, 0, /* 0xd3 */
1558 {0, 1, 3, 0, /* 0xd4 */
1565 {1, 1, 4, 0, /* 0xd5 */
1572 {0, 1, 3, 0, /* 0xd6 */
1579 {1, 1, 3, 0, /* 0xd7 */
1586 {0, 1, 2, 0, /* 0xd8 */
1593 {1, 1, 3, 0, /* 0xd9 */
1600 {0, 1, 3, 0, /* 0xda */
1607 {1, 1, 3, 0, /* 0xdb */
1614 {0, 1, 2, 0, /* 0xdc */
1621 {1, 1, 3, 0, /* 0xdd */
1628 {0, 1, 2, 0, /* 0xde */
1635 {1, 1, 2, 0, /* 0xdf */
1642 {0, 1, 1, 0, /* 0xe0 */
1649 {1, 1, 2, 0, /* 0xe1 */
1656 {0, 1, 2, 0, /* 0xe2 */
1663 {1, 1, 2, 0, /* 0xe3 */
1670 {0, 1, 2, 0, /* 0xe4 */
1677 {1, 1, 3, 0, /* 0xe5 */
1684 {0, 1, 2, 0, /* 0xe6 */
1691 {1, 1, 2, 0, /* 0xe7 */
1698 {0, 1, 2, 0, /* 0xe8 */
1705 {1, 1, 3, 0, /* 0xe9 */
1712 {0, 1, 3, 0, /* 0xea */
1719 {1, 1, 3, 0, /* 0xeb */
1726 {0, 1, 2, 0, /* 0xec */
1733 {1, 1, 3, 0, /* 0xed */
1740 {0, 1, 2, 0, /* 0xee */
1747 {1, 1, 2, 0, /* 0xef */
1754 {0, 1, 1, 0, /* 0xf0 */
1761 {1, 1, 2, 0, /* 0xf1 */
1768 {0, 1, 2, 0, /* 0xf2 */
1775 {1, 1, 2, 0, /* 0xf3 */
1782 {0, 1, 2, 0, /* 0xf4 */
1789 {1, 1, 3, 0, /* 0xf5 */
1796 {0, 1, 2, 0, /* 0xf6 */
1803 {1, 1, 2, 0, /* 0xf7 */
1810 {0, 1, 1, 0, /* 0xf8 */
1817 {1, 1, 2, 0, /* 0xf9 */
1824 {0, 1, 2, 0, /* 0xfa */
1831 {1, 1, 2, 0, /* 0xfb */
1838 {0, 1, 1, 0, /* 0xfc */
1845 {1, 1, 2, 0, /* 0xfd */
1852 {0, 1, 1, 0, /* 0xfe */
1859 {1, 1, 1, 0, /* 0xff */
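/*
 * Decide whether the given ifa may be used under the scoping rules in
 * "scope": loopback addresses only when loopback_scope is set, IPv4
 * private addresses only with ipv4_local_scope, and IPv6 link-local /
 * site-local addresses only with the corresponding scope flags.
 */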
1870 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1871 struct sctp_scoping *scope,
1874 if ((scope->loopback_scope == 0) &&
1875 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1877 * skip loopback if not in scope
1881 switch (ifa->address.sa.sa_family) {
1884 if (scope->ipv4_addr_legal) {
1885 struct sockaddr_in *sin;
1887 sin = &ifa->address.sin;
1888 if (sin->sin_addr.s_addr == 0) {
1889 /* not in scope, unspecified */
1892 if ((scope->ipv4_local_scope == 0) &&
1893 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1894 /* private address not in scope */
1904 if (scope->ipv6_addr_legal) {
1905 struct sockaddr_in6 *sin6;
1908 * Must update the flags, bummer, which means any
1909 * IFA locks must now be applied HERE <->
1912 sctp_gather_internal_ifa_flags(ifa);
1914 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1917 /* ok to use deprecated addresses? */
1918 sin6 = &ifa->address.sin6;
1919 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1920 /* skip unspecified addresses */
1923 if ( /* (local_scope == 0) && */
1924 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1927 if ((scope->site_scope == 0) &&
1928 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
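/*
 * Append an IPv4 or IPv6 address parameter for "ifa" to the mbuf chain
 * "m", allocating a fresh mbuf when there is not enough trailing space,
 * and account for the parameter length in *len.  Returns the mbuf the
 * parameter was placed in.
 */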
1942 static struct mbuf *
1943 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1945 #if defined(INET) || defined(INET6)
1946 struct sctp_paramhdr *paramh;
1951 switch (ifa->address.sa.sa_family) {
1954 plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1959 plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1965 #if defined(INET) || defined(INET6)
1966 if (M_TRAILINGSPACE(m) >= plen) {
1967 /* easy case, we just drop it on the end */
1968 paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1971 /* Need more space */
1973 while (SCTP_BUF_NEXT(mret) != NULL) {
1974 mret = SCTP_BUF_NEXT(mret);
1976 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
1977 if (SCTP_BUF_NEXT(mret) == NULL) {
1978 /* We are hosed, can't add more addresses */
1981 mret = SCTP_BUF_NEXT(mret);
1982 paramh = mtod(mret, struct sctp_paramhdr *);
1984 /* now add the parameter */
1985 switch (ifa->address.sa.sa_family) {
1989 struct sctp_ipv4addr_param *ipv4p;
1990 struct sockaddr_in *sin;
1992 sin = &ifa->address.sin;
1993 ipv4p = (struct sctp_ipv4addr_param *)paramh;
1994 paramh->param_type = htons(SCTP_IPV4_ADDRESS);
1995 paramh->param_length = htons(plen);
1996 ipv4p->addr = sin->sin_addr.s_addr;
1997 SCTP_BUF_LEN(mret) += plen;
2004 struct sctp_ipv6addr_param *ipv6p;
2005 struct sockaddr_in6 *sin6;
2007 sin6 = &ifa->address.sin6;
2008 ipv6p = (struct sctp_ipv6addr_param *)paramh;
2009 paramh->param_type = htons(SCTP_IPV6_ADDRESS);
2010 paramh->param_length = htons(plen);
2011 memcpy(ipv6p->addr, &sin6->sin6_addr,
2012 sizeof(ipv6p->addr));
2013 /* clear embedded scope in the address */
2014 in6_clearscope((struct in6_addr *)ipv6p->addr);
2015 SCTP_BUF_LEN(mret) += plen;
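/*
 * Add our local addresses to an INIT or INIT-ACK chunk.  For a bound-all
 * endpoint every interface address that passes the jail, restriction and
 * scope checks is listed (up to SCTP_ADDRESS_LIMIT); for a bound-specific
 * endpoint the endpoint's address list is used, and nothing is listed at
 * all when only a single address is bound, so that a NATed peer sees the
 * packet's source address instead.
 */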
2031 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2032 struct sctp_scoping *scope,
2033 struct mbuf *m_at, int cnt_inits_to,
2034 uint16_t *padding_len, uint16_t *chunk_len)
2036 struct sctp_vrf *vrf = NULL;
2037 int cnt, limit_out = 0, total_count;
2040 vrf_id = inp->def_vrf_id;
2041 SCTP_IPI_ADDR_RLOCK();
2042 vrf = sctp_find_vrf(vrf_id);
2044 SCTP_IPI_ADDR_RUNLOCK();
2047 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2048 struct sctp_ifa *sctp_ifap;
2049 struct sctp_ifn *sctp_ifnp;
2052 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2054 cnt = SCTP_ADDRESS_LIMIT;
2057 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2058 if ((scope->loopback_scope == 0) &&
2059 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2061 * Skip loopback devices if loopback_scope
2066 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2068 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2069 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2070 &sctp_ifap->address.sin.sin_addr) != 0)) {
2075 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2076 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2077 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2081 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2084 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2088 if (cnt > SCTP_ADDRESS_LIMIT) {
2092 if (cnt > SCTP_ADDRESS_LIMIT) {
2099 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2101 if ((scope->loopback_scope == 0) &&
2102 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2104 * Skip loopback devices if
2105 * loopback_scope not set
2109 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2111 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2112 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2113 &sctp_ifap->address.sin.sin_addr) != 0)) {
2118 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2119 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2120 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2124 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2127 if (sctp_is_address_in_scope(sctp_ifap,
2131 if ((chunk_len != NULL) &&
2132 (padding_len != NULL) &&
2133 (*padding_len > 0)) {
2134 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2135 SCTP_BUF_LEN(m_at) += *padding_len;
2136 *chunk_len += *padding_len;
2139 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2150 if (total_count > SCTP_ADDRESS_LIMIT) {
2151 /* No more addresses */
2159 struct sctp_laddr *laddr;
2162 /* First, how many ? */
2163 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2164 if (laddr->ifa == NULL) {
2167 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2169 * Address being deleted by the system, don't
2173 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2175 * Address being deleted on this ep, don't
2180 if (sctp_is_address_in_scope(laddr->ifa,
2187 * To get through a NAT we only list addresses if we have
2188 * more than one. That way if you just bind a single address
2189 * we let the source of the init dictate our address.
2193 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2194 if (laddr->ifa == NULL) {
2197 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2200 if (sctp_is_address_in_scope(laddr->ifa,
2204 if ((chunk_len != NULL) &&
2205 (padding_len != NULL) &&
2206 (*padding_len > 0)) {
2207 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2208 SCTP_BUF_LEN(m_at) += *padding_len;
2209 *chunk_len += *padding_len;
2212 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2214 if (cnt >= SCTP_ADDRESS_LIMIT) {
2220 SCTP_IPI_ADDR_RUNLOCK();
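/*
 * Return "ifa" if it is a preferred source address for a destination of
 * the given scope (same scope or wider), NULL otherwise.  The scope
 * rules are spelled out in the table below.
 */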
2224 static struct sctp_ifa *
2225 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2226 uint8_t dest_is_loop,
2227 uint8_t dest_is_priv,
2230 uint8_t dest_is_global = 0;
2232 /* dest_is_priv is true if destination is a private address */
2233 /* dest_is_loop is true if destination is a loopback address */
2236 * Here we determine if it is a preferred address. A preferred address
2237 * means it is the same scope or higher scope than the destination.
2238 * L = loopback, P = private, G = global
2239 * -----------------------------------------
2240 * src | dest | result
2241 * -----------------------------------------
2242 * L | L | yes
2243 * -----------------------------------------
2244 * P | L | yes-v4 no-v6
2245 * -----------------------------------------
2246 * G | L | yes-v4 no-v6
2247 * -----------------------------------------
2248 * L | P | no
2249 * -----------------------------------------
2250 * P | P | yes
2251 * -----------------------------------------
2252 * G | P | no
2253 * -----------------------------------------
2254 * L | G | no
2255 * -----------------------------------------
2256 * P | G | no
2257 * -----------------------------------------
2258 * G | G | yes
2259 * -----------------------------------------
2262 if (ifa->address.sa.sa_family != fam) {
2263 /* forget mis-matched family */
2266 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2269 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2270 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2271 /* Ok the address may be ok */
2273 if (fam == AF_INET6) {
2274 /* ok to use deprecated addresses? no, let's not! */
2275 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2276 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2279 if (ifa->src_is_priv && !ifa->src_is_loop) {
2281 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2285 if (ifa->src_is_glob) {
2287 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2294 * Now that we know what is what, implement our table. This could in
2295 * theory be done slicker (it used to be), but this is
2296 * straightforward and easier to validate :-)
2298 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2299 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2300 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2301 dest_is_loop, dest_is_priv, dest_is_global);
2303 if ((ifa->src_is_loop) && (dest_is_priv)) {
2304 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2307 if ((ifa->src_is_glob) && (dest_is_priv)) {
2308 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2311 if ((ifa->src_is_loop) && (dest_is_global)) {
2312 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2315 if ((ifa->src_is_priv) && (dest_is_global)) {
2316 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2319 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2320 /* it is a preferred address */
2324 static struct sctp_ifa *
2325 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2326 uint8_t dest_is_loop,
2327 uint8_t dest_is_priv,
2330 uint8_t dest_is_global = 0;
2333 * Here we determine if it is an acceptable address. An acceptable
2334 * address means it is the same scope or higher scope, but we can
2335 * allow for NAT, which means it is ok to have a global dest and a
2338 * L = loopback, P = private, G = global
2339 * -----------------------------------------
2340 * src | dest | result
2341 * -----------------------------------------
2342 * L | L | yes
2343 * -----------------------------------------
2344 * P | L | yes-v4 no-v6
2345 * -----------------------------------------
2346 * G | L | yes
2347 * -----------------------------------------
2348 * L | P | no
2349 * -----------------------------------------
2350 * P | P | yes
2351 * -----------------------------------------
2352 * G | P | yes - May not work
2353 * -----------------------------------------
2354 * L | G | no
2355 * -----------------------------------------
2356 * P | G | yes - May not work
2357 * -----------------------------------------
2358 * G | G | yes
2359 * -----------------------------------------
2362 if (ifa->address.sa.sa_family != fam) {
2363 /* forget non-matching family */
2364 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2365 ifa->address.sa.sa_family, fam);
2368 /* Ok the address may be ok */
2369 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2370 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2371 dest_is_loop, dest_is_priv);
2372 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2376 if (fam == AF_INET6) {
2377 /* ok to use deprecated addresses? */
2378 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2381 if (ifa->src_is_priv) {
2382 /* Special case, linklocal to loop */
2389 * Now that we know what is what, implement our table. This could in
2390 * theory be done slicker (it used to be), but this is
2391 * straightforward and easier to validate :-)
2393 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2396 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2399 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2402 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2405 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2406 /* it is an acceptable address */
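/*
 * Return 1 if "ifa" is on the association's restricted-address list,
 * 0 otherwise.  With no stcb there are no restrictions.
 */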
2411 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2413 struct sctp_laddr *laddr;
2416 /* There are no restrictions, no TCB :-) */
2419 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2420 if (laddr->ifa == NULL) {
2421 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2425 if (laddr->ifa == ifa) {
2426 /* Yes it is on the list */
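/*
 * Return 1 if "ifa" is bound to the endpoint and has no ASCONF action
 * pending on it, 0 otherwise.
 */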
2435 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2437 struct sctp_laddr *laddr;
2441 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2442 if (laddr->ifa == NULL) {
2443 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2447 if ((laddr->ifa == ifa) && laddr->action == 0)
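/*
 * Source address selection for a bound-specific endpoint with no
 * association: prefer a preferred address on the interface we route out
 * that is also bound to the endpoint, then rotate through the bound
 * address list (starting at next_addr_touse) looking first for a
 * preferred and finally for an acceptable address.
 */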
2456 static struct sctp_ifa *
2457 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2460 int non_asoc_addr_ok,
2461 uint8_t dest_is_priv,
2462 uint8_t dest_is_loop,
2465 struct sctp_laddr *laddr, *starting_point;
2468 struct sctp_ifn *sctp_ifn;
2469 struct sctp_ifa *sctp_ifa, *sifa;
2470 struct sctp_vrf *vrf;
2473 vrf = sctp_find_vrf(vrf_id);
2477 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2478 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2479 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2481 * first question: is the ifn we will emit on in our list? If so, we
2482 * want such an address. Note that we first looked for a preferred
2486 /* is a preferred one on the interface we route out? */
2487 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2489 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2490 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2491 &sctp_ifa->address.sin.sin_addr) != 0)) {
2496 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2497 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2498 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2502 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2503 (non_asoc_addr_ok == 0))
2505 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2510 if (sctp_is_addr_in_ep(inp, sifa)) {
2511 atomic_add_int(&sifa->refcount, 1);
2517 * ok, now we need to find one in the list of addresses. We
2518 * can't get one on the emitting interface, so let's first find a
2519 * preferred one. If not that, an acceptable one; otherwise... we
2522 starting_point = inp->next_addr_touse;
2524 if (inp->next_addr_touse == NULL) {
2525 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2528 for (laddr = inp->next_addr_touse; laddr;
2529 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2530 if (laddr->ifa == NULL) {
2531 /* address has been removed */
2534 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2535 /* address is being deleted */
2538 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2542 atomic_add_int(&sifa->refcount, 1);
2545 if (resettotop == 0) {
2546 inp->next_addr_touse = NULL;
2550 inp->next_addr_touse = starting_point;
2553 if (inp->next_addr_touse == NULL) {
2554 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2558 /* ok, what about an acceptable address in the inp */
2559 for (laddr = inp->next_addr_touse; laddr;
2560 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2561 if (laddr->ifa == NULL) {
2562 /* address has been removed */
2565 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2566 /* address is being deleted */
2569 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2573 atomic_add_int(&sifa->refcount, 1);
2576 if (resettotop == 0) {
2577 inp->next_addr_touse = NULL;
2578 goto once_again_too;
2582 * no address bound can be a source for the destination we are in
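/*
 * Source address selection for a bound-specific endpoint with an
 * association: like the inp case above, but addresses on the
 * association's restricted list are skipped (unless non_asoc_addr_ok
 * allows them) and the rotation state is kept in asoc.last_used_address.
 */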
2590 static struct sctp_ifa *
2591 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2592 struct sctp_tcb *stcb,
2595 uint8_t dest_is_priv,
2596 uint8_t dest_is_loop,
2597 int non_asoc_addr_ok,
2600 struct sctp_laddr *laddr, *starting_point;
2602 struct sctp_ifn *sctp_ifn;
2603 struct sctp_ifa *sctp_ifa, *sifa;
2604 uint8_t start_at_beginning = 0;
2605 struct sctp_vrf *vrf;
2609 * first question: is the ifn we will emit on in our list? If so, we
2612 vrf = sctp_find_vrf(vrf_id);
2616 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2617 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2618 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2621 * first question, is the ifn we will emit on in our list? If so,
2622 * we want that one. First we look for a preferred. Second, we go
2623 * for an acceptable.
2626 /* first try for a preferred address on the ep */
2627 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2629 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2630 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2631 &sctp_ifa->address.sin.sin_addr) != 0)) {
2636 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2637 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2638 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2642 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2644 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2645 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2648 if (((non_asoc_addr_ok == 0) &&
2649 (sctp_is_addr_restricted(stcb, sifa))) ||
2650 (non_asoc_addr_ok &&
2651 (sctp_is_addr_restricted(stcb, sifa)) &&
2652 (!sctp_is_addr_pending(stcb, sifa)))) {
2653 /* on the no-no list */
2656 atomic_add_int(&sifa->refcount, 1);
2660 /* next try for an acceptable address on the ep */
2661 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2663 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2664 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2665 &sctp_ifa->address.sin.sin_addr) != 0)) {
2670 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2671 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2672 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2676 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2678 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2679 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2682 if (((non_asoc_addr_ok == 0) &&
2683 (sctp_is_addr_restricted(stcb, sifa))) ||
2684 (non_asoc_addr_ok &&
2685 (sctp_is_addr_restricted(stcb, sifa)) &&
2686 (!sctp_is_addr_pending(stcb, sifa)))) {
2687 /* on the no-no list */
2690 atomic_add_int(&sifa->refcount, 1);
2697 * if we can't find one like that, then we must look at all bound
2698 * addresses and pick one: first a preferred one, then an acceptable one.
2700 starting_point = stcb->asoc.last_used_address;
2702 if (stcb->asoc.last_used_address == NULL) {
2703 start_at_beginning = 1;
2704 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2706 /* search beginning with the last used address */
2707 for (laddr = stcb->asoc.last_used_address; laddr;
2708 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2709 if (laddr->ifa == NULL) {
2710 /* address has been removed */
2713 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2714 /* address is being deleted */
2717 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2720 if (((non_asoc_addr_ok == 0) &&
2721 (sctp_is_addr_restricted(stcb, sifa))) ||
2722 (non_asoc_addr_ok &&
2723 (sctp_is_addr_restricted(stcb, sifa)) &&
2724 (!sctp_is_addr_pending(stcb, sifa)))) {
2725 /* on the no-no list */
2728 stcb->asoc.last_used_address = laddr;
2729 atomic_add_int(&sifa->refcount, 1);
2732 if (start_at_beginning == 0) {
2733 stcb->asoc.last_used_address = NULL;
2734 goto sctp_from_the_top;
2736 /* now try for any higher scope than the destination */
2737 stcb->asoc.last_used_address = starting_point;
2738 start_at_beginning = 0;
2740 if (stcb->asoc.last_used_address == NULL) {
2741 start_at_beginning = 1;
2742 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2744 /* search beginning with the last used address */
2745 for (laddr = stcb->asoc.last_used_address; laddr;
2746 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2747 if (laddr->ifa == NULL) {
2748 /* address has been removed */
2751 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2752 /* address is being deleted */
2755 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2759 if (((non_asoc_addr_ok == 0) &&
2760 (sctp_is_addr_restricted(stcb, sifa))) ||
2761 (non_asoc_addr_ok &&
2762 (sctp_is_addr_restricted(stcb, sifa)) &&
2763 (!sctp_is_addr_pending(stcb, sifa)))) {
2764 /* on the no-no list */
2767 stcb->asoc.last_used_address = laddr;
2768 atomic_add_int(&sifa->refcount, 1);
2771 if (start_at_beginning == 0) {
2772 stcb->asoc.last_used_address = NULL;
2773 goto sctp_from_the_top2;
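/*
 * For a bound-all endpoint, return the addr_wanted-th preferred source
 * address on the interface "ifn" that survives the jail, defer-use,
 * scope and restriction checks; for IPv6 link-local destinations the
 * source must also share the destination's scope id.
 */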
2778 static struct sctp_ifa *
2779 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2780 struct sctp_inpcb *inp,
2781 struct sctp_tcb *stcb,
2782 int non_asoc_addr_ok,
2783 uint8_t dest_is_loop,
2784 uint8_t dest_is_priv,
2790 struct sctp_ifa *ifa, *sifa;
2791 int num_eligible_addr = 0;
2793 struct sockaddr_in6 sin6, lsa6;
2795 if (fam == AF_INET6) {
2796 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2797 (void)sa6_recoverscope(&sin6);
2800 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2802 if ((ifa->address.sa.sa_family == AF_INET) &&
2803 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2804 &ifa->address.sin.sin_addr) != 0)) {
2809 if ((ifa->address.sa.sa_family == AF_INET6) &&
2810 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2811 &ifa->address.sin6.sin6_addr) != 0)) {
2815 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2816 (non_asoc_addr_ok == 0))
2818 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2823 if (fam == AF_INET6 &&
2825 sifa->src_is_loop && sifa->src_is_priv) {
2827 * don't allow fe80::1 to be a src on loop ::1, we
2828 * don't list it to the peer so we will get an
2833 if (fam == AF_INET6 &&
2834 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2835 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2837 * link-local <-> link-local must belong to the same
2840 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2841 (void)sa6_recoverscope(&lsa6);
2842 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2849 * Check if the IPv6 address matches the next-hop. In the
2850 * mobile case, an old IPv6 address may not be deleted from the
2851 * interface. Then, the interface has previous and new
2852 * addresses. We should use the one corresponding to the
2853 * next-hop. (by micchie)
2856 if (stcb && fam == AF_INET6 &&
2857 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2858 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2865 /* Avoid topologically incorrect IPv4 address */
2866 if (stcb && fam == AF_INET &&
2867 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2868 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2874 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2877 if (((non_asoc_addr_ok == 0) &&
2878 (sctp_is_addr_restricted(stcb, sifa))) ||
2879 (non_asoc_addr_ok &&
2880 (sctp_is_addr_restricted(stcb, sifa)) &&
2881 (!sctp_is_addr_pending(stcb, sifa)))) {
2883 * It is restricted for some reason..
2884 * probably not yet added.
2889 if (num_eligible_addr >= addr_wanted) {
2892 num_eligible_addr++;
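/*
 * Count how many preferred source addresses on "ifn" are usable for
 * this destination; sctp_choose_boundall() uses the count to drive its
 * round-robin selection.
 */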
2899 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2900 struct sctp_inpcb *inp,
2901 struct sctp_tcb *stcb,
2902 int non_asoc_addr_ok,
2903 uint8_t dest_is_loop,
2904 uint8_t dest_is_priv,
2907 struct sctp_ifa *ifa, *sifa;
2908 int num_eligible_addr = 0;
2910 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2912 if ((ifa->address.sa.sa_family == AF_INET) &&
2913 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2914 &ifa->address.sin.sin_addr) != 0)) {
2919 if ((ifa->address.sa.sa_family == AF_INET6) &&
2921 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2922 &ifa->address.sin6.sin6_addr) != 0)) {
2926 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2927 (non_asoc_addr_ok == 0)) {
2930 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2936 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2939 if (((non_asoc_addr_ok == 0) &&
2940 (sctp_is_addr_restricted(stcb, sifa))) ||
2941 (non_asoc_addr_ok &&
2942 (sctp_is_addr_restricted(stcb, sifa)) &&
2943 (!sctp_is_addr_pending(stcb, sifa)))) {
2945 * It is restricted for some reason..
2946 * probably not yet added.
2951 num_eligible_addr++;
2953 return (num_eligible_addr);
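/*
 * Source address selection for a bound-all endpoint.  Plan A: a
 * preferred address on the interface the route leaves by.  Plan B: a
 * preferred address on any other interface.  Plan C: an acceptable
 * address on the emit interface.  Plan D: an acceptable address on any
 * interface.  As a last resort the search is retried with IPv4 private
 * addresses temporarily allowed.
 */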
2956 static struct sctp_ifa *
2957 sctp_choose_boundall(struct sctp_inpcb *inp,
2958 struct sctp_tcb *stcb,
2959 struct sctp_nets *net,
2962 uint8_t dest_is_priv,
2963 uint8_t dest_is_loop,
2964 int non_asoc_addr_ok,
2967 int cur_addr_num = 0, num_preferred = 0;
2969 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2970 struct sctp_ifa *sctp_ifa, *sifa;
2972 struct sctp_vrf *vrf;
2978 * For boundall we can use any address in the association.
2979 * If non_asoc_addr_ok is set we can use any address (at least in
2980 * theory). So we look for preferred addresses first. If we find one,
2981 * we use it. Otherwise we next try to get an address on the
2982 * interface, which we should be able to do (unless non_asoc_addr_ok
2983 * is false and we are routed out that way). In these cases where we
2984 * can't use the address of the interface we go through all the
2985 * ifn's looking for an address we can use and fill that in. Punting
2986 * means we send back address 0, which will probably cause problems
2987 * actually since then IP will fill in the address of the route ifn,
2988 * which means we probably already rejected it.. i.e. here comes an
2991 vrf = sctp_find_vrf(vrf_id);
2995 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2996 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2997 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2998 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2999 if (sctp_ifn == NULL) {
3000 /* ?? We don't have this guy ?? */
3001 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
3002 goto bound_all_plan_b;
3004 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
3005 ifn_index, sctp_ifn->ifn_name);
3008 cur_addr_num = net->indx_of_eligible_next_to_use;
3010 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3015 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3016 num_preferred, sctp_ifn->ifn_name);
3017 if (num_preferred == 0) {
3019 * no eligible addresses, we must use some other interface
3020 * address if we can find one.
3022 goto bound_all_plan_b;
3025 * Ok we have num_eligible_addr set with how many we can use, this
3026 * may vary from call to call due to addresses being deprecated
3029 if (cur_addr_num >= num_preferred) {
3033 * select the nth address from the list (where cur_addr_num is the
3034 * nth) and 0 is the first one, 1 is the second one etc...
3036 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3038 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3039 dest_is_priv, cur_addr_num, fam, ro);
3041 /* if sctp_ifa is NULL, something changed; fall to plan b. */
3043 atomic_add_int(&sctp_ifa->refcount, 1);
3045 /* save off which one we will want next */
3046 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3051 * plan_b: Look at all interfaces and find a preferred address. If
3052 * no preferred fall through to plan_c.
3055 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3056 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3057 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3058 sctp_ifn->ifn_name);
3059 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3060 /* wrong base scope */
3061 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3064 if ((sctp_ifn == looked_at) && looked_at) {
3065 /* already looked at this guy */
3066 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3069 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3070 dest_is_loop, dest_is_priv, fam);
3071 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3072 "Found ifn:%p %d preferred source addresses\n",
3073 ifn, num_preferred);
3074 if (num_preferred == 0) {
3075 /* None on this interface. */
3076 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3079 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3080 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3081 num_preferred, (void *)sctp_ifn, cur_addr_num);
3084 * Ok we have num_eligible_addr set with how many we can
3085 * use, this may vary from call to call due to addresses
3086 * being deprecated etc..
3088 if (cur_addr_num >= num_preferred) {
3091 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3092 dest_is_priv, cur_addr_num, fam, ro);
3096 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3097 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3099 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3100 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3101 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3102 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3104 atomic_add_int(&sifa->refcount, 1);
3108 again_with_private_addresses_allowed:
3110 /* plan_c: do we have an acceptable address on the emit interface */
3112 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3113 if (emit_ifn == NULL) {
3114 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3117 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3118 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3120 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3121 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3122 &sctp_ifa->address.sin.sin_addr) != 0)) {
3123 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3128 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3129 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3130 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3131 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3135 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3136 (non_asoc_addr_ok == 0)) {
3137 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3140 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3143 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3147 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3148 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3152 if (((non_asoc_addr_ok == 0) &&
3153 (sctp_is_addr_restricted(stcb, sifa))) ||
3154 (non_asoc_addr_ok &&
3155 (sctp_is_addr_restricted(stcb, sifa)) &&
3156 (!sctp_is_addr_pending(stcb, sifa)))) {
3158 * It is restricted for some reason..
3159 * probably not yet added.
3161 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3166 atomic_add_int(&sifa->refcount, 1);
3171 * plan_d: We are in trouble. No preferred address on the emit
3172 * interface. And not even a preferred address on all interfaces. Go
3173 * out and see if we can find an acceptable address somewhere
3174 * amongst all interfaces.
3176 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3177 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3178 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3179 /* wrong base scope */
3182 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3184 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3185 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3186 &sctp_ifa->address.sin.sin_addr) != 0)) {
3191 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3192 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3193 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3197 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3198 (non_asoc_addr_ok == 0))
3200 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3206 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3210 if (((non_asoc_addr_ok == 0) &&
3211 (sctp_is_addr_restricted(stcb, sifa))) ||
3212 (non_asoc_addr_ok &&
3213 (sctp_is_addr_restricted(stcb, sifa)) &&
3214 (!sctp_is_addr_pending(stcb, sifa)))) {
3216 * It is restricted for some
3217 * reason.. probably not yet added.
3228 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3229 stcb->asoc.scope.ipv4_local_scope = 1;
3231 goto again_with_private_addresses_allowed;
3232 } else if (retried == 1) {
3233 stcb->asoc.scope.ipv4_local_scope = 0;
3241 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3242 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3243 /* wrong base scope */
3246 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3247 struct sctp_ifa *tmp_sifa;
3250 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3251 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3252 &sctp_ifa->address.sin.sin_addr) != 0)) {
3257 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3258 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3259 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3263 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3264 (non_asoc_addr_ok == 0))
3266 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3269 if (tmp_sifa == NULL) {
3272 if (tmp_sifa == sifa) {
3276 if (sctp_is_address_in_scope(tmp_sifa,
3277 &stcb->asoc.scope, 0) == 0) {
3280 if (((non_asoc_addr_ok == 0) &&
3281 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3282 (non_asoc_addr_ok &&
3283 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3284 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3294 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3295 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3296 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3301 atomic_add_int(&sifa->refcount, 1);
3309 /* tcb may be NULL */
3311 sctp_source_address_selection(struct sctp_inpcb *inp,
3312 struct sctp_tcb *stcb,
3314 struct sctp_nets *net,
3315 int non_asoc_addr_ok, uint32_t vrf_id)
3317 struct sctp_ifa *answer;
3318 uint8_t dest_is_priv, dest_is_loop;
3321 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3324 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3329 * - Find the route if needed, cache if I can.
3330 * - Look at interface address in route. Is it in the bound list? If so we
3331 * have the best source.
3332 * - If not we must rotate amongst the addresses.
3336 * Do we need to pay attention to scope? We can have a private address
3337 * or a global address we are sourcing or sending to. So if we draw
3339 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3341 * ------------------------------------------
3342 * source * dest * result
3343 * -----------------------------------------
3344 * <a> Private * Global * NAT
3345 * -----------------------------------------
3346 * <b> Private * Private * No problem
3347 * -----------------------------------------
3348 * <c> Global * Private * Huh, How will this work?
3349 * -----------------------------------------
3350 * <d> Global * Global * No Problem
3351 *------------------------------------------
3352 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3354 *------------------------------------------
3355 * source * dest * result
3356 * -----------------------------------------
3357 * <a> Linklocal * Global *
3358 * -----------------------------------------
3359 * <b> Linklocal * Linklocal * No problem
3360 * -----------------------------------------
3361 * <c> Global * Linklocal * Huh, How will this work?
3362 * -----------------------------------------
3363 * <d> Global * Global * No Problem
3364 *------------------------------------------
3365 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3367 * And then we add to that what happens if there are multiple addresses
3368 * assigned to an interface. Remember the ifa on a ifn is a linked
3369 * list of addresses. So one interface can have more than one IP
3370 * address. What happens if we have both a private and a global
3371 * address? Do we then use context of destination to sort out which
3372 * one is best? And what about NATs? Sending P->G may get you a NAT
3373 * translation, or should you select the G that's on the interface in
3378 * - count the number of addresses on the interface.
3379 * - if it is one, no problem except case <c>.
3380 * For <a> we will assume a NAT out there.
3381 * - if there are more than one, then we need to worry about scope P
3382 * or G. We should prefer G -> G and P -> P if possible.
3383 * Then as a secondary fall back to mixed types G->P being a last
3385 * - The above all works for bound all, but for bound specific we need
3386 * to use the same concept but instead only consider the bound
3387 * addresses. If the bound set is NOT assigned to the interface then
3388 * we must use rotation amongst the bound addresses.
3390 if (ro->ro_rt == NULL) {
3392 * Need a route to cache.
3394 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3396 if (ro->ro_rt == NULL) {
3399 fam = ro->ro_dst.sa_family;
3400 dest_is_priv = dest_is_loop = 0;
3401 /* Setup our scopes for the destination */
3405 /* Scope based on outbound address */
3406 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3409 /* mark it as local */
3410 net->addr_is_local = 1;
3412 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3419 /* Scope based on outbound address */
3420 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3421 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3423 * If the address is a loopback address, which
3424 * consists of "::1" OR "fe80::1%lo0", we are
3425 * loopback scope. But we don't use dest_is_priv
3426 * (link local addresses).
3430 /* mark it as local */
3431 net->addr_is_local = 1;
3433 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3439 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3440 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3441 SCTP_IPI_ADDR_RLOCK();
3442 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3446 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3447 dest_is_priv, dest_is_loop,
3448 non_asoc_addr_ok, fam);
3449 SCTP_IPI_ADDR_RUNLOCK();
3456 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3457 vrf_id, dest_is_priv,
3459 non_asoc_addr_ok, fam);
3461 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3466 SCTP_IPI_ADDR_RUNLOCK();
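/*
 * Walk the cmsg headers in "control" looking for c_type at level
 * IPPROTO_SCTP and copy its data into "data".  When SCTP_SNDRCV is
 * requested, SCTP_SNDINFO, SCTP_PRINFO and SCTP_AUTHINFO cmsgs are
 * translated into the legacy struct sctp_sndrcvinfo.  Returns 1 if
 * something was found, 0 otherwise.
 */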
3471 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3474 struct sctp_sndinfo sndinfo;
3475 struct sctp_prinfo prinfo;
3476 struct sctp_authinfo authinfo;
3477 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3481 * Independent of how many mbufs, find the c_type inside the control
3482 * structure and copy out the data.
3485 tot_len = SCTP_BUF_LEN(control);
3486 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3487 rem_len = tot_len - off;
3488 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3489 /* There is not enough room for one more. */
3492 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3493 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3494 /* We don't have a complete CMSG header. */
3497 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3498 /* We don't have the complete CMSG. */
3501 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3502 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3503 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3504 ((c_type == cmh.cmsg_type) ||
3505 ((c_type == SCTP_SNDRCV) &&
3506 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3507 (cmh.cmsg_type == SCTP_PRINFO) ||
3508 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3509 if (c_type == cmh.cmsg_type) {
3510 if (cpsize > INT_MAX) {
3513 if (cmsg_data_len < (int)cpsize) {
3516 /* It is exactly what we want. Copy it out. */
3517 m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data);
3520 struct sctp_sndrcvinfo *sndrcvinfo;
3522 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3524 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3527 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3529 switch (cmh.cmsg_type) {
3531 if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
3534 m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3535 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3536 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3537 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3538 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3539 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3542 if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
3545 m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3546 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3547 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3549 sndrcvinfo->sinfo_timetolive = 0;
3551 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3554 if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
3557 m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3558 sndrcvinfo->sinfo_keynumber_valid = 1;
3559 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
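/*
 * Apply cmsgs supplied with a one-to-many style send that implicitly
 * sets up an association: SCTP_INIT overrides the stream, retransmit
 * and RTO defaults, and SCTP_DSTADDRV4/SCTP_DSTADDRV6 add extra remote
 * addresses to the new association.
 */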
3572 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3575 struct sctp_initmsg initmsg;
3577 struct sockaddr_in sin;
3580 struct sockaddr_in6 sin6;
3582 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3584 tot_len = SCTP_BUF_LEN(control);
3585 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3586 rem_len = tot_len - off;
3587 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3588 /* There is not enough room for one more. */
3592 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3593 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3594 /* We don't have a complete CMSG header. */
3598 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3599 /* We don't have the complete CMSG. */
3603 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3604 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3605 if (cmh.cmsg_level == IPPROTO_SCTP) {
3606 switch (cmh.cmsg_type) {
3608 if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
3612 m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3613 if (initmsg.sinit_max_attempts)
3614 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3615 if (initmsg.sinit_num_ostreams)
3616 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3617 if (initmsg.sinit_max_instreams)
3618 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3619 if (initmsg.sinit_max_init_timeo)
3620 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3621 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3622 struct sctp_stream_out *tmp_str;
3624 #if defined(SCTP_DETAILED_STR_STATS)
3628 /* Default is NOT correct */
3629 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3630 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3631 SCTP_TCB_UNLOCK(stcb);
3632 SCTP_MALLOC(tmp_str,
3633 struct sctp_stream_out *,
3634 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3636 SCTP_TCB_LOCK(stcb);
3637 if (tmp_str != NULL) {
3638 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3639 stcb->asoc.strmout = tmp_str;
3640 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3642 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3644 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3645 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3646 stcb->asoc.strmout[i].chunks_on_queues = 0;
3647 stcb->asoc.strmout[i].next_mid_ordered = 0;
3648 stcb->asoc.strmout[i].next_mid_unordered = 0;
3649 #if defined(SCTP_DETAILED_STR_STATS)
3650 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3651 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3652 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3655 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3656 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3658 stcb->asoc.strmout[i].sid = i;
3659 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3660 stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3661 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3666 case SCTP_DSTADDRV4:
3667 if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3671 memset(&sin, 0, sizeof(struct sockaddr_in));
3672 sin.sin_family = AF_INET;
3673 sin.sin_len = sizeof(struct sockaddr_in);
3674 sin.sin_port = stcb->rport;
3675 m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3676 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3677 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3678 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3682 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3683 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3690 case SCTP_DSTADDRV6:
3691 if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3695 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3696 sin6.sin6_family = AF_INET6;
3697 sin6.sin6_len = sizeof(struct sockaddr_in6);
3698 sin6.sin6_port = stcb->rport;
3699 m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3700 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3701 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3706 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3707 in6_sin6_2_sin(&sin, &sin6);
3708 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3709 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3710 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3714 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3715 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3721 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3722 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3736 #if defined(INET) || defined(INET6)
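/*
 * Look through SCTP_DSTADDRV4/SCTP_DSTADDRV6 cmsgs for a destination
 * address that already maps to an association on this endpoint and
 * return it (with *net_p set to the matching remote address), or NULL.
 */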
3737 static struct sctp_tcb *
3738 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3740 struct mbuf *control,
3741 struct sctp_nets **net_p,
3745 struct sctp_tcb *stcb;
3746 struct sockaddr *addr;
3748 struct sockaddr_in sin;
3751 struct sockaddr_in6 sin6;
3753 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3755 tot_len = SCTP_BUF_LEN(control);
3756 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3757 rem_len = tot_len - off;
3758 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3759 /* There is not enough room for one more. */
3763 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3764 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3765 /* We don't have a complete CMSG header. */
3769 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3770 /* We don't have the complete CMSG. */
3774 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3775 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3776 if (cmh.cmsg_level == IPPROTO_SCTP) {
3777 switch (cmh.cmsg_type) {
3779 case SCTP_DSTADDRV4:
3780 if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3784 memset(&sin, 0, sizeof(struct sockaddr_in));
3785 sin.sin_family = AF_INET;
3786 sin.sin_len = sizeof(struct sockaddr_in);
3787 sin.sin_port = port;
3788 m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3789 addr = (struct sockaddr *)&sin;
3793 case SCTP_DSTADDRV6:
3794 if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3798 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3799 sin6.sin6_family = AF_INET6;
3800 sin6.sin6_len = sizeof(struct sockaddr_in6);
3801 sin6.sin6_port = port;
3802 m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3804 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3805 in6_sin6_2_sin(&sin, &sin6);
3806 addr = (struct sockaddr *)&sin;
3809 addr = (struct sockaddr *)&sin6;
3817 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
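/*
 * Build the STATE-COOKIE parameter for an INIT-ACK: a copy of our state
 * cookie data, followed by copies of the received INIT and of our
 * INIT-ACK, with space reserved at the end for the HMAC signature
 * (*signature points at it so the caller can fill it in).
 */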
3828 static struct mbuf *
3829 sctp_add_cookie(struct mbuf *init, int init_offset,
3830 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3832 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3833 struct sctp_state_cookie *stc;
3834 struct sctp_paramhdr *ph;
3839 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3840 sizeof(struct sctp_paramhdr)), 0,
3841 M_NOWAIT, 1, MT_DATA);
3845 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3846 if (copy_init == NULL) {
3850 #ifdef SCTP_MBUF_LOGGING
3851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3852 sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3855 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3857 if (copy_initack == NULL) {
3859 sctp_m_freem(copy_init);
3862 #ifdef SCTP_MBUF_LOGGING
3863 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3864 sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3867 /* easy side we just drop it on the end */
3868 ph = mtod(mret, struct sctp_paramhdr *);
3869 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3870 sizeof(struct sctp_paramhdr);
3871 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3872 sizeof(struct sctp_paramhdr));
3873 ph->param_type = htons(SCTP_STATE_COOKIE);
3874 ph->param_length = 0; /* fill in at the end */
3875 /* Fill in the stc cookie data */
3876 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3878 /* tack the INIT and then the INIT-ACK onto the chain */
3880 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3881 cookie_sz += SCTP_BUF_LEN(m_at);
3882 if (SCTP_BUF_NEXT(m_at) == NULL) {
3883 SCTP_BUF_NEXT(m_at) = copy_init;
3887 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3888 cookie_sz += SCTP_BUF_LEN(m_at);
3889 if (SCTP_BUF_NEXT(m_at) == NULL) {
3890 SCTP_BUF_NEXT(m_at) = copy_initack;
3894 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3895 cookie_sz += SCTP_BUF_LEN(m_at);
3896 if (SCTP_BUF_NEXT(m_at) == NULL) {
3900 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
3902 /* no space, so free the entire chain */
3906 SCTP_BUF_LEN(sig) = 0;
3907 SCTP_BUF_NEXT(m_at) = sig;
3909 foo = (uint8_t *)(mtod(sig, caddr_t)+sig_offset);
3910 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3912 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3913 cookie_sz += SCTP_SIGNATURE_SIZE;
3914 ph->param_length = htons(cookie_sz);
3920 sctp_get_ect(struct sctp_tcb *stcb)
3922 if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
3923 return (SCTP_ECT0_BIT);
3929 #if defined(INET) || defined(INET6)
3931 sctp_handle_no_route(struct sctp_tcb *stcb,
3932 struct sctp_nets *net,
3935 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3938 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3939 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3940 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3941 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3942 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
3943 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3947 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3948 net->dest_state &= ~SCTP_ADDR_PF;
3952 if (net == stcb->asoc.primary_destination) {
3953 /* need a new primary */
3954 struct sctp_nets *alt;
3956 alt = sctp_find_alternate_net(stcb, net, 0);
3958 if (stcb->asoc.alternate) {
3959 sctp_free_remote_addr(stcb->asoc.alternate);
3961 stcb->asoc.alternate = alt;
3962 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3963 if (net->ro._s_addr) {
3964 sctp_free_ifa(net->ro._s_addr);
3965 net->ro._s_addr = NULL;
3967 net->src_addr_selected = 0;
3976 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3977 struct sctp_tcb *stcb, /* may be NULL */
3978 struct sctp_nets *net,
3979 struct sockaddr *to,
3981 uint32_t auth_offset,
3982 struct sctp_auth_chunk *auth,
3983 uint16_t auth_keyid,
3984 int nofragment_flag,
3991 union sctp_sockstore *over_addr,
3992 uint8_t mflowtype, uint32_t mflowid,
3993 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3994 int so_locked SCTP_UNUSED
4000 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
4002 * Given an mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
4003 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
4004 * - fill in the HMAC digest of any AUTH chunk in the packet.
4005 * - calculate and fill in the SCTP checksum.
4006 * - prepend an IP header.
4007 * - if boundall use INADDR_ANY.
4008 * - if boundspecific do source address selection.
4009 * - set the fragmentation option for IPv4.
4010 * - On return from IP output, check/adjust mtu size of output
4011 * interface and smallest_mtu size as well.
4013 /* Will need ifdefs around this */
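/*
 * A rough roadmap of what follows (per address family): fill in the AUTH
 * HMAC if requested, build the IPv4 or IPv6 header, pick a source address
 * (cached per-net, taken from over_addr, or freshly selected against a
 * temporary route), optionally insert a UDP encapsulation header, compute
 * or offload the CRC32c, hand the packet to the IP output routine, and
 * finally update route/MTU bookkeeping for the destination.
 */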
4015 struct sctphdr *sctphdr;
4018 #if defined(INET) || defined(INET6)
4021 #if defined(INET) || defined(INET6)
4023 sctp_route_t *ro = NULL;
4024 struct udphdr *udp = NULL;
4027 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4028 struct socket *so = NULL;
4031 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4032 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4036 #if defined(INET) || defined(INET6)
4038 vrf_id = stcb->asoc.vrf_id;
4040 vrf_id = inp->def_vrf_id;
4043 /* fill in the HMAC digest for any AUTH chunk in the packet */
4044 if ((auth != NULL) && (stcb != NULL)) {
4045 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4049 tos_value = net->dscp;
4051 tos_value = stcb->asoc.default_dscp;
4053 tos_value = inp->sctp_ep.default_dscp;
4056 switch (to->sa_family) {
4060 struct ip *ip = NULL;
4061 sctp_route_t iproute;
4064 len = SCTP_MIN_V4_OVERHEAD;
4066 len += sizeof(struct udphdr);
4068 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4071 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4074 SCTP_ALIGN_TO_END(newm, len);
4075 SCTP_BUF_LEN(newm) = len;
4076 SCTP_BUF_NEXT(newm) = m;
4079 m->m_pkthdr.flowid = net->flowid;
4080 M_HASHTYPE_SET(m, net->flowtype);
4082 m->m_pkthdr.flowid = mflowid;
4083 M_HASHTYPE_SET(m, mflowtype);
4085 packet_length = sctp_calculate_len(m);
4086 ip = mtod(m, struct ip *);
4087 ip->ip_v = IPVERSION;
4088 ip->ip_hl = (sizeof(struct ip) >> 2);
4089 if (tos_value == 0) {
4091 * This means, in particular, that it is not set
4092 * at the SCTP layer, so use the value from the IP layer.
4095 tos_value = inp->ip_inp.inp.inp_ip_tos;
4099 tos_value |= sctp_get_ect(stcb);
4101 if ((nofragment_flag) && (port == 0)) {
4102 ip->ip_off = htons(IP_DF);
4104 ip->ip_off = htons(0);
4106 /* FreeBSD has a function for ip_ids */
4109 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4110 ip->ip_len = htons(packet_length);
4111 ip->ip_tos = tos_value;
4113 ip->ip_p = IPPROTO_UDP;
4115 ip->ip_p = IPPROTO_SCTP;
4120 memset(&iproute, 0, sizeof(iproute));
4121 memcpy(&ro->ro_dst, to, to->sa_len);
4123 ro = (sctp_route_t *)&net->ro;
4125 /* Now the address selection part */
4126 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4128 /* call the routine to select the src address */
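/*
 * If we are sending to a known destination (net != NULL and not an
 * out-of-association send), reuse the cached source address and only
 * re-run the selection when the cached ifa has been deleted or become
 * unusable. Otherwise the source is either the supplied over_addr or
 * the result of a fresh selection against the temporary route built
 * above.
 */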
4129 if (net && out_of_asoc_ok == 0) {
4130 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4131 sctp_free_ifa(net->ro._s_addr);
4132 net->ro._s_addr = NULL;
4133 net->src_addr_selected = 0;
4139 if (net->src_addr_selected == 0) {
4140 /* Cache the source address */
4141 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4144 net->src_addr_selected = 1;
4146 if (net->ro._s_addr == NULL) {
4147 /* No route to host */
4148 net->src_addr_selected = 0;
4149 sctp_handle_no_route(stcb, net, so_locked);
4150 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4152 return (EHOSTUNREACH);
4154 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4156 if (over_addr == NULL) {
4157 struct sctp_ifa *_lsrc;
4159 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4163 if (_lsrc == NULL) {
4164 sctp_handle_no_route(stcb, net, so_locked);
4165 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4167 return (EHOSTUNREACH);
4169 ip->ip_src = _lsrc->address.sin.sin_addr;
4170 sctp_free_ifa(_lsrc);
4172 ip->ip_src = over_addr->sin.sin_addr;
4173 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4177 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4178 sctp_handle_no_route(stcb, net, so_locked);
4179 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4181 return (EHOSTUNREACH);
4183 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4184 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4185 udp->uh_dport = port;
4186 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
4188 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
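/*
 * UDP encapsulation: the UDP header sits between the IPv4 header and the
 * SCTP common header. The source port is the local tunneling port
 * (sysctl), the destination port is the port passed in (the peer's
 * encapsulation port), and uh_sum is seeded with the pseudo-header so
 * the UDP checksum can be completed later (see SCTP_ENABLE_UDP_CSUM
 * below).
 */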
4192 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4194 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4197 sctphdr->src_port = src_port;
4198 sctphdr->dest_port = dest_port;
4199 sctphdr->v_tag = v_tag;
4200 sctphdr->checksum = 0;
4203 * If source address selection fails and we find no
4204 * route then the ip_output should fail as well with
4205 * a NO_ROUTE_TO_HOST type error. We probably should
4206 * catch that somewhere and abort the association
4207 * right away (assuming this is an INIT being sent).
4209 if (ro->ro_rt == NULL) {
4211 * src addr selection failed to find a route
4212 * (or valid source addr), so we can't get
4213 * there from here (yet)!
4215 sctp_handle_no_route(stcb, net, so_locked);
4216 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4218 return (EHOSTUNREACH);
4220 if (ro != &iproute) {
4221 memcpy(&iproute, ro, sizeof(*ro));
4223 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4224 (uint32_t)(ntohl(ip->ip_src.s_addr)));
4225 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4226 (uint32_t)(ntohl(ip->ip_dst.s_addr)));
4227 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4230 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4231 /* failed to prepend data, give up */
4232 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4236 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4238 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4239 SCTP_STAT_INCR(sctps_sendswcrc);
4241 SCTP_ENABLE_UDP_CSUM(o_pak);
4244 m->m_pkthdr.csum_flags = CSUM_SCTP;
4245 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4246 SCTP_STAT_INCR(sctps_sendhwcrc);
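/*
 * Checksum handling: the software path computes the CRC32c here (the
 * offset skips the IP and UDP headers for encapsulated packets) and
 * bumps sctps_sendswcrc; the other path defers the CRC32c via CSUM_SCTP
 * so the stack or driver fills it in, and bumps sctps_sendhwcrc instead.
 */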
4248 #ifdef SCTP_PACKET_LOGGING
4249 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4250 sctp_packet_log(o_pak);
4252 /* send it out. table id is taken from stcb */
4253 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4254 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4255 so = SCTP_INP_SO(inp);
4256 SCTP_SOCKET_UNLOCK(so, 0);
4259 SCTP_PROBE5(send, NULL, stcb, ip, stcb, sctphdr);
4260 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4261 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4262 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4263 atomic_add_int(&stcb->asoc.refcnt, 1);
4264 SCTP_TCB_UNLOCK(stcb);
4265 SCTP_SOCKET_LOCK(so, 0);
4266 SCTP_TCB_LOCK(stcb);
4267 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4271 UDPSTAT_INC(udps_opackets);
4273 SCTP_STAT_INCR(sctps_sendpackets);
4274 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4276 SCTP_STAT_INCR(sctps_senderrors);
4278 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4280 /* free temporary routes */
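/*
 * Post-send bookkeeping for the destination: if the route is still
 * there and PMTU discovery is not disabled for this peer, pull the MTU
 * from the route (minus the UDP header when encapsulating) and shrink
 * the association's smallest_mtu if needed; if the route was freed,
 * forget the cached source address so it is re-selected next time.
 */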
4283 if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4284 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4287 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4290 mtu -= sizeof(struct udphdr);
4292 if (mtu < net->mtu) {
4293 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4294 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4299 } else if (ro->ro_rt == NULL) {
4300 /* route was freed */
4301 if (net->ro._s_addr &&
4302 net->src_addr_selected) {
4303 sctp_free_ifa(net->ro._s_addr);
4304 net->ro._s_addr = NULL;
4306 net->src_addr_selected = 0;
4315 uint32_t flowlabel, flowinfo;
4316 struct ip6_hdr *ip6h;
4317 struct route_in6 ip6route;
4319 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4321 struct sockaddr_in6 lsa6_storage;
4323 u_short prev_port = 0;
4327 flowlabel = net->flowlabel;
4329 flowlabel = stcb->asoc.default_flowlabel;
4331 flowlabel = inp->sctp_ep.default_flowlabel;
4333 if (flowlabel == 0) {
4335 * This means, in particular, that it is not set
4336 * at the SCTP layer, so use the value from the IP layer.
4339 flowlabel = ntohl(((struct inpcb *)inp)->inp_flow);
4341 flowlabel &= 0x000fffff;
4342 len = SCTP_MIN_OVERHEAD;
4344 len += sizeof(struct udphdr);
4346 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4349 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4352 SCTP_ALIGN_TO_END(newm, len);
4353 SCTP_BUF_LEN(newm) = len;
4354 SCTP_BUF_NEXT(newm) = m;
4357 m->m_pkthdr.flowid = net->flowid;
4358 M_HASHTYPE_SET(m, net->flowtype);
4360 m->m_pkthdr.flowid = mflowid;
4361 M_HASHTYPE_SET(m, mflowtype);
4363 packet_length = sctp_calculate_len(m);
4365 ip6h = mtod(m, struct ip6_hdr *);
4366 /* protect *sin6 from overwrite */
4367 sin6 = (struct sockaddr_in6 *)to;
4371 /* KAME hack: embed scopeid */
4372 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4373 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4378 memset(&ip6route, 0, sizeof(ip6route));
4379 ro = (sctp_route_t *)&ip6route;
4380 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4382 ro = (sctp_route_t *)&net->ro;
4385 * We assume here that inp_flow is in host byte
4386 * order within the TCB!
4388 if (tos_value == 0) {
4390 * This means, in particular, that it is not set
4391 * at the SCTP layer, so use the value from the IP layer.
4394 tos_value = (ntohl(((struct inpcb *)inp)->inp_flow) >> 20) & 0xff;
4398 tos_value |= sctp_get_ect(stcb);
4402 flowinfo |= tos_value;
4404 flowinfo |= flowlabel;
4405 ip6h->ip6_flow = htonl(flowinfo);
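/*
 * ip6_flow carries the traffic class (DSCP plus the ECT bit chosen
 * above) together with the 20-bit flow label taken from the net, the
 * association default, the endpoint default, or the inpcb's inp_flow.
 */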
4407 ip6h->ip6_nxt = IPPROTO_UDP;
4409 ip6h->ip6_nxt = IPPROTO_SCTP;
4411 ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4412 ip6h->ip6_dst = sin6->sin6_addr;
4415 * Add SRC address selection here: we can only reuse
4416 * to a limited degree the kame src-addr-sel, since
4417 * we can try their selection but it may not be bound.
4420 memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
4421 lsa6_tmp.sin6_family = AF_INET6;
4422 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4424 if (net && out_of_asoc_ok == 0) {
4425 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4426 sctp_free_ifa(net->ro._s_addr);
4427 net->ro._s_addr = NULL;
4428 net->src_addr_selected = 0;
4434 if (net->src_addr_selected == 0) {
4435 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4436 /* KAME hack: embed scopeid */
4437 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4438 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4442 /* Cache the source address */
4443 net->ro._s_addr = sctp_source_address_selection(inp,
4449 (void)sa6_recoverscope(sin6);
4450 net->src_addr_selected = 1;
4452 if (net->ro._s_addr == NULL) {
4453 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4454 net->src_addr_selected = 0;
4455 sctp_handle_no_route(stcb, net, so_locked);
4456 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4458 return (EHOSTUNREACH);
4460 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4462 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4463 /* KAME hack: embed scopeid */
4464 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4465 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4469 if (over_addr == NULL) {
4470 struct sctp_ifa *_lsrc;
4472 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4476 if (_lsrc == NULL) {
4477 sctp_handle_no_route(stcb, net, so_locked);
4478 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4480 return (EHOSTUNREACH);
4482 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4483 sctp_free_ifa(_lsrc);
4485 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4486 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4488 (void)sa6_recoverscope(sin6);
4490 lsa6->sin6_port = inp->sctp_lport;
4492 if (ro->ro_rt == NULL) {
4494 * src addr selection failed to find a route
4495 * (or valid source addr), so we can't get
4498 sctp_handle_no_route(stcb, net, so_locked);
4499 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4501 return (EHOSTUNREACH);
4504 * XXX: sa6 may not have a valid sin6_scope_id in
4505 * the non-SCOPEDROUTING case.
4507 memset(&lsa6_storage, 0, sizeof(lsa6_storage));
4508 lsa6_storage.sin6_family = AF_INET6;
4509 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4510 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4511 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4512 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4517 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4518 lsa6_storage.sin6_port = inp->sctp_lport;
4519 lsa6 = &lsa6_storage;
4520 ip6h->ip6_src = lsa6->sin6_addr;
4523 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4524 sctp_handle_no_route(stcb, net, so_locked);
4525 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4527 return (EHOSTUNREACH);
4529 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4530 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4531 udp->uh_dport = port;
4532 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4534 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4536 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4539 sctphdr->src_port = src_port;
4540 sctphdr->dest_port = dest_port;
4541 sctphdr->v_tag = v_tag;
4542 sctphdr->checksum = 0;
4545 * We set the hop limit now since there is a good
4546 * chance that our ro pointer is now filled
4548 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4549 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4552 /* Copy to be sure something bad is not happening */
4553 sin6->sin6_addr = ip6h->ip6_dst;
4554 lsa6->sin6_addr = ip6h->ip6_src;
4557 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4558 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4559 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4560 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4561 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4563 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4565 * preserve the port and scope for link-local sends
4568 prev_scope = sin6->sin6_scope_id;
4569 prev_port = sin6->sin6_port;
4572 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4573 /* failed to prepend data, give up */
4575 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4578 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4580 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4581 SCTP_STAT_INCR(sctps_sendswcrc);
4582 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4583 udp->uh_sum = 0xffff;
4586 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4587 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4588 SCTP_STAT_INCR(sctps_sendhwcrc);
4590 /* send it out. table id is taken from stcb */
4591 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4592 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4593 so = SCTP_INP_SO(inp);
4594 SCTP_SOCKET_UNLOCK(so, 0);
4597 #ifdef SCTP_PACKET_LOGGING
4598 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4599 sctp_packet_log(o_pak);
4601 SCTP_PROBE5(send, NULL, stcb, ip6h, stcb, sctphdr);
4602 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4603 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4604 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4605 atomic_add_int(&stcb->asoc.refcnt, 1);
4606 SCTP_TCB_UNLOCK(stcb);
4607 SCTP_SOCKET_LOCK(so, 0);
4608 SCTP_TCB_LOCK(stcb);
4609 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4613 /* for link local this must be done */
4614 sin6->sin6_scope_id = prev_scope;
4615 sin6->sin6_port = prev_port;
4617 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4619 UDPSTAT_INC(udps_opackets);
4621 SCTP_STAT_INCR(sctps_sendpackets);
4622 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4624 SCTP_STAT_INCR(sctps_senderrors);
4627 /* Now if we had a temporary route, free it. */
4631 * The PMTU check versus the smallest asoc MTU goes here.
4634 if (ro->ro_rt == NULL) {
4635 /* Route was freed */
4636 if (net->ro._s_addr &&
4637 net->src_addr_selected) {
4638 sctp_free_ifa(net->ro._s_addr);
4639 net->ro._s_addr = NULL;
4641 net->src_addr_selected = 0;
4643 if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4644 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4647 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4650 mtu -= sizeof(struct udphdr);
4652 if (mtu < net->mtu) {
4653 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4654 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4660 if (ND_IFINFO(ifp)->linkmtu &&
4661 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4662 sctp_mtu_size_reset(inp,
4664 ND_IFINFO(ifp)->linkmtu);
4672 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4673 ((struct sockaddr *)to)->sa_family);
4675 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4682 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4683 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4688 struct mbuf *m, *m_last;
4689 struct sctp_nets *net;
4690 struct sctp_init_chunk *init;
4691 struct sctp_supported_addr_param *sup_addr;
4692 struct sctp_adaptation_layer_indication *ali;
4693 struct sctp_supported_chunk_types_param *pr_supported;
4694 struct sctp_paramhdr *ph;
4695 int cnt_inits_to = 0;
4697 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4699 /* INITs always go to the primary (and usually the only) address */
4700 net = stcb->asoc.primary_destination;
4702 net = TAILQ_FIRST(&stcb->asoc.nets);
4707 /* we confirm any address we send an INIT to */
4708 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4709 (void)sctp_set_primary_addr(stcb, NULL, net);
4711 /* we confirm any address we send an INIT to */
4712 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4714 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4716 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4718 * special hook: if we are sending to a link-local address it will
4719 * not show up in our private address count.
4721 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4725 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4726 /* This case should not happen */
4727 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4730 /* start the INIT timer */
4731 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4733 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
4735 /* No memory, INIT timer will re-attempt. */
4736 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4739 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
4741 /* Now lets put the chunk header in place */
4742 init = mtod(m, struct sctp_init_chunk *);
4743 /* now the chunk header */
4744 init->ch.chunk_type = SCTP_INITIATION;
4745 init->ch.chunk_flags = 0;
4746 /* fill in later from mbuf we build */
4747 init->ch.chunk_length = 0;
4748 /* place in my tag */
4749 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4750 /* set up some of the credits. */
4751 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4752 SCTP_MINIMAL_RWND));
4753 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4754 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4755 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
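/*
 * The fixed part of the INIT is now filled in (tag, a_rwnd, stream
 * counts, initial TSN). Optional parameters are appended below in
 * order, with chunk_len/padding_len tracking the running length and any
 * 32-bit padding still owed; the addresses are added last and
 * ch.chunk_length is patched once everything is in place.
 */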
4757 /* Adaptation layer indication parameter */
4758 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4759 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
4760 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4761 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4762 ali->ph.param_length = htons(parameter_len);
4763 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
4764 chunk_len += parameter_len;
4768 if (stcb->asoc.ecn_supported == 1) {
4769 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4770 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4771 ph->param_type = htons(SCTP_ECN_CAPABLE);
4772 ph->param_length = htons(parameter_len);
4773 chunk_len += parameter_len;
4776 /* PR-SCTP supported parameter */
4777 if (stcb->asoc.prsctp_supported == 1) {
4778 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4779 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4780 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4781 ph->param_length = htons(parameter_len);
4782 chunk_len += parameter_len;
4785 /* Add NAT friendly parameter. */
4786 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4787 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4788 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4789 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4790 ph->param_length = htons(parameter_len);
4791 chunk_len += parameter_len;
4794 /* And now tell the peer which extensions we support */
4796 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4797 if (stcb->asoc.prsctp_supported == 1) {
4798 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4799 if (stcb->asoc.idata_supported) {
4800 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
4803 if (stcb->asoc.auth_supported == 1) {
4804 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4806 if (stcb->asoc.asconf_supported == 1) {
4807 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4808 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4810 if (stcb->asoc.reconfig_supported == 1) {
4811 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4813 if (stcb->asoc.idata_supported) {
4814 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
4816 if (stcb->asoc.nrsack_supported == 1) {
4817 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4819 if (stcb->asoc.pktdrop_supported == 1) {
4820 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4823 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4824 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4825 pr_supported->ph.param_length = htons(parameter_len);
4826 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4827 chunk_len += parameter_len;
4829 /* add authentication parameters */
4830 if (stcb->asoc.auth_supported) {
4831 /* attach RANDOM parameter, if available */
4832 if (stcb->asoc.authinfo.random != NULL) {
4833 struct sctp_auth_random *randp;
4835 if (padding_len > 0) {
4836 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4837 chunk_len += padding_len;
4840 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4841 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4842 /* random key already contains the header */
4843 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4844 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4845 chunk_len += parameter_len;
4847 /* add HMAC_ALGO parameter */
4848 if (stcb->asoc.local_hmacs != NULL) {
4849 struct sctp_auth_hmac_algo *hmacs;
4851 if (padding_len > 0) {
4852 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4853 chunk_len += padding_len;
4856 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4857 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
4858 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4859 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4860 hmacs->ph.param_length = htons(parameter_len);
4861 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
4862 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4863 chunk_len += parameter_len;
4865 /* add CHUNKS parameter */
4866 if (stcb->asoc.local_auth_chunks != NULL) {
4867 struct sctp_auth_chunk_list *chunks;
4869 if (padding_len > 0) {
4870 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4871 chunk_len += padding_len;
4874 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4875 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
4876 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
4877 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4878 chunks->ph.param_length = htons(parameter_len);
4879 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
4880 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4881 chunk_len += parameter_len;
4885 /* now any cookie time extensions */
4886 if (stcb->asoc.cookie_preserve_req) {
4887 struct sctp_cookie_perserve_param *cookie_preserve;
4889 if (padding_len > 0) {
4890 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4891 chunk_len += padding_len;
4894 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
4895 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4896 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4897 cookie_preserve->ph.param_length = htons(parameter_len);
4898 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4899 stcb->asoc.cookie_preserve_req = 0;
4900 chunk_len += parameter_len;
4903 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4906 if (padding_len > 0) {
4907 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4908 chunk_len += padding_len;
4911 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4912 if (stcb->asoc.scope.ipv4_addr_legal) {
4913 parameter_len += (uint16_t)sizeof(uint16_t);
4915 if (stcb->asoc.scope.ipv6_addr_legal) {
4916 parameter_len += (uint16_t)sizeof(uint16_t);
4918 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4919 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4920 sup_addr->ph.param_length = htons(parameter_len);
4922 if (stcb->asoc.scope.ipv4_addr_legal) {
4923 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4925 if (stcb->asoc.scope.ipv6_addr_legal) {
4926 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4928 padding_len = 4 - 2 * i;
4929 chunk_len += parameter_len;
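/*
 * Size check on the parameter just built: the parameter header is 4
 * bytes and each listed address type is 2 bytes, so parameter_len is 6
 * (one family, padding_len 2) or 8 (both families, padding_len 0).
 */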
4932 SCTP_BUF_LEN(m) = chunk_len;
4933 /* now the addresses */
4935 * To optimize this we could put the scoping stuff into a structure
4936 * and remove the individual uint8's from the assoc structure. Then
4937 * we could just sifa in the address within the stcb. But for now
4938 * this is a quick hack to get the address stuff teased apart.
4940 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
4942 &padding_len, &chunk_len);
4944 init->ch.chunk_length = htons(chunk_len);
4945 if (padding_len > 0) {
4946 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
4951 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4952 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
4953 (struct sockaddr *)&net->ro._l_addr,
4954 m, 0, NULL, 0, 0, 0, 0,
4955 inp->sctp_lport, stcb->rport, htonl(0),
4959 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
4960 if (error == ENOBUFS) {
4961 stcb->asoc.ifp_had_enobuf = 1;
4962 SCTP_STAT_INCR(sctps_lowlevelerr);
4965 stcb->asoc.ifp_had_enobuf = 0;
4967 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4968 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4972 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4973 int param_offset, int *abort_processing,
4974 struct sctp_chunkhdr *cp,
4979 * Given an mbuf containing an INIT or INIT-ACK with param_offset
4980 * equal to the beginning of the parameters (i.e. iphlen +
4981 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4982 * end of the mbuf, verifying that all parameters are known.
4984 * For unknown parameters, build and return an mbuf with
4985 * UNRECOGNIZED_PARAMETER errors. If the parameter's flags indicate
4986 * that processing of this chunk must stop, stop and set *abort_processing to 1.
4988 * By having param_offset pre-set to where the parameters begin, it is
4989 * hoped that this routine may be reused in the future by new features.
4992 struct sctp_paramhdr *phdr, params;
4994 struct mbuf *mat, *m_tmp, *op_err, *op_err_last;
4995 int at, limit, pad_needed;
4996 uint16_t ptype, plen, padded_size;
4998 *abort_processing = 0;
4999 if (cookie_found != NULL) {
5003 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
5008 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
5009 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5010 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
5011 ptype = ntohs(phdr->param_type);
5012 plen = ntohs(phdr->param_length);
5013 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5014 /* whacked (bogus) parameter */
5015 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5018 limit -= SCTP_SIZE32(plen);
5020 * All parameters for all chunks that we know/understand are
5021 * listed here. We process them elsewhere and take the
5022 * appropriate stop action per the upper bits. However, this
5023 * is the generic routine that processors can call to get back
5024 * an operr to either incorporate (init-ack) or send.
5026 padded_size = SCTP_SIZE32(plen);
5028 /* Params with variable size */
5029 case SCTP_HEARTBEAT_INFO:
5030 case SCTP_UNRECOG_PARAM:
5031 case SCTP_ERROR_CAUSE_IND:
5035 case SCTP_STATE_COOKIE:
5036 if (cookie_found != NULL) {
5041 /* Params with variable size within a range */
5042 case SCTP_CHUNK_LIST:
5043 case SCTP_SUPPORTED_CHUNK_EXT:
5044 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5045 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5050 case SCTP_SUPPORTED_ADDRTYPE:
5051 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5052 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5058 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5059 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5064 case SCTP_SET_PRIM_ADDR:
5065 case SCTP_DEL_IP_ADDRESS:
5066 case SCTP_ADD_IP_ADDRESS:
5067 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5068 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5069 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5074 /* Params with a fixed size */
5075 case SCTP_IPV4_ADDRESS:
5076 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5077 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5082 case SCTP_IPV6_ADDRESS:
5083 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5084 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5089 case SCTP_COOKIE_PRESERVE:
5090 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5091 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5096 case SCTP_HAS_NAT_SUPPORT:
5099 case SCTP_PRSCTP_SUPPORTED:
5100 if (padded_size != sizeof(struct sctp_paramhdr)) {
5101 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5106 case SCTP_ECN_CAPABLE:
5107 if (padded_size != sizeof(struct sctp_paramhdr)) {
5108 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5113 case SCTP_ULP_ADAPTATION:
5114 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5115 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5120 case SCTP_SUCCESS_REPORT:
5121 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5122 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5127 case SCTP_HOSTNAME_ADDRESS:
5129 /* Hostname parameters are deprecated. */
5130 struct sctp_gen_error_cause *cause;
5133 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5134 *abort_processing = 1;
5135 sctp_m_freem(op_err);
5139 l_len = SCTP_MIN_OVERHEAD;
5141 l_len = SCTP_MIN_V4_OVERHEAD;
5143 l_len += sizeof(struct sctp_chunkhdr);
5144 l_len += sizeof(struct sctp_gen_error_cause);
5145 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5146 if (op_err != NULL) {
5148 * Pre-reserve space for the IP, SCTP, and chunk headers.
5152 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5154 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5156 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5157 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5158 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
5159 cause = mtod(op_err, struct sctp_gen_error_cause *);
5160 cause->code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5161 cause->length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen));
5162 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5163 if (SCTP_BUF_NEXT(op_err) == NULL) {
5164 sctp_m_freem(op_err);
5174 * we do not recognize the parameter; figure out what we do.
5177 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5178 if ((ptype & 0x4000) == 0x4000) {
5179 /* Report bit is set?? */
5180 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5181 if (op_err == NULL) {
5184 /* Ok need to try to get an mbuf */
5186 l_len = SCTP_MIN_OVERHEAD;
5188 l_len = SCTP_MIN_V4_OVERHEAD;
5190 l_len += sizeof(struct sctp_chunkhdr);
5191 l_len += sizeof(struct sctp_paramhdr);
5192 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5194 SCTP_BUF_LEN(op_err) = 0;
5196 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5198 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5200 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5201 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5202 op_err_last = op_err;
5205 if (op_err != NULL) {
5206 /* If we have space */
5207 struct sctp_paramhdr *param;
5209 if (pad_needed > 0) {
5210 op_err_last = sctp_add_pad_tombuf(op_err_last, pad_needed);
5212 if (op_err_last == NULL) {
5213 sctp_m_freem(op_err);
5216 goto more_processing;
5218 if (M_TRAILINGSPACE(op_err_last) < (int)sizeof(struct sctp_paramhdr)) {
5219 m_tmp = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
5220 if (m_tmp == NULL) {
5221 sctp_m_freem(op_err);
5224 goto more_processing;
5226 SCTP_BUF_LEN(m_tmp) = 0;
5227 SCTP_BUF_NEXT(m_tmp) = NULL;
5228 SCTP_BUF_NEXT(op_err_last) = m_tmp;
5229 op_err_last = m_tmp;
5231 param = (struct sctp_paramhdr *)(mtod(op_err_last, caddr_t)+SCTP_BUF_LEN(op_err_last));
5232 param->param_type = htons(SCTP_UNRECOG_PARAM);
5233 param->param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen);
5234 SCTP_BUF_LEN(op_err_last) += sizeof(struct sctp_paramhdr);
5235 SCTP_BUF_NEXT(op_err_last) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5236 if (SCTP_BUF_NEXT(op_err_last) == NULL) {
5237 sctp_m_freem(op_err);
5240 goto more_processing;
5242 while (SCTP_BUF_NEXT(op_err_last) != NULL) {
5243 op_err_last = SCTP_BUF_NEXT(op_err_last);
5246 if (plen % 4 != 0) {
5247 pad_needed = 4 - (plen % 4);
5254 if ((ptype & 0x8000) == 0x0000) {
5255 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5258 /* skip this parameter and continue processing */
5259 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5260 at += SCTP_SIZE32(plen);
5265 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5269 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5270 *abort_processing = 1;
5271 sctp_m_freem(op_err);
5275 struct sctp_paramhdr *param;
5278 l_len = SCTP_MIN_OVERHEAD;
5280 l_len = SCTP_MIN_V4_OVERHEAD;
5282 l_len += sizeof(struct sctp_chunkhdr);
5283 l_len += (2 * sizeof(struct sctp_paramhdr));
5284 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5286 SCTP_BUF_LEN(op_err) = 0;
5288 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5290 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5292 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5293 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5294 SCTP_BUF_LEN(op_err) = 2 * sizeof(struct sctp_paramhdr);
5295 param = mtod(op_err, struct sctp_paramhdr *);
5296 param->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5297 param->param_length = htons(2 * sizeof(struct sctp_paramhdr));
5299 param->param_type = htons(ptype);
5300 param->param_length = htons(plen);
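/*
 * The error chunk built here starts with a PROTOCOL_VIOLATION cause
 * whose payload is the parameter header (type and length) of the
 * offending parameter, so the peer can tell which parameter made us
 * give up.
 */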
5307 sctp_are_there_new_addresses(struct sctp_association *asoc,
5308 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5311 * Given an INIT packet, look through the packet to verify that there
5312 * are NO new addresses. As we go through the parameters, add reports
5313 * of any un-understood parameters that require an error. Also we
5314 * must return (1) to drop the packet if we see an un-understood
5315 * parameter that tells us to drop the chunk.
5317 struct sockaddr *sa_touse;
5318 struct sockaddr *sa;
5319 struct sctp_paramhdr *phdr, params;
5320 uint16_t ptype, plen;
5322 struct sctp_nets *net;
5325 struct sockaddr_in sin4, *sa4;
5328 struct sockaddr_in6 sin6, *sa6;
5332 memset(&sin4, 0, sizeof(sin4));
5333 sin4.sin_family = AF_INET;
5334 sin4.sin_len = sizeof(sin4);
5337 memset(&sin6, 0, sizeof(sin6));
5338 sin6.sin6_family = AF_INET6;
5339 sin6.sin6_len = sizeof(sin6);
5341 /* First, what about the source address of the packet? */
5343 switch (src->sa_family) {
5346 if (asoc->scope.ipv4_addr_legal) {
5353 if (asoc->scope.ipv6_addr_legal) {
5364 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5365 sa = (struct sockaddr *)&net->ro._l_addr;
5366 if (sa->sa_family == src->sa_family) {
5368 if (sa->sa_family == AF_INET) {
5369 struct sockaddr_in *src4;
5371 sa4 = (struct sockaddr_in *)sa;
5372 src4 = (struct sockaddr_in *)src;
5373 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5380 if (sa->sa_family == AF_INET6) {
5381 struct sockaddr_in6 *src6;
5383 sa6 = (struct sockaddr_in6 *)sa;
5384 src6 = (struct sockaddr_in6 *)src;
5385 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5394 /* New address added! No need to look further. */
5398 /* OK so far; let's munge through the rest of the packet */
5399 offset += sizeof(struct sctp_init_chunk);
5400 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5403 ptype = ntohs(phdr->param_type);
5404 plen = ntohs(phdr->param_length);
5407 case SCTP_IPV4_ADDRESS:
5409 struct sctp_ipv4addr_param *p4, p4_buf;
5411 if (plen != sizeof(struct sctp_ipv4addr_param)) {
5414 phdr = sctp_get_next_param(in_initpkt, offset,
5415 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5419 if (asoc->scope.ipv4_addr_legal) {
5420 p4 = (struct sctp_ipv4addr_param *)phdr;
5421 sin4.sin_addr.s_addr = p4->addr;
5422 sa_touse = (struct sockaddr *)&sin4;
5428 case SCTP_IPV6_ADDRESS:
5430 struct sctp_ipv6addr_param *p6, p6_buf;
5432 if (plen != sizeof(struct sctp_ipv6addr_param)) {
5435 phdr = sctp_get_next_param(in_initpkt, offset,
5436 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5440 if (asoc->scope.ipv6_addr_legal) {
5441 p6 = (struct sctp_ipv6addr_param *)phdr;
5442 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5444 sa_touse = (struct sockaddr *)&sin6;
5454 /* ok, sa_touse points to one to check */
5456 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5457 sa = (struct sockaddr *)&net->ro._l_addr;
5458 if (sa->sa_family != sa_touse->sa_family) {
5462 if (sa->sa_family == AF_INET) {
5463 sa4 = (struct sockaddr_in *)sa;
5464 if (sa4->sin_addr.s_addr ==
5465 sin4.sin_addr.s_addr) {
5472 if (sa->sa_family == AF_INET6) {
5473 sa6 = (struct sockaddr_in6 *)sa;
5474 if (SCTP6_ARE_ADDR_EQUAL(
5483 /* New addr added! No need to look further */
5487 offset += SCTP_SIZE32(plen);
5488 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5494 * Given an mbuf chain that was sent into us containing an INIT, build an
5495 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
5496 * done a pullup to include the IPv6/IPv4 header, the SCTP header, and the
5497 * initial part of the INIT message (i.e. the struct sctp_init_msg).
5500 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5501 struct sctp_nets *src_net, struct mbuf *init_pkt,
5502 int iphlen, int offset,
5503 struct sockaddr *src, struct sockaddr *dst,
5504 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5505 uint8_t mflowtype, uint32_t mflowid,
5506 uint32_t vrf_id, uint16_t port)
5508 struct sctp_association *asoc;
5509 struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5510 struct sctp_init_ack_chunk *initack;
5511 struct sctp_adaptation_layer_indication *ali;
5512 struct sctp_supported_chunk_types_param *pr_supported;
5513 struct sctp_paramhdr *ph;
5514 union sctp_sockstore *over_addr;
5515 struct sctp_scoping scp;
5518 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5519 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5520 struct sockaddr_in *sin;
5523 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5524 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5525 struct sockaddr_in6 *sin6;
5527 struct sockaddr *to;
5528 struct sctp_state_cookie stc;
5529 struct sctp_nets *net = NULL;
5530 uint8_t *signature = NULL;
5531 int cnt_inits_to = 0;
5532 uint16_t his_limit, i_want;
5534 int nat_friendly = 0;
5537 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5544 if ((asoc != NULL) &&
5545 (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
5546 if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
5548 * new addresses; out of here in non-cookie-wait states.
5551 * Send an ABORT, without the new address error
5552 * cause. This looks no different than if no
5553 * listener was present.
5555 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5557 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5558 mflowtype, mflowid, inp->fibnum,
5562 if (src_net != NULL && (src_net->port != port)) {
5564 * change of remote encapsulation port, out of here
5565 * in non-cookie-wait states
5567 * Send an ABORT, without a specific error cause.
5568 * This looks no different than if no listener was
5571 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5572 "Remote encapsulation port changed");
5573 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5574 mflowtype, mflowid, inp->fibnum,
5580 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5581 (offset + sizeof(struct sctp_init_chunk)),
5583 (struct sctp_chunkhdr *)init_chk,
5584 &nat_friendly, NULL);
5587 if (op_err == NULL) {
5588 char msg[SCTP_DIAG_INFO_LEN];
5590 snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
5591 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5594 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5595 init_chk->init.initiate_tag, op_err,
5596 mflowtype, mflowid, inp->fibnum,
5600 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5602 /* No memory, INIT timer will re-attempt. */
5603 sctp_m_freem(op_err);
5606 chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
5610 * We might not overwrite the identification[] completely and on
5611 * some platforms time_entered will contain some padding. Therefore
5612 * zero out the cookie to avoid putting uninitialized memory on the wire.
5615 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5617 /* the time I built cookie */
5618 (void)SCTP_GETTIME_TIMEVAL(&now);
5619 stc.time_entered.tv_sec = now.tv_sec;
5620 stc.time_entered.tv_usec = now.tv_usec;
5622 /* populate any tie tags */
5624 /* unlock before tag selections */
5625 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5626 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5627 stc.cookie_life = asoc->cookie_life;
5628 net = asoc->primary_destination;
5630 stc.tie_tag_my_vtag = 0;
5631 stc.tie_tag_peer_vtag = 0;
5632 /* the lifetime I will award this cookie */
5633 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5636 /* copy in the ports for later check */
5637 stc.myport = sh->dest_port;
5638 stc.peerport = sh->src_port;
5641 * If we wanted to honor cookie life extensions, we would add to
5642 * stc.cookie_life. For now we should NOT honor any extension
5644 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5645 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5646 stc.ipv6_addr_legal = 1;
5647 if (SCTP_IPV6_V6ONLY(inp)) {
5648 stc.ipv4_addr_legal = 0;
5650 stc.ipv4_addr_legal = 1;
5653 stc.ipv6_addr_legal = 0;
5654 stc.ipv4_addr_legal = 1;
5659 switch (dst->sa_family) {
5663 /* lookup address */
5664 stc.address[0] = src4->sin_addr.s_addr;
5668 stc.addr_type = SCTP_IPV4_ADDRESS;
5669 /* local from address */
5670 stc.laddress[0] = dst4->sin_addr.s_addr;
5671 stc.laddress[1] = 0;
5672 stc.laddress[2] = 0;
5673 stc.laddress[3] = 0;
5674 stc.laddr_type = SCTP_IPV4_ADDRESS;
5675 /* scope_id is only for v6 */
5677 if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
5678 (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))) {
5681 /* Must use the address in this case */
5682 if (sctp_is_address_on_local_host(src, vrf_id)) {
5683 stc.loopback_scope = 1;
5686 stc.local_scope = 0;
5694 stc.addr_type = SCTP_IPV6_ADDRESS;
5695 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5696 stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
5697 if (sctp_is_address_on_local_host(src, vrf_id)) {
5698 stc.loopback_scope = 1;
5699 stc.local_scope = 0;
5702 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
5703 IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
5705 * If the new destination or source
5706 * is LINK_LOCAL, we must have both
5707 * site and local scope in common.
5708 * Don't set local scope though,
5709 * since we must depend on the
5710 * source to be added implicitly. We
5711 * cannot assume that just because we
5712 * share one link, all links are common.
5715 stc.local_scope = 0;
5719 * we start counting for the private
5720 * address stuff at 1, since the
5721 * link-local address we source from won't
5722 * show up in our scoped count.
5726 * pull out the scope_id from the incoming address.
5729 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
5730 IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
5732 * If the new destination or source
5733 * is SITE_LOCAL then we must have
5734 * site scope in common.
5738 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5739 stc.laddr_type = SCTP_IPV6_ADDRESS;
5749 /* set the scope per the existing tcb */
5752 struct sctp_nets *lnet;
5755 stc.loopback_scope = asoc->scope.loopback_scope;
5756 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5757 stc.site_scope = asoc->scope.site_scope;
5758 stc.local_scope = asoc->scope.local_scope;
5760 /* Why do we not consider IPv4 LL addresses? */
5761 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5762 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5763 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5765 * if we have a LL address, start counting at 1.
5773 /* use the net pointer */
5774 to = (struct sockaddr *)&net->ro._l_addr;
5775 switch (to->sa_family) {
5778 sin = (struct sockaddr_in *)to;
5779 stc.address[0] = sin->sin_addr.s_addr;
5783 stc.addr_type = SCTP_IPV4_ADDRESS;
5784 if (net->src_addr_selected == 0) {
5786 * strange case here, the INIT should have
5787 * done the selection.
5789 net->ro._s_addr = sctp_source_address_selection(inp,
5790 stcb, (sctp_route_t *)&net->ro,
5792 if (net->ro._s_addr == NULL) {
5793 sctp_m_freem(op_err);
5798 net->src_addr_selected = 1;
5801 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5802 stc.laddress[1] = 0;
5803 stc.laddress[2] = 0;
5804 stc.laddress[3] = 0;
5805 stc.laddr_type = SCTP_IPV4_ADDRESS;
5806 /* scope_id is only for v6 */
5812 sin6 = (struct sockaddr_in6 *)to;
5813 memcpy(&stc.address, &sin6->sin6_addr,
5814 sizeof(struct in6_addr));
5815 stc.addr_type = SCTP_IPV6_ADDRESS;
5816 stc.scope_id = sin6->sin6_scope_id;
5817 if (net->src_addr_selected == 0) {
5819 * strange case here, the INIT should have
5820 * done the selection.
5822 net->ro._s_addr = sctp_source_address_selection(inp,
5823 stcb, (sctp_route_t *)&net->ro,
5825 if (net->ro._s_addr == NULL) {
5826 sctp_m_freem(op_err);
5831 net->src_addr_selected = 1;
5833 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5834 sizeof(struct in6_addr));
5835 stc.laddr_type = SCTP_IPV6_ADDRESS;
5840 /* Now let's put the INIT-ACK chunk in place */
5841 initack = mtod(m, struct sctp_init_ack_chunk *);
5842 /* Save it off for quick ref */
5843 stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
5845 memcpy(stc.identification, SCTP_VERSION_STRING,
5846 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5847 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5848 /* now the chunk header */
5849 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5850 initack->ch.chunk_flags = 0;
5851 /* fill in later from mbuf we build */
5852 initack->ch.chunk_length = 0;
5853 /* place in my tag */
5854 if ((asoc != NULL) &&
5855 ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
5856 (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) ||
5857 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) {
5858 /* re-use the v-tags and init-seq here */
5859 initack->init.initiate_tag = htonl(asoc->my_vtag);
5860 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5862 uint32_t vtag, itsn;
5865 atomic_add_int(&asoc->refcnt, 1);
5866 SCTP_TCB_UNLOCK(stcb);
5868 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5869 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5871 * Got a duplicate vtag on some guy behind a
5872 * NAT; make sure we don't use it.
5876 initack->init.initiate_tag = htonl(vtag);
5877 /* get a TSN to use too */
5878 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5879 initack->init.initial_tsn = htonl(itsn);
5880 SCTP_TCB_LOCK(stcb);
5881 atomic_add_int(&asoc->refcnt, -1);
5883 SCTP_INP_INCR_REF(inp);
5884 SCTP_INP_RUNLOCK(inp);
5885 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5886 initack->init.initiate_tag = htonl(vtag);
5887 /* get a TSN to use too */
5888 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5889 SCTP_INP_RLOCK(inp);
5890 SCTP_INP_DECR_REF(inp);
5893 /* save away my tag in the cookie */
5894 stc.my_vtag = initack->init.initiate_tag;
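/*
 * Tag selection summary: for an existing association still in
 * COOKIE-WAIT/INUSE/COOKIE-ECHOED the old vtag and initial TSN were
 * reused above; otherwise a fresh tag was drawn via sctp_select_a_tag()
 * (a tag equal to our own vtag is not used for a NAT peer) and it is
 * recorded in the state cookie here, presumably so it can be checked
 * when the COOKIE-ECHO comes back.
 */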
5896 /* set up some of the credits. */
5897 so = inp->sctp_socket;
5899 /* memory problem */
5900 sctp_m_freem(op_err);
5904 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5906 /* set what I want */
5907 his_limit = ntohs(init_chk->init.num_inbound_streams);
5908 /* choose what I want */
5910 if (asoc->streamoutcnt > asoc->pre_open_streams) {
5911 i_want = asoc->streamoutcnt;
5913 i_want = asoc->pre_open_streams;
5916 i_want = inp->sctp_ep.pre_open_stream_count;
5918 if (his_limit < i_want) {
5919 /* I Want more :< */
5920 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5922 /* I can have what I want :> */
5923 initack->init.num_outbound_streams = htons(i_want);
5925 /* tell him his limit. */
5926 initack->init.num_inbound_streams =
5927 htons(inp->sctp_ep.max_open_streams_intome);
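/*
 * Stream negotiation: we advertise min(what we want, the peer's
 * inbound limit) as our outbound stream count, and our own
 * max_open_streams_intome as the number of streams the peer may open.
 */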
5929 /* adaptation layer indication parameter */
5930 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5931 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5932 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
5933 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5934 ali->ph.param_length = htons(parameter_len);
5935 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5936 chunk_len += parameter_len;
5940 if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
5941 ((asoc == NULL) && (inp->ecn_supported == 1))) {
5942 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5943 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5944 ph->param_type = htons(SCTP_ECN_CAPABLE);
5945 ph->param_length = htons(parameter_len);
5946 chunk_len += parameter_len;
5949 /* PR-SCTP supported parameter */
5950 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5951 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5952 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5953 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5954 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5955 ph->param_length = htons(parameter_len);
5956 chunk_len += parameter_len;
5959 /* Add NAT friendly parameter */
5961 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5962 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5963 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5964 ph->param_length = htons(parameter_len);
5965 chunk_len += parameter_len;
5968 /* And now tell the peer which extensions we support */
5970 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
5971 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5972 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5973 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5974 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5975 ((asoc == NULL) && (inp->idata_supported == 1))) {
5976 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5979 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5980 ((asoc == NULL) && (inp->auth_supported == 1))) {
5981 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5983 if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
5984 ((asoc == NULL) && (inp->asconf_supported == 1))) {
5985 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5986 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5988 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
5989 ((asoc == NULL) && (inp->reconfig_supported == 1))) {
5990 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5992 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5993 ((asoc == NULL) && (inp->idata_supported == 1))) {
5994 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5996 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
5997 ((asoc == NULL) && (inp->nrsack_supported == 1))) {
5998 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
6000 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
6001 ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
6002 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
6005 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
6006 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
6007 pr_supported->ph.param_length = htons(parameter_len);
6008 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
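/*
 * Example: the fixed part of this parameter is the 4-byte header, so
 * with three extension chunk types parameter_len is 7; SCTP_SIZE32(7)
 * rounds up to 8 and padding_len becomes 1.
 */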
6009 chunk_len += parameter_len;
6012 /* add authentication parameters */
6013 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
6014 ((asoc == NULL) && (inp->auth_supported == 1))) {
6015 struct sctp_auth_random *randp;
6016 struct sctp_auth_hmac_algo *hmacs;
6017 struct sctp_auth_chunk_list *chunks;
6019 if (padding_len > 0) {
6020 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6021 chunk_len += padding_len;
6024 /* generate and add RANDOM parameter */
6025 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
6026 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
6027 SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6028 randp->ph.param_type = htons(SCTP_RANDOM);
6029 randp->ph.param_length = htons(parameter_len);
6030 SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6031 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6032 chunk_len += parameter_len;
6034 if (padding_len > 0) {
6035 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6036 chunk_len += padding_len;
6039 /* add HMAC_ALGO parameter */
6040 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
6041 parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6042 sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6043 (uint8_t *)hmacs->hmac_ids);
6044 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6045 hmacs->ph.param_length = htons(parameter_len);
6046 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6047 chunk_len += parameter_len;
6049 if (padding_len > 0) {
6050 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6051 chunk_len += padding_len;
6054 /* add CHUNKS parameter */
6055 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
6056 parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6057 sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6058 chunks->chunk_types);
6059 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6060 chunks->ph.param_length = htons(parameter_len);
6061 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6062 chunk_len += parameter_len;
6064 SCTP_BUF_LEN(m) = chunk_len;
6066 /* now the addresses */
6068 * To optimize this we could put the scoping stuff into a structure
6069 * and remove the individual uint8's from the stc structure. Then we
6070 * could just stuff the address within the stc... but for now this
6071 * is a quick hack to get the address stuff teased apart.
6073 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6074 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6075 scp.loopback_scope = stc.loopback_scope;
6076 scp.ipv4_local_scope = stc.ipv4_scope;
6077 scp.local_scope = stc.local_scope;
6078 scp.site_scope = stc.site_scope;
6079 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6081 &padding_len, &chunk_len);
6082 /* padding_len can only be positive if no addresses have been added */
6083 if (padding_len > 0) {
6084 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6085 chunk_len += padding_len;
6086 SCTP_BUF_LEN(m) += padding_len;
6090 /* tack on the operational error if present */
6093 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6094 parameter_len += SCTP_BUF_LEN(m_tmp);
6096 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6097 SCTP_BUF_NEXT(m_last) = op_err;
6098 while (SCTP_BUF_NEXT(m_last) != NULL) {
6099 m_last = SCTP_BUF_NEXT(m_last);
6101 chunk_len += parameter_len;
6103 if (padding_len > 0) {
6104 m_last = sctp_add_pad_tombuf(m_last, padding_len);
6105 if (m_last == NULL) {
6106 /* Houston we have a problem, no space */
6110 chunk_len += padding_len;
6113 /* Now we must build a cookie */
6114 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6115 if (m_cookie == NULL) {
6116 /* memory problem */
6120 /* Now append the cookie to the end and update the space/size */
6121 SCTP_BUF_NEXT(m_last) = m_cookie;
6123 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6124 parameter_len += SCTP_BUF_LEN(m_tmp);
6125 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6129 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6130 chunk_len += parameter_len;
6133 * Place in the size, but we don't include the last pad (if any) in the chunk length.
6136 initack->ch.chunk_length = htons(chunk_len);
6139 * Time to sign the cookie. We don't sign over the cookie signature
6140 * itself, thus we pass its length as the trailer.
6142 (void)sctp_hmac_m(SCTP_HMAC,
6143 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6144 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6145 (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
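/*
 * The digest is computed from just past the cookie's parameter header
 * up to, but not including, the trailing SCTP_SIGNATURE_SIZE bytes,
 * and is written into the signature field reserved by
 * sctp_add_cookie().
 */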
6147 * We pass 0 here to NOT set IP_DF if it's IPv4. We ignore the return
6148 * value here since the timer will drive a retransmission.
6150 if (padding_len > 0) {
6151 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6156 if (stc.loopback_scope) {
6157 over_addr = (union sctp_sockstore *)dst;
6162 if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6164 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6167 SCTP_SO_NOT_LOCKED))) {
6168 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6169 if (error == ENOBUFS) {
6171 asoc->ifp_had_enobuf = 1;
6173 SCTP_STAT_INCR(sctps_lowlevelerr);
6177 asoc->ifp_had_enobuf = 0;
6180 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6185 sctp_prune_prsctp(struct sctp_tcb *stcb,
6186 struct sctp_association *asoc,
6187 struct sctp_sndrcvinfo *srcv,
6191 struct sctp_tmit_chunk *chk, *nchk;
6193 SCTP_TCB_LOCK_ASSERT(stcb);
6194 if ((asoc->prsctp_supported) &&
6195 (asoc->sent_queue_cnt_removeable > 0)) {
6196 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6198 * Look for chunks marked with the PR_SCTP flag AND
6199 * the buffer space flag. If the one being sent is of
6200 * equal or greater priority, then purge the old one
6201 * and free some space.
6203 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6205 * This one is PR-SCTP AND buffer space
6208 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6210 * Lower numbers equate to higher
6211 * priority, so if the one we are
6212 * looking at has a larger or equal
6213 * priority value we want to drop the
6214 * data and NOT retransmit it.
6218 * We release the book_size
6219 * if the mbuf is here
6224 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6228 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6231 freed_spc += ret_spc;
6232 if (freed_spc >= dataout) {
6235 } /* if chunk was present */
6236 } /* if of sufficient priority */
6237 } /* if chunk has PR-SCTP buffer drop enabled */
6238 } /* tailqforeach */
6240 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6241 /* Here we must move to the sent queue and mark */
6242 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6243 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6246 * We release the book_size
6247 * if the mbuf is here
6251 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6254 freed_spc += ret_spc;
6255 if (freed_spc >= dataout) {
6258 } /* end if chk->data */
6259 } /* end if right class */
6260 } /* end if chk pr-sctp */
6261 } /* tailqforeachsafe (chk) */
6262 } /* if enabled in asoc */
6266 sctp_get_frag_point(struct sctp_tcb *stcb,
6267 struct sctp_association *asoc)
6272 * For endpoints that have both v6 and v4 addresses we must reserve
6273 * room for the ipv6 header; for those that are only dealing with V4
6274 * we use a larger frag point.
6276 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6277 ovh = SCTP_MIN_OVERHEAD;
6279 ovh = SCTP_MIN_V4_OVERHEAD;
6281 ovh += SCTP_DATA_CHUNK_OVERHEAD(stcb);
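/*
 * ovh now covers the IP header, the SCTP common header and one
 * DATA/I-DATA chunk header; whatever is left of the smallest MTU (or
 * of the configured fragmentation point) is usable payload.
 */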
6282 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6283 siz = asoc->smallest_mtu - ovh;
6285 siz = (stcb->asoc.sctp_frag_point - ovh);
6287 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6289 /* A data chunk MUST fit in a cluster */
6290 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6293 /* adjust for an AUTH chunk if DATA requires auth */
6294 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6295 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6298 /* make it an even word boundary please */
6305 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6308 * We assume that the user wants PR_SCTP_TTL if the user provides a
6309 * positive lifetime but does not specify any PR_SCTP policy.
6311 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6312 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6313 } else if (sp->timetolive > 0) {
6314 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6315 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6319 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6320 case CHUNK_FLAGS_PR_SCTP_BUF:
6322 * Time to live is a priority stored in tv_sec when doing
6323 * the buffer drop thing.
6325 sp->ts.tv_sec = sp->timetolive;
6328 case CHUNK_FLAGS_PR_SCTP_TTL:
6332 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6333 tv.tv_sec = sp->timetolive / 1000;
6334 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6336 * TODO sctp_constants.h needs alternative time
6337 * macros when _KERNEL is undefined.
6339 timevaladd(&sp->ts, &tv);
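/*
 * timetolive is in milliseconds; e.g. 1500 ms becomes tv_sec = 1 and
 * tv_usec = 500000, which added to the current time gives the
 * absolute drop deadline.
 */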
6342 case CHUNK_FLAGS_PR_SCTP_RTX:
6344 * Time to live is the number of retransmissions, stored in tv_sec.
6347 sp->ts.tv_sec = sp->timetolive;
6351 SCTPDBG(SCTP_DEBUG_USRREQ1,
6352 "Unknown PR_SCTP policy %u.\n",
6353 PR_SCTP_POLICY(sp->sinfo_flags));
6359 sctp_msg_append(struct sctp_tcb *stcb,
6360 struct sctp_nets *net,
6362 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6366 struct sctp_stream_queue_pending *sp = NULL;
6367 struct sctp_stream_out *strm;
6370 * Given an mbuf chain, put it into the association send queue and
6371 * place it on the wheel
6373 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6374 /* Invalid stream number */
6375 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6379 if ((stcb->asoc.stream_locked) &&
6380 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6381 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6385 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6386 /* Now can we send this? */
6387 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
6388 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6389 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6390 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6391 /* got data while shutting down */
6392 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6396 sctp_alloc_a_strmoq(stcb, sp);
6398 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6402 sp->sinfo_flags = srcv->sinfo_flags;
6403 sp->timetolive = srcv->sinfo_timetolive;
6404 sp->ppid = srcv->sinfo_ppid;
6405 sp->context = srcv->sinfo_context;
6407 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6409 atomic_add_int(&sp->net->ref_count, 1);
6413 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6414 sp->sid = srcv->sinfo_stream;
6415 sp->msg_is_complete = 1;
6416 sp->sender_all_done = 1;
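/*
 * This path queues one complete message in a single shot, so it is
 * marked complete and the sender marked done before the message ever
 * reaches the stream queue.
 */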
6419 sp->tail_mbuf = NULL;
6420 sctp_set_prsctp_policy(sp);
6422 * We could in theory (for sendall) pass the length in, but we would
6423 * still have to hunt through the chain since we need to set up the tail_mbuf anyway.
6427 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6428 if (SCTP_BUF_NEXT(at) == NULL)
6430 sp->length += SCTP_BUF_LEN(at);
6432 if (srcv->sinfo_keynumber_valid) {
6433 sp->auth_keyid = srcv->sinfo_keynumber;
6435 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6437 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6438 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6439 sp->holds_key_ref = 1;
6441 if (hold_stcb_lock == 0) {
6442 SCTP_TCB_SEND_LOCK(stcb);
6444 sctp_snd_sb_alloc(stcb, sp->length);
6445 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6446 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6447 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6449 if (hold_stcb_lock == 0) {
6450 SCTP_TCB_SEND_UNLOCK(stcb);
6460 static struct mbuf *
6461 sctp_copy_mbufchain(struct mbuf *clonechain,
6462 struct mbuf *outchain,
6463 struct mbuf **endofchain,
6466 uint8_t copy_by_ref)
6469 struct mbuf *appendchain;
6473 if (endofchain == NULL) {
6477 sctp_m_freem(outchain);
6480 if (can_take_mbuf) {
6481 appendchain = clonechain;
6484 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6486 /* It's not in a cluster */
6487 if (*endofchain == NULL) {
6488 /* lets get a mbuf cluster */
6489 if (outchain == NULL) {
6490 /* This is the general case */
6492 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6493 if (outchain == NULL) {
6496 SCTP_BUF_LEN(outchain) = 0;
6497 *endofchain = outchain;
6498 /* get the prepend space */
6499 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6502 * We really should not get a NULL
6508 if (SCTP_BUF_NEXT(m) == NULL) {
6512 m = SCTP_BUF_NEXT(m);
6515 if (*endofchain == NULL) {
6517 * huh, TSNH XXX maybe we should panic
6520 sctp_m_freem(outchain);
6524 /* get the new end of length */
6525 len = (int)M_TRAILINGSPACE(*endofchain);
6527 /* how much is left at the end? */
6528 len = (int)M_TRAILINGSPACE(*endofchain);
6530 /* Find the end of the data, for appending */
6531 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6533 /* Now lets copy it out */
6534 if (len >= sizeofcpy) {
6535 /* It all fits, copy it in */
6536 m_copydata(clonechain, 0, sizeofcpy, cp);
6537 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6539 /* fill up the end of the chain */
6541 m_copydata(clonechain, 0, len, cp);
6542 SCTP_BUF_LEN((*endofchain)) += len;
6543 /* now we need another one */
6546 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6551 SCTP_BUF_NEXT((*endofchain)) = m;
6553 cp = mtod((*endofchain), caddr_t);
6554 m_copydata(clonechain, len, sizeofcpy, cp);
6555 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6559 /* copy the old fashion way */
6560 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6561 #ifdef SCTP_MBUF_LOGGING
6562 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6563 sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
6568 if (appendchain == NULL) {
6571 sctp_m_freem(outchain);
6575 /* tack on to the end */
6576 if (*endofchain != NULL) {
6577 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6581 if (SCTP_BUF_NEXT(m) == NULL) {
6582 SCTP_BUF_NEXT(m) = appendchain;
6585 m = SCTP_BUF_NEXT(m);
6589 * save off the end and update the end-chain position
6593 if (SCTP_BUF_NEXT(m) == NULL) {
6597 m = SCTP_BUF_NEXT(m);
6601 /* save off the end and update the end-chain position */
6604 if (SCTP_BUF_NEXT(m) == NULL) {
6608 m = SCTP_BUF_NEXT(m);
6610 return (appendchain);
6615 sctp_med_chunk_output(struct sctp_inpcb *inp,
6616 struct sctp_tcb *stcb,
6617 struct sctp_association *asoc,
6620 int control_only, int from_where,
6621 struct timeval *now, int *now_filled, int frag_point, int so_locked
6622 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6628 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6629 uint32_t val SCTP_UNUSED)
6631 struct sctp_copy_all *ca;
6634 int added_control = 0;
6635 int un_sent, do_chunk_output = 1;
6636 struct sctp_association *asoc;
6637 struct sctp_nets *net;
6639 ca = (struct sctp_copy_all *)ptr;
6640 if (ca->m == NULL) {
6643 if (ca->inp != inp) {
6647 if (ca->sndlen > 0) {
6648 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6650 /* can't copy so we are done */
6654 #ifdef SCTP_MBUF_LOGGING
6655 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6656 sctp_log_mbc(m, SCTP_MBUF_ICOPY);
6662 SCTP_TCB_LOCK_ASSERT(stcb);
6663 if (stcb->asoc.alternate) {
6664 net = stcb->asoc.alternate;
6666 net = stcb->asoc.primary_destination;
6668 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6669 /* Abort this assoc with m as the user defined reason */
6671 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6673 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6674 0, M_NOWAIT, 1, MT_DATA);
6675 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6678 struct sctp_paramhdr *ph;
6680 ph = mtod(m, struct sctp_paramhdr *);
6681 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6682 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
6685 * We add one here to keep the assoc from disappearing on us.
6688 atomic_add_int(&stcb->asoc.refcnt, 1);
6689 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6691 * sctp_abort_an_association calls sctp_free_asoc(), but
6692 * free_asoc will NOT free the association since we
6693 * incremented the refcnt. We do this to prevent it being
6694 * freed and things getting tricky, since we could end up
6695 * (from free_asoc) calling inpcb_free, which would take a
6696 * recursive lock call on the iterator lock. As a consequence
6697 * the stcb comes back to us unlocked: since free_asoc
6698 * returns with either no TCB or the TCB unlocked, we must
6699 * relock so it can be unlocked in the iterator timer.
6701 SCTP_TCB_LOCK(stcb);
6702 atomic_add_int(&stcb->asoc.refcnt, -1);
6703 goto no_chunk_output;
6706 ret = sctp_msg_append(stcb, net, m,
6710 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6711 /* shutdown this assoc */
6712 if (TAILQ_EMPTY(&asoc->send_queue) &&
6713 TAILQ_EMPTY(&asoc->sent_queue) &&
6714 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
6715 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6719 * there is nothing queued to send, so I'm done...
6722 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
6723 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6724 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6726 * only send SHUTDOWN the first time through
6729 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
6730 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6732 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
6733 sctp_stop_timers_for_shutdown(stcb);
6734 sctp_send_shutdown(stcb, net);
6735 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6737 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6738 asoc->primary_destination);
6740 do_chunk_output = 0;
6744 * we still have (or just got) data to send,
6745 * so set SHUTDOWN_PENDING
6748 * XXX sockets draft says that SCTP_EOF
6749 * should be sent with no data. currently,
6750 * we will allow user data to be sent first
6751 * and move to SHUTDOWN-PENDING
6753 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
6754 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6755 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6756 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6757 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
6759 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
6760 if (TAILQ_EMPTY(&asoc->send_queue) &&
6761 TAILQ_EMPTY(&asoc->sent_queue) &&
6762 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6763 struct mbuf *op_err;
6764 char msg[SCTP_DIAG_INFO_LEN];
6767 snprintf(msg, sizeof(msg),
6768 "%s:%d at %s", __FILE__, __LINE__, __func__);
6769 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6771 atomic_add_int(&stcb->asoc.refcnt, 1);
6772 sctp_abort_an_association(stcb->sctp_ep, stcb,
6773 op_err, SCTP_SO_NOT_LOCKED);
6774 atomic_add_int(&stcb->asoc.refcnt, -1);
6775 goto no_chunk_output;
6777 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6778 asoc->primary_destination);
6784 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6785 (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));
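/*
 * un_sent estimates the bytes queued but not yet in flight plus the
 * chunk header overhead the stream-queued messages will add; the
 * Nagle-style check below holds output back while data is in flight
 * and less than a full MTU's worth is pending.
 */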
6787 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6788 (stcb->asoc.total_flight > 0) &&
6789 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6790 do_chunk_output = 0;
6792 if (do_chunk_output)
6793 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6794 else if (added_control) {
6795 int num_out, reason, now_filled = 0;
6799 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6800 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6801 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6812 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6814 struct sctp_copy_all *ca;
6816 ca = (struct sctp_copy_all *)ptr;
6818 * Do a notify here? Kacheong suggests that the notify be done at
6819 * the send time, so you would push up a notification if any send
6820 * failed. Don't know if this is feasible since the only failures we
6821 * have are "memory" related, and if you cannot get an mbuf to send
6822 * the data you surely can't get an mbuf to send up to notify the
6823 * user you can't send the data :->
6826 /* now free everything */
6828 /* Lets clear the flag to allow others to run. */
6829 ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
6831 sctp_m_freem(ca->m);
6832 SCTP_FREE(ca, SCTP_M_COPYAL);
6835 static struct mbuf *
6836 sctp_copy_out_all(struct uio *uio, ssize_t len)
6838 struct mbuf *ret, *at;
6839 ssize_t left, willcpy, cancpy, error;
6841 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
6847 SCTP_BUF_LEN(ret) = 0;
6848 /* save space for the data chunk header */
6849 cancpy = (int)M_TRAILINGSPACE(ret);
6850 willcpy = min(cancpy, left);
6853 /* Align data to the end */
6854 error = uiomove(mtod(at, caddr_t), (int)willcpy, uio);
6860 SCTP_BUF_LEN(at) = (int)willcpy;
6861 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6864 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA);
6865 if (SCTP_BUF_NEXT(at) == NULL) {
6868 at = SCTP_BUF_NEXT(at);
6869 SCTP_BUF_LEN(at) = 0;
6870 cancpy = (int)M_TRAILINGSPACE(at);
6871 willcpy = min(cancpy, left);
6878 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6879 struct sctp_sndrcvinfo *srcv)
6882 struct sctp_copy_all *ca;
6884 if (inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) {
6885 /* There is another. */
6888 if (uio->uio_resid > SCTP_MAX_SENDALL_LIMIT) {
6889 /* You must be less than the max! */
6892 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6896 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6899 memset(ca, 0, sizeof(struct sctp_copy_all));
6903 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6906 * take off the sendall flag, it would be bad if we failed to do this.
6909 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6910 /* get length and mbuf chain */
6912 ca->sndlen = uio->uio_resid;
6913 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6914 if (ca->m == NULL) {
6915 SCTP_FREE(ca, SCTP_M_COPYAL);
6916 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6920 /* Gather the length of the send */
6924 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6925 ca->sndlen += SCTP_BUF_LEN(mat);
6928 inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP;
6929 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6930 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6931 SCTP_ASOC_ANY_STATE,
6933 sctp_sendall_completes, inp, 1);
6935 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6936 SCTP_FREE(ca, SCTP_M_COPYAL);
6937 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6945 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6947 struct sctp_tmit_chunk *chk, *nchk;
6949 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6950 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6951 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6952 asoc->ctrl_queue_cnt--;
6954 sctp_m_freem(chk->data);
6957 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6963 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6965 struct sctp_association *asoc;
6966 struct sctp_tmit_chunk *chk, *nchk;
6967 struct sctp_asconf_chunk *acp;
6970 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6971 /* find SCTP_ASCONF chunk in queue */
6972 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6974 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6975 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6980 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6981 asoc->ctrl_queue_cnt--;
6983 sctp_m_freem(chk->data);
6986 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6993 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6994 struct sctp_association *asoc,
6995 struct sctp_tmit_chunk **data_list,
6997 struct sctp_nets *net)
7000 struct sctp_tmit_chunk *tp1;
7002 for (i = 0; i < bundle_at; i++) {
7003 /* off of the send queue */
7004 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
7005 asoc->send_queue_cnt--;
7008 * Any chunk NOT 0 you zap the time; chunk 0 gets
7009 * zapped or set based on whether an RTO measurement is needed.
7012 data_list[i]->do_rtt = 0;
7015 data_list[i]->sent_rcv_time = net->last_sent_time;
7016 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
7017 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
7018 if (data_list[i]->whoTo == NULL) {
7019 data_list[i]->whoTo = net;
7020 atomic_add_int(&net->ref_count, 1);
7022 /* on to the sent queue */
7023 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
7024 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7025 struct sctp_tmit_chunk *tpp;
7027 /* need to move back */
7029 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7031 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7035 if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7038 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7040 TAILQ_INSERT_TAIL(&asoc->sent_queue,
7045 /* This does not lower until the cum-ack passes it */
7046 asoc->sent_queue_cnt++;
7047 if ((asoc->peers_rwnd <= 0) &&
7048 (asoc->total_flight == 0) &&
7050 /* Mark the chunk as being a window probe */
7051 SCTP_STAT_INCR(sctps_windowprobed);
7053 #ifdef SCTP_AUDITING_ENABLED
7054 sctp_audit_log(0xC2, 3);
7056 data_list[i]->sent = SCTP_DATAGRAM_SENT;
7057 data_list[i]->snd_count = 1;
7058 data_list[i]->rec.data.chunk_was_revoked = 0;
7059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7060 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7061 data_list[i]->whoTo->flight_size,
7062 data_list[i]->book_size,
7063 (uint32_t)(uintptr_t)data_list[i]->whoTo,
7064 data_list[i]->rec.data.tsn);
7066 sctp_flight_size_increase(data_list[i]);
7067 sctp_total_flight_increase(stcb, data_list[i]);
7068 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7069 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7070 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7072 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7073 (uint32_t)(data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
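/*
 * Charge the chunk (plus the configured per-chunk overhead) against
 * our cached view of the peer's receive window; the check below
 * zeroes it once it falls under the sender-side silly window limit.
 */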
7074 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7075 /* SWS sender side engages */
7076 asoc->peers_rwnd = 0;
7079 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7080 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
7085 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7086 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7091 struct sctp_tmit_chunk *chk, *nchk;
7093 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7094 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7095 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7096 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7097 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7098 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7099 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7100 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7101 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7102 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7103 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7104 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7105 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7106 /* Stray chunks must be cleaned up */
7108 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7109 asoc->ctrl_queue_cnt--;
7111 sctp_m_freem(chk->data);
7114 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7115 asoc->fwd_tsn_cnt--;
7117 sctp_free_a_chunk(stcb, chk, so_locked);
7118 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7119 /* special handling, we must look into the param */
7120 if (chk != asoc->str_reset) {
7121 goto clean_up_anyway;
7128 sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
7129 uint32_t space_left, uint32_t frag_point, int eeor_on)
7132 * Make a decision on if I should split a msg into multiple parts.
7133 * This is only asked of incomplete messages.
7137 * If we are doing EEOR we need to always send it if it's the
7138 * entire thing, since it might be all the guy is putting in the hopper.
7141 if (space_left >= length) {
7143 * If we have data outstanding,
7144 * we get another chance when the sack
7145 * arrives to transmit - wait for more data
7147 if (stcb->asoc.total_flight == 0) {
7149 * If nothing is in flight, we zero the
7157 /* You can fill the rest */
7158 return (space_left);
7162 * For those strange folk that make the send buffer
7163 * smaller than our fragmentation point, we can't
7164 * get a full msg in so we have to allow splitting.
7166 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7169 if ((length <= space_left) ||
7170 ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7171 /* Sub-optimal residual, don't split in non-eeor mode. */
7175 * If we reach here, length is larger than space_left. Do we wish
7176 * to split it for the sake of putting a packet together?
7178 if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
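/*
 * We never hand back more than a fragmentation point's worth; e.g.
 * with space_left = 1600 and frag_point = 1448 we peel off a
 * 1448-byte fragment now and leave the rest queued.
 */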
7179 /* It's ok to split it */
7180 return (min(space_left, frag_point));
7182 /* Nope, can't split */
7187 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7188 struct sctp_stream_out *strq,
7189 uint32_t space_left,
7190 uint32_t frag_point,
7195 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7200 /* Move from the stream to the send_queue keeping track of the total */
7201 struct sctp_association *asoc;
7202 struct sctp_stream_queue_pending *sp;
7203 struct sctp_tmit_chunk *chk;
7204 struct sctp_data_chunk *dchkh = NULL;
7205 struct sctp_idata_chunk *ndchkh = NULL;
7206 uint32_t to_move, length;
7208 uint8_t rcv_flags = 0;
7210 uint8_t send_lock_up = 0;
7212 SCTP_TCB_LOCK_ASSERT(stcb);
7215 /* sa_ignore FREED_MEMORY */
7216 sp = TAILQ_FIRST(&strq->outqueue);
7218 if (send_lock_up == 0) {
7219 SCTP_TCB_SEND_LOCK(stcb);
7222 sp = TAILQ_FIRST(&strq->outqueue);
7226 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7227 (stcb->asoc.idata_supported == 0) &&
7228 (strq->last_msg_incomplete)) {
7229 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7231 strq->last_msg_incomplete);
7232 strq->last_msg_incomplete = 0;
7236 SCTP_TCB_SEND_UNLOCK(stcb);
7241 if ((sp->msg_is_complete) && (sp->length == 0)) {
7242 if (sp->sender_all_done) {
7244 * We are doing deferred cleanup. Last time through
7245 * when we took all the data the sender_all_done was
7248 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7249 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7250 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7251 sp->sender_all_done,
7253 sp->msg_is_complete,
7257 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7258 SCTP_TCB_SEND_LOCK(stcb);
7261 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7262 TAILQ_REMOVE(&strq->outqueue, sp, next);
7263 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7264 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7265 (strq->chunks_on_queues == 0) &&
7266 TAILQ_EMPTY(&strq->outqueue)) {
7267 stcb->asoc.trigger_reset = 1;
7270 sctp_free_remote_addr(sp->net);
7274 sctp_m_freem(sp->data);
7277 sctp_free_a_strmoq(stcb, sp, so_locked);
7278 /* we can't be locked to it */
7280 SCTP_TCB_SEND_UNLOCK(stcb);
7283 /* back to get the next msg */
7287 * sender just finished this but still holds a reference
7295 /* is there some to get */
7296 if (sp->length == 0) {
7301 } else if (sp->discard_rest) {
7302 if (send_lock_up == 0) {
7303 SCTP_TCB_SEND_LOCK(stcb);
7306 /* Whack down the size */
7307 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7308 if ((stcb->sctp_socket != NULL) &&
7309 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7310 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7311 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7314 sctp_m_freem(sp->data);
7316 sp->tail_mbuf = NULL;
7325 some_taken = sp->some_taken;
7327 length = sp->length;
7328 if (sp->msg_is_complete) {
7329 /* The message is complete */
7330 to_move = min(length, frag_point);
7331 if (to_move == length) {
7332 /* All of it fits in the MTU */
7333 if (sp->some_taken) {
7334 rcv_flags |= SCTP_DATA_LAST_FRAG;
7336 rcv_flags |= SCTP_DATA_NOT_FRAG;
7338 sp->put_last_out = 1;
7339 if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
7340 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7343 /* Not all of it fits, we fragment */
7344 if (sp->some_taken == 0) {
7345 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7350 to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
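/*
 * For a still-incomplete message, to_move is how many bytes we peel
 * off right now; zero means nothing is taken and the caller gives up
 * on this stream for this pass.
 */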
7353 * We use a snapshot of length in case it
7354 * is expanding during the compare.
7359 if (to_move >= llen) {
7361 if (send_lock_up == 0) {
7363 * We are taking all of an incomplete msg
7364 * thus we need a send lock.
7366 SCTP_TCB_SEND_LOCK(stcb);
7368 if (sp->msg_is_complete) {
7370 * the sender finished the msg
7377 if (sp->some_taken == 0) {
7378 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7382 /* Nothing to take. */
7389 /* If we reach here, we can copy out a chunk */
7390 sctp_alloc_a_chunk(stcb, chk);
7392 /* No chunk memory */
7398 * Setup for unordered if needed by looking at the user sent info flags.
7401 if (sp->sinfo_flags & SCTP_UNORDERED) {
7402 rcv_flags |= SCTP_DATA_UNORDERED;
7404 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7405 (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7406 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7408 /* clear out the chunk before setting up */
7409 memset(chk, 0, sizeof(*chk));
7410 chk->rec.data.rcv_flags = rcv_flags;
7412 if (to_move >= length) {
7413 /* we think we can steal the whole thing */
7414 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7415 SCTP_TCB_SEND_LOCK(stcb);
7418 if (to_move < sp->length) {
7419 /* bail, it changed */
7422 chk->data = sp->data;
7423 chk->last_mbuf = sp->tail_mbuf;
7424 /* register the stealing */
7425 sp->data = sp->tail_mbuf = NULL;
7430 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7431 chk->last_mbuf = NULL;
7432 if (chk->data == NULL) {
7433 sp->some_taken = some_taken;
7434 sctp_free_a_chunk(stcb, chk, so_locked);
7439 #ifdef SCTP_MBUF_LOGGING
7440 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7441 sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7444 /* Pull off the data */
7445 m_adj(sp->data, to_move);
7446 /* Now lets work our way down and compact it */
7448 while (m && (SCTP_BUF_LEN(m) == 0)) {
7449 sp->data = SCTP_BUF_NEXT(m);
7450 SCTP_BUF_NEXT(m) = NULL;
7451 if (sp->tail_mbuf == m) {
7453 * Freeing tail? TSNH since
7454 * we supposedly were taking less
7455 * than the sp->length.
7458 panic("Huh, freing tail? - TSNH");
7460 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7461 sp->tail_mbuf = sp->data = NULL;
7470 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7471 chk->copy_by_ref = 1;
7473 chk->copy_by_ref = 0;
7476 * get last_mbuf and counts of mb usage. This is ugly but hopefully
7477 * it's only one mbuf.
7479 if (chk->last_mbuf == NULL) {
7480 chk->last_mbuf = chk->data;
7481 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7482 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7486 if (to_move > length) {
7487 /*- This should not happen either
7488 * since we always lower to_move to the size
7489 * of sp->length if it's larger.
7492 panic("Huh, how can to_move be larger?");
7494 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7498 atomic_subtract_int(&sp->length, to_move);
7500 leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
7501 if (M_LEADINGSPACE(chk->data) < leading) {
7502 /* Not enough room for a chunk header, get some */
7505 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA);
7508 * we're in trouble here. _PREPEND below will free
7509 * all the data if there is no leading space, so we
7510 * must put the data back and restore.
7512 if (send_lock_up == 0) {
7513 SCTP_TCB_SEND_LOCK(stcb);
7516 if (sp->data == NULL) {
7517 /* unsteal the data */
7518 sp->data = chk->data;
7519 sp->tail_mbuf = chk->last_mbuf;
7523 /* reassemble the data */
7525 sp->data = chk->data;
7526 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7528 sp->some_taken = some_taken;
7529 atomic_add_int(&sp->length, to_move);
7532 sctp_free_a_chunk(stcb, chk, so_locked);
7536 SCTP_BUF_LEN(m) = 0;
7537 SCTP_BUF_NEXT(m) = chk->data;
7539 M_ALIGN(chk->data, 4);
7542 SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
7543 if (chk->data == NULL) {
7544 /* HELP, TSNH since we assured it would not above? */
7546 panic("prepend failes HELP?");
7548 SCTP_PRINTF("prepend fails HELP?\n");
7549 sctp_free_a_chunk(stcb, chk, so_locked);
7555 sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb));
7556 chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
7557 chk->book_size_scale = 0;
7558 chk->sent = SCTP_DATAGRAM_UNSENT;
7561 chk->asoc = &stcb->asoc;
7562 chk->pad_inplace = 0;
7563 chk->no_fr_allowed = 0;
7564 if (stcb->asoc.idata_supported == 0) {
7565 if (rcv_flags & SCTP_DATA_UNORDERED) {
7566 /* Just use 0. The receiver ignores the values. */
7567 chk->rec.data.mid = 0;
7569 chk->rec.data.mid = strq->next_mid_ordered;
7570 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7571 strq->next_mid_ordered++;
7575 if (rcv_flags & SCTP_DATA_UNORDERED) {
7576 chk->rec.data.mid = strq->next_mid_unordered;
7577 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7578 strq->next_mid_unordered++;
7581 chk->rec.data.mid = strq->next_mid_ordered;
7582 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7583 strq->next_mid_ordered++;
7587 chk->rec.data.sid = sp->sid;
7588 chk->rec.data.ppid = sp->ppid;
7589 chk->rec.data.context = sp->context;
7590 chk->rec.data.doing_fast_retransmit = 0;
7592 chk->rec.data.timetodrop = sp->ts;
7593 chk->flags = sp->act_flags;
7596 chk->whoTo = sp->net;
7597 atomic_add_int(&chk->whoTo->ref_count, 1);
7601 if (sp->holds_key_ref) {
7602 chk->auth_keyid = sp->auth_keyid;
7603 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7604 chk->holds_key_ref = 1;
7606 chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
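/*
 * The TSN is assigned here, as the chunk moves from the stream queue
 * to the send queue: atomic_fetchadd_int() returns the current
 * sending_seq and bumps it for the next chunk.
 */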
7607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7608 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7609 (uint32_t)(uintptr_t)stcb, sp->length,
7610 (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
7613 if (stcb->asoc.idata_supported == 0) {
7614 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7616 ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
7619 * Put the rest of the things in place now. Size was done earlier in
7620 * previous loop prior to padding.
7623 #ifdef SCTP_ASOCLOG_OF_TSNS
7624 SCTP_TCB_LOCK_ASSERT(stcb);
7625 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7626 asoc->tsn_out_at = 0;
7627 asoc->tsn_out_wrapped = 1;
7629 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
7630 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
7631 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
7632 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7633 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7634 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7635 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7636 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7639 if (stcb->asoc.idata_supported == 0) {
7640 dchkh->ch.chunk_type = SCTP_DATA;
7641 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7642 dchkh->dp.tsn = htonl(chk->rec.data.tsn);
7643 dchkh->dp.sid = htons(strq->sid);
7644 dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
7645 dchkh->dp.ppid = chk->rec.data.ppid;
7646 dchkh->ch.chunk_length = htons(chk->send_size);
7648 ndchkh->ch.chunk_type = SCTP_IDATA;
7649 ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7650 ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
7651 ndchkh->dp.sid = htons(strq->sid);
7652 ndchkh->dp.reserved = htons(0);
7653 ndchkh->dp.mid = htonl(chk->rec.data.mid);
7655 ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
7657 ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
7659 ndchkh->ch.chunk_length = htons(chk->send_size);
7661 /* Now advance the chk->send_size by the actual pad needed. */
7662 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7667 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
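/*
 * Example: a 23-byte fragment behind a plain 16-byte DATA chunk
 * header gives send_size = 39; SCTP_SIZE32(39) = 40, so one pad byte
 * is appended below.
 */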
7668 lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
7670 chk->last_mbuf = lm;
7671 chk->pad_inplace = 1;
7673 chk->send_size += pads;
7675 if (PR_SCTP_ENABLED(chk->flags)) {
7676 asoc->pr_sctp_cnt++;
7678 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7679 /* All done pull and kill the message */
7680 if (sp->put_last_out == 0) {
7681 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7682 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7683 sp->sender_all_done,
7685 sp->msg_is_complete,
7689 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7690 SCTP_TCB_SEND_LOCK(stcb);
7693 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7694 TAILQ_REMOVE(&strq->outqueue, sp, next);
7695 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7696 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7697 (strq->chunks_on_queues == 0) &&
7698 TAILQ_EMPTY(&strq->outqueue)) {
7699 stcb->asoc.trigger_reset = 1;
7702 sctp_free_remote_addr(sp->net);
7706 sctp_m_freem(sp->data);
7709 sctp_free_a_strmoq(stcb, sp, so_locked);
7711 asoc->chunks_on_out_queue++;
7712 strq->chunks_on_queues++;
7713 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7714 asoc->send_queue_cnt++;
7717 SCTP_TCB_SEND_UNLOCK(stcb);
7724 sctp_fill_outqueue(struct sctp_tcb *stcb,
7725 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7726 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7731 struct sctp_association *asoc;
7732 struct sctp_stream_out *strq;
7733 uint32_t space_left, moved, total_moved;
7736 SCTP_TCB_LOCK_ASSERT(stcb);
7739 switch (net->ro._l_addr.sa.sa_family) {
7742 space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
7747 space_left = net->mtu - SCTP_MIN_OVERHEAD;
7752 space_left = net->mtu;
7755 /* Need an allowance for the data chunk header too */
7756 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
7758 /* must make even word boundary */
7759 space_left &= 0xfffffffc;
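/*
 * Example: a 1500-byte IPv4 MTU leaves 1500 - 32 (IP plus common
 * header) - 16 (plain DATA chunk header) = 1452 bytes, already a
 * multiple of four, for this pass over the stream queues.
 */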
7760 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7763 while ((space_left > 0) && (strq != NULL)) {
7764 moved = sctp_move_to_outqueue(stcb, strq, space_left, frag_point,
7765 &giveup, eeor_mode, &bail, so_locked);
7766 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
7767 if ((giveup != 0) || (bail != 0)) {
7770 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7771 total_moved += moved;
7772 space_left -= moved;
7773 if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
7774 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
7778 space_left &= 0xfffffffc;
7783 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7785 if (total_moved == 0) {
7786 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7787 (net == stcb->asoc.primary_destination)) {
7788 /* ran dry for primary network net */
7789 SCTP_STAT_INCR(sctps_primary_randry);
7790 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7791 /* ran dry with CMT on */
7792 SCTP_STAT_INCR(sctps_cmt_randry);
7798 sctp_fix_ecn_echo(struct sctp_association *asoc)
7800 struct sctp_tmit_chunk *chk;
7802 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7803 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7804 chk->sent = SCTP_DATAGRAM_UNSENT;
7810 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7812 struct sctp_association *asoc;
7813 struct sctp_tmit_chunk *chk;
7814 struct sctp_stream_queue_pending *sp;
7821 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7822 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7823 if (sp->net == net) {
7824 sctp_free_remote_addr(sp->net);
7829 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7830 if (chk->whoTo == net) {
7831 sctp_free_remote_addr(chk->whoTo);
7838 sctp_med_chunk_output(struct sctp_inpcb *inp,
7839 struct sctp_tcb *stcb,
7840 struct sctp_association *asoc,
7843 int control_only, int from_where,
7844 struct timeval *now, int *now_filled, int frag_point, int so_locked
7845 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7851 * Ok this is the generic chunk service queue. We must do the following:
7853 * - Service the stream queue that is next, moving any
7854 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7855 * LAST to the out queue in one pass) and assigning TSN's. This
7856 * only applies though if the peer does not support NDATA. For NDATA
7857 * chunks it's ok to not send the entire message ;-)
7858 * - Check to see if the cwnd/rwnd allows any output; if so, we go ahead
7859 * and formulate and send the low level chunks, making sure to combine
7860 * any control in the control chunk queue also.
7862 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7863 struct mbuf *outchain, *endoutchain;
7864 struct sctp_tmit_chunk *chk, *nchk;
7866 /* temp arrays for unlinking */
7867 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7868 int no_fragmentflg, error;
7869 unsigned int max_rwnd_per_dest, max_send_per_dest;
7870 int one_chunk, hbflag, skip_data_for_this_net;
7871 int asconf, cookie, no_out_cnt;
7872 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7873 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7875 uint32_t auth_offset = 0;
7876 struct sctp_auth_chunk *auth = NULL;
7877 uint16_t auth_keyid;
7878 int override_ok = 1;
7879 int skip_fill_up = 0;
7880 int data_auth_reqd = 0;
7883 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the destination.
7890 auth_keyid = stcb->asoc.authinfo.active_keyid;
7891 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7892 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7893 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7898 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7900 * First lets prime the pump. For each destination, if there is room
7901 * in the flight size, attempt to pull an MTU's worth out of the
7902 * stream queues into the general send_queue
7904 #ifdef SCTP_AUDITING_ENABLED
7905 sctp_audit_log(0xC2, 2);
7907 SCTP_TCB_LOCK_ASSERT(stcb);
7914 /* Nothing possible to send? */
7915 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7916 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7917 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7918 TAILQ_EMPTY(&asoc->send_queue) &&
7919 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
7924 if (asoc->peers_rwnd == 0) {
7925 /* No room in peers rwnd */
7927 if (asoc->total_flight > 0) {
7928 /* we are allowed one chunk in flight */
7932 if (stcb->asoc.ecn_echo_cnt_onq) {
7933 /* Record where a sack goes, if any */
7934 if (no_data_chunks &&
7935 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7936 /* Nothing but ECNe to send - we don't do that */
7937 goto nothing_to_send;
7939 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7940 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7941 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7942 sack_goes_to = chk->whoTo;
7947 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7948 if (stcb->sctp_socket)
7949 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7951 max_send_per_dest = 0;
7952 if (no_data_chunks == 0) {
7953 /* How many non-directed chunks are there? */
7954 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7955 if (chk->whoTo == NULL) {
7957 * We already have non-directed chunks on
7958 * the queue, no need to do a fill-up.
7966 if ((no_data_chunks == 0) &&
7967 (skip_fill_up == 0) &&
7968 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7969 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7971 * This for loop we are in takes in each net, if
7972 * it's got space in cwnd and has data sent to it
7973 * (when CMT is off) then it calls
7974 * sctp_fill_outqueue for the net. This gets data on
7975 * the send queue for that network.
7977 * In sctp_fill_outqueue TSN's are assigned and data
7978 * is copied out of the stream buffers. Note mostly
7979 * copy by reference (we hope).
7981 net->window_probe = 0;
7982 if ((net != stcb->asoc.alternate) &&
7983 ((net->dest_state & SCTP_ADDR_PF) ||
7984 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7985 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7986 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7987 sctp_log_cwnd(stcb, net, 1,
7988 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7992 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7993 (net->flight_size == 0)) {
7994 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7996 if (net->flight_size >= net->cwnd) {
7997 /* skip this network, no room - can't fill */
7998 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7999 sctp_log_cwnd(stcb, net, 3,
8000 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8004 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8005 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
8007 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
8009 /* memory alloc failure */
8015 /* now service each destination and send out what we can for it */
8016 /* Nothing to send? */
8017 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8018 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8019 TAILQ_EMPTY(&asoc->send_queue)) {
8024 if (asoc->sctp_cmt_on_off > 0) {
8025 /* get the last start point */
8026 start_at = asoc->last_net_cmt_send_started;
8027 if (start_at == NULL) {
8028 /* null so to beginning */
8029 start_at = TAILQ_FIRST(&asoc->nets);
8031 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8032 if (start_at == NULL) {
8033 start_at = TAILQ_FIRST(&asoc->nets);
8036 asoc->last_net_cmt_send_started = start_at;
8038 start_at = TAILQ_FIRST(&asoc->nets);
8040 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8041 if (chk->whoTo == NULL) {
8042 if (asoc->alternate) {
8043 chk->whoTo = asoc->alternate;
8045 chk->whoTo = asoc->primary_destination;
8047 atomic_add_int(&chk->whoTo->ref_count, 1);
8050 old_start_at = NULL;
8051 again_one_more_time:
8052 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8053 /* how much can we send? */
8054 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8055 if (old_start_at && (old_start_at == net)) {
8056 /* through list completely. */
8060 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8061 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8062 (net->flight_size >= net->cwnd)) {
8064 * Nothing on control or asconf and flight is full,
8065 * we can skip even in the CMT case.
8070 endoutchain = outchain = NULL;
8073 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8074 skip_data_for_this_net = 1;
8076 skip_data_for_this_net = 0;
8078 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8081 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8086 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8096 if (mtu > asoc->peers_rwnd) {
8097 if (asoc->total_flight > 0) {
8098 /* We have a packet in flight somewhere */
8099 r_mtu = asoc->peers_rwnd;
8101 /* We are always allowed to send one MTU out */
8109 /************************/
8110 /* ASCONF transmission */
8111 /************************/
8112 /* Now first lets go through the asconf queue */
8113 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8114 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8117 if (chk->whoTo == NULL) {
8118 if (asoc->alternate == NULL) {
8119 if (asoc->primary_destination != net) {
8123 if (asoc->alternate != net) {
8128 if (chk->whoTo != net) {
8132 if (chk->data == NULL) {
8135 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8136 chk->sent != SCTP_DATAGRAM_RESEND) {
8140 * if no AUTH is yet included and this chunk
8141 * requires it, make sure to account for it. We
8142 * don't apply the size until the AUTH chunk is
8143 * actually added below in case there is no room for
8144 * this chunk. NOTE: we overload the use of "omtu"
8147 if ((auth == NULL) &&
8148 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8149 stcb->asoc.peer_auth_chunks)) {
8150 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8153 /* Here we do NOT factor the r_mtu */
8154 if ((chk->send_size < (int)(mtu - omtu)) ||
8155 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8157 * We probably should glom the mbuf chain
8158 * from the chk->data for control but the
8159 * problem is it becomes yet one more level
8160 * of tracking to do if for some reason
8161 * output fails. Then I have got to
8162 * reconstruct the merged control chain.. el
8163 * yucko... for now we take the easy way and do the copy.
8167 * Add an AUTH chunk, if chunk requires it
8168 * save the offset into the chain for AUTH
8170 if ((auth == NULL) &&
8171 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8172 stcb->asoc.peer_auth_chunks))) {
8173 outchain = sctp_add_auth_chunk(outchain,
8178 chk->rec.chunk_id.id);
8179 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8181 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8182 (int)chk->rec.chunk_id.can_take_data,
8183 chk->send_size, chk->copy_by_ref);
8184 if (outchain == NULL) {
8186 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8189 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8190 /* update our MTU size */
8191 if (mtu > (chk->send_size + omtu))
8192 mtu -= (chk->send_size + omtu);
8195 to_out += (chk->send_size + omtu);
8196 /* Do clear IP_DF ? */
8197 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8200 if (chk->rec.chunk_id.can_take_data)
8203 * set hb flag since we can use these for
8209 * should sysctl this: don't bundle data
8210 * with ASCONF since it requires AUTH
8213 chk->sent = SCTP_DATAGRAM_SENT;
8214 if (chk->whoTo == NULL) {
8216 atomic_add_int(&net->ref_count, 1);
8221 * Ok we are out of room but we can
8222 * output without affecting the
8223 * flight size since this little guy
8224 * is a control only packet.
8226 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8228 * do NOT clear the asconf flag as
8229 * it is used to do appropriate
8230 * source address selection.
8232 if (*now_filled == 0) {
8233 (void)SCTP_GETTIME_TIMEVAL(now);
8236 net->last_sent_time = *now;
8238 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8239 (struct sockaddr *)&net->ro._l_addr,
8240 outchain, auth_offset, auth,
8241 stcb->asoc.authinfo.active_keyid,
8242 no_fragmentflg, 0, asconf,
8243 inp->sctp_lport, stcb->rport,
8244 htonl(stcb->asoc.peer_vtag),
8249 * error, we could not output
8252 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8253 if (from_where == 0) {
8254 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8256 if (error == ENOBUFS) {
8257 asoc->ifp_had_enobuf = 1;
8258 SCTP_STAT_INCR(sctps_lowlevelerr);
8260 /* error, could not output */
8261 if (error == EHOSTUNREACH) {
8267 sctp_move_chunks_from_net(stcb, net);
8272 asoc->ifp_had_enobuf = 0;
8275 * increase the number we sent, if a
8276 * cookie is sent we don't tell them
8279 outchain = endoutchain = NULL;
8283 *num_out += ctl_cnt;
8284 /* recalc a clean slate and setup */
8285 switch (net->ro._l_addr.sa.sa_family) {
8288 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8293 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8310 /************************/
8311 /* Control transmission */
8312 /************************/
8313 /* Now first let's go through the control queue */
8314 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8315 if ((sack_goes_to) &&
8316 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8317 (chk->whoTo != sack_goes_to)) {
8319 * if we have a sack in queue, and we are
8320 * looking at an ecn echo that is NOT queued
8321 * to where the sack is going..
8323 if (chk->whoTo == net) {
8325 * Don't transmit it to where it's
8326 * going (current net)
8329 } else if (sack_goes_to == net) {
8331 * But do transmit it to this address.
8334 goto skip_net_check;
8337 if (chk->whoTo == NULL) {
8338 if (asoc->alternate == NULL) {
8339 if (asoc->primary_destination != net) {
8343 if (asoc->alternate != net) {
8348 if (chk->whoTo != net) {
8353 if (chk->data == NULL) {
8356 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8358 * It must be unsent. Cookies and ASCONF's
8359 * hang around but their timers will force them
8360 * when marked for resend.
8365 * if no AUTH is yet included and this chunk
8366 * requires it, make sure to account for it. We
8367 * don't apply the size until the AUTH chunk is
8368 * actually added below in case there is no room for
8369 * this chunk. NOTE: we overload the use of "omtu" here to hold the AUTH chunk length.
8372 if ((auth == NULL) &&
8373 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8374 stcb->asoc.peer_auth_chunks)) {
8375 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8378 /* Here we do NOT factor the r_mtu */
8379 if ((chk->send_size <= (int)(mtu - omtu)) ||
8380 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8382 * We probably should glom the mbuf chain
8383 * from the chk->data for control but the
8384 * problem is it becomes yet one more level
8385 * of tracking to do if for some reason
8386 * output fails. Then I have got to
8387 * reconstruct the merged control chain.. el
8388 * yucko.. for now we take the easy way and do the copy.
8392 * Add an AUTH chunk, if chunk requires it
8393 * save the offset into the chain for AUTH
8395 if ((auth == NULL) &&
8396 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8397 stcb->asoc.peer_auth_chunks))) {
8398 outchain = sctp_add_auth_chunk(outchain,
8403 chk->rec.chunk_id.id);
8404 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8406 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8407 (int)chk->rec.chunk_id.can_take_data,
8408 chk->send_size, chk->copy_by_ref);
8409 if (outchain == NULL) {
8411 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8414 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8415 /* update our MTU size */
8416 if (mtu > (chk->send_size + omtu))
8417 mtu -= (chk->send_size + omtu);
8420 to_out += (chk->send_size + omtu);
8421 /* Do clear IP_DF ? */
8422 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8425 if (chk->rec.chunk_id.can_take_data)
8427 /* Mark things to be removed, if needed */
8428 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8429 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8430 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8431 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8432 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8433 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8434 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8435 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8436 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8437 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8438 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8439 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8442 /* remove these chunks at the end */
8443 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8444 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8445 /* turn off the timer */
8446 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8447 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8449 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8455 * Other chunks, since they have
8456 * timers running (i.e. COOKIE) we
8457 * just "trust" that it gets sent or retransmitted.
8461 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8464 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8466 * Increment ecne send count
8467 * here this means we may be
8468 * over-zealous in our
8469 * counting if the send
8470 * fails, but it's the best
8471 * place to do it (we used
8472 * to do it in the queue of
8473 * the chunk, but that did
8474 * not tell how many times it was sent).
8477 SCTP_STAT_INCR(sctps_sendecne);
8479 chk->sent = SCTP_DATAGRAM_SENT;
8480 if (chk->whoTo == NULL) {
8482 atomic_add_int(&net->ref_count, 1);
8488 * Ok we are out of room but we can
8489 * output without affecting the
8490 * flight size since this little guy
8491 * is a control only packet.
8494 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8496 * do NOT clear the asconf
8497 * flag as it is used to do
8498 * appropriate source
8499 * address selection.
8503 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8506 /* Only HB or ASCONF advances time */
8508 if (*now_filled == 0) {
8509 (void)SCTP_GETTIME_TIMEVAL(now);
8512 net->last_sent_time = *now;
8515 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8516 (struct sockaddr *)&net->ro._l_addr,
8519 stcb->asoc.authinfo.active_keyid,
8520 no_fragmentflg, 0, asconf,
8521 inp->sctp_lport, stcb->rport,
8522 htonl(stcb->asoc.peer_vtag),
8527 * error, we could not output.
8530 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8531 if (from_where == 0) {
8532 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8534 if (error == ENOBUFS) {
8535 asoc->ifp_had_enobuf = 1;
8536 SCTP_STAT_INCR(sctps_lowlevelerr);
8538 if (error == EHOSTUNREACH) {
8544 sctp_move_chunks_from_net(stcb, net);
8549 asoc->ifp_had_enobuf = 0;
8552 * increase the number we sent, if a
8553 * cookie is sent we don't tell them
8556 outchain = endoutchain = NULL;
8560 *num_out += ctl_cnt;
8561 /* recalc a clean slate and setup */
8562 switch (net->ro._l_addr.sa.sa_family) {
8565 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8570 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8587 /* JRI: if dest is in PF state, do not send data to it */
8588 if ((asoc->sctp_cmt_on_off > 0) &&
8589 (net != stcb->asoc.alternate) &&
8590 (net->dest_state & SCTP_ADDR_PF)) {
8593 if (net->flight_size >= net->cwnd) {
8596 if ((asoc->sctp_cmt_on_off > 0) &&
8597 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8598 (net->flight_size > max_rwnd_per_dest)) {
8602 * We need a specific accounting for the usage of the send
8603 * buffer. We also need to check the number of messages per
8604 * net. For now, this is better than nothing and it is disabled
8607 if ((asoc->sctp_cmt_on_off > 0) &&
8608 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8609 (max_send_per_dest > 0) &&
8610 (net->flight_size > max_send_per_dest)) {
8613 /*********************/
8614 /* Data transmission */
8615 /*********************/
8617 * if AUTH for DATA is required and no AUTH has been added
8618 * yet, account for this in the mtu now... if no data can be
8619 * bundled, this adjustment won't matter anyways since the
8620 * packet will be going out...
8622 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8623 stcb->asoc.peer_auth_chunks);
8624 if (data_auth_reqd && (auth == NULL)) {
8625 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8627 /* now let's add any data within the MTU constraints */
8628 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8631 if (net->mtu > SCTP_MIN_V4_OVERHEAD)
8632 omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8639 if (net->mtu > SCTP_MIN_OVERHEAD)
8640 omtu = net->mtu - SCTP_MIN_OVERHEAD;
8650 if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
8651 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
8652 (skip_data_for_this_net == 0)) ||
8654 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8655 if (no_data_chunks) {
8656 /* let only control go out */
8660 if (net->flight_size >= net->cwnd) {
8661 /* skip this net, no room for data */
8665 if ((chk->whoTo != NULL) &&
8666 (chk->whoTo != net)) {
8667 /* Don't send the chunk on this net */
8671 if (asoc->sctp_cmt_on_off == 0) {
8672 if ((asoc->alternate) &&
8673 (asoc->alternate != net) &&
8674 (chk->whoTo == NULL)) {
8676 } else if ((net != asoc->primary_destination) &&
8677 (asoc->alternate == NULL) &&
8678 (chk->whoTo == NULL)) {
8682 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8684 * strange, we have a chunk that is
8685 * too big for its destination and
8686 * yet no fragment ok flag.
8687 * Something went wrong when the
8688 * PMTU changed...we did not mark
8689 * this chunk for some reason?? I
8690 * will fix it here by letting IP
8691 * fragment it for now and printing
8692 * a warning. This really should not happen.
8695 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8696 chk->send_size, mtu);
8697 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8699 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8700 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
8701 struct sctp_data_chunk *dchkh;
8703 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8704 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8706 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8707 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8708 /* ok we will add this one */
8711 * Add an AUTH chunk, if chunk
8712 * requires it, save the offset into
8713 * the chain for AUTH
8715 if (data_auth_reqd) {
8717 outchain = sctp_add_auth_chunk(outchain,
8723 auth_keyid = chk->auth_keyid;
8725 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8726 } else if (override_ok) {
8731 auth_keyid = chk->auth_keyid;
8733 } else if (auth_keyid != chk->auth_keyid) {
8741 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8742 chk->send_size, chk->copy_by_ref);
8743 if (outchain == NULL) {
8744 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8745 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8746 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8749 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8752 /* update our MTU size */
8753 /* Do clear IP_DF ? */
8754 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8757 /* unsigned subtraction of mtu */
8758 if (mtu > chk->send_size)
8759 mtu -= chk->send_size;
8762 /* unsigned subtraction of r_mtu */
8763 if (r_mtu > chk->send_size)
8764 r_mtu -= chk->send_size;
8768 to_out += chk->send_size;
8769 if ((to_out > mx_mtu) && no_fragmentflg) {
8771 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8773 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8777 chk->window_probe = 0;
8778 data_list[bundle_at++] = chk;
8779 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8782 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8783 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8784 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8786 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8788 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8789 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8799 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8801 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8802 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8803 data_list[0]->window_probe = 1;
8804 net->window_probe = 1;
8810 * Must be sent in order of the
8811 * TSN's (on a network)
8815 } /* for (chunk gather loop for this net) */
8816 } /* if asoc.state OPEN */
8818 /* Is there something to send for this destination? */
8820 /* We may need to start a control timer or two */
8822 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8825 * do NOT clear the asconf flag as it is
8826 * used to do appropriate source address
8831 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8834 /* must start a send timer if data is being sent */
8835 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8837 * no timer running on this destination
8840 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8842 if (bundle_at || hbflag) {
8843 /* For data/asconf and hb set time */
8844 if (*now_filled == 0) {
8845 (void)SCTP_GETTIME_TIMEVAL(now);
8848 net->last_sent_time = *now;
8850 /* Now send it, if there is anything to send :> */
8851 if ((error = sctp_lowlevel_chunk_output(inp,
8854 (struct sockaddr *)&net->ro._l_addr,
8862 inp->sctp_lport, stcb->rport,
8863 htonl(stcb->asoc.peer_vtag),
8867 /* error, we could not output */
8868 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8869 if (from_where == 0) {
8870 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8872 if (error == ENOBUFS) {
8873 asoc->ifp_had_enobuf = 1;
8874 SCTP_STAT_INCR(sctps_lowlevelerr);
8876 if (error == EHOSTUNREACH) {
8878 * Destination went unreachable
8881 sctp_move_chunks_from_net(stcb, net);
8885 * I add this line to be paranoid. As far as
8886 * I can tell the continue takes us back to
8887 * the top of the for loop, but just to make sure
8888 * I will reset these again here.
8890 ctl_cnt = bundle_at = 0;
8891 continue; /* This takes us back to the
8892 * for() for the nets. */
8894 asoc->ifp_had_enobuf = 0;
8900 *num_out += (ctl_cnt + bundle_at);
8903 /* setup for a RTO measurement */
8904 tsns_sent = data_list[0]->rec.data.tsn;
8905 /* fill time if not already filled */
8906 if (*now_filled == 0) {
8907 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8909 *now = asoc->time_last_sent;
8911 asoc->time_last_sent = *now;
8913 if (net->rto_needed) {
8914 data_list[0]->do_rtt = 1;
8915 net->rto_needed = 0;
8917 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
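/*
 * Move the just-sent chunks off the send queue onto the sent queue
 * and update the related counters.
 */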
8918 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8924 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8925 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8928 if (old_start_at == NULL) {
8929 old_start_at = start_at;
8930 start_at = TAILQ_FIRST(&asoc->nets);
8932 goto again_one_more_time;
8936 * At the end there should be no NON timed chunks hanging on this queue.
8939 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8940 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8942 if ((*num_out == 0) && (*reason_code == 0)) {
8947 sctp_clean_up_ctl(stcb, asoc, so_locked);
8952 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8955 * Prepend an OPERATION_ERROR chunk header and put it on the end of
8956 * the control chunk queue.
8958 struct sctp_chunkhdr *hdr;
8959 struct sctp_tmit_chunk *chk;
8960 struct mbuf *mat, *last_mbuf;
8961 uint32_t chunk_length;
8962 uint16_t padding_length;
8964 SCTP_TCB_LOCK_ASSERT(stcb);
8965 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
8966 if (op_err == NULL) {
8971 for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
8972 chunk_length += SCTP_BUF_LEN(mat);
8973 if (SCTP_BUF_NEXT(mat) == NULL) {
8977 if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
8978 sctp_m_freem(op_err);
8981 padding_length = chunk_length % 4;
8982 if (padding_length != 0) {
8983 padding_length = 4 - padding_length;
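/*
 * Chunks are padded to a 4-byte boundary; the pad bytes are appended
 * to the mbuf chain below but are not counted in chunk_length.
 */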
8985 if (padding_length != 0) {
8986 if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
8987 sctp_m_freem(op_err);
8991 sctp_alloc_a_chunk(stcb, chk);
8994 sctp_m_freem(op_err);
8997 chk->copy_by_ref = 0;
8998 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8999 chk->rec.chunk_id.can_take_data = 0;
9001 chk->send_size = (uint16_t)chunk_length;
9002 chk->sent = SCTP_DATAGRAM_UNSENT;
9004 chk->asoc = &stcb->asoc;
9007 hdr = mtod(op_err, struct sctp_chunkhdr *);
9008 hdr->chunk_type = SCTP_OPERATION_ERROR;
9009 hdr->chunk_flags = 0;
9010 hdr->chunk_length = htons(chk->send_size);
9011 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9012 chk->asoc->ctrl_queue_cnt++;
9016 sctp_send_cookie_echo(struct mbuf *m,
9017 int offset, int limit,
9018 struct sctp_tcb *stcb,
9019 struct sctp_nets *net)
9022 * pull out the cookie and put it at the front of the control chunk queue.
9026 struct mbuf *cookie;
9027 struct sctp_paramhdr param, *phdr;
9028 struct sctp_chunkhdr *hdr;
9029 struct sctp_tmit_chunk *chk;
9030 uint16_t ptype, plen;
9032 SCTP_TCB_LOCK_ASSERT(stcb);
9033 /* First find the cookie in the param area */
9035 at = offset + sizeof(struct sctp_init_chunk);
9037 phdr = sctp_get_next_param(m, at, ¶m, sizeof(param));
9041 ptype = ntohs(phdr->param_type);
9042 plen = ntohs(phdr->param_length);
9043 if (plen < sizeof(struct sctp_paramhdr)) {
9046 if (ptype == SCTP_STATE_COOKIE) {
9049 /* found the cookie */
9050 if (at + plen > limit) {
9053 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9054 if (cookie == NULL) {
9058 if ((pad = (plen % 4)) > 0) {
9062 if (sctp_pad_lastmbuf(cookie, pad, NULL) == NULL) {
9066 #ifdef SCTP_MBUF_LOGGING
9067 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9068 sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9073 at += SCTP_SIZE32(plen);
9075 /* ok, we got the cookie, let's change it into a cookie echo chunk */
9076 /* first the change from param to cookie */
9077 hdr = mtod(cookie, struct sctp_chunkhdr *);
9078 hdr->chunk_type = SCTP_COOKIE_ECHO;
9079 hdr->chunk_flags = 0;
9080 /* get the chunk stuff now and place it in the FRONT of the queue */
9081 sctp_alloc_a_chunk(stcb, chk);
9084 sctp_m_freem(cookie);
9087 chk->copy_by_ref = 0;
9088 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9089 chk->rec.chunk_id.can_take_data = 0;
9090 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9091 chk->send_size = SCTP_SIZE32(plen);
9092 chk->sent = SCTP_DATAGRAM_UNSENT;
9094 chk->asoc = &stcb->asoc;
9097 atomic_add_int(&chk->whoTo->ref_count, 1);
9098 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9099 chk->asoc->ctrl_queue_cnt++;
9104 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9108 struct sctp_nets *net)
9111 * take a HB request and make it into a HB ack and send it.
9113 struct mbuf *outchain;
9114 struct sctp_chunkhdr *chdr;
9115 struct sctp_tmit_chunk *chk;
9118 /* must have a net pointer */
9121 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9122 if (outchain == NULL) {
9123 /* gak out of memory */
9126 #ifdef SCTP_MBUF_LOGGING
9127 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9128 sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9131 chdr = mtod(outchain, struct sctp_chunkhdr *);
9132 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9133 chdr->chunk_flags = 0;
9134 if (chk_length % 4 != 0) {
9135 sctp_pad_lastmbuf(outchain, 4 - (chk_length % 4), NULL);
9137 sctp_alloc_a_chunk(stcb, chk);
9140 sctp_m_freem(outchain);
9143 chk->copy_by_ref = 0;
9144 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9145 chk->rec.chunk_id.can_take_data = 1;
9147 chk->send_size = chk_length;
9148 chk->sent = SCTP_DATAGRAM_UNSENT;
9150 chk->asoc = &stcb->asoc;
9151 chk->data = outchain;
9153 atomic_add_int(&chk->whoTo->ref_count, 1);
9154 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9155 chk->asoc->ctrl_queue_cnt++;
9159 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9161 /* formulate and queue a cookie-ack back to sender */
9162 struct mbuf *cookie_ack;
9163 struct sctp_chunkhdr *hdr;
9164 struct sctp_tmit_chunk *chk;
9166 SCTP_TCB_LOCK_ASSERT(stcb);
9168 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9169 if (cookie_ack == NULL) {
9173 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9174 sctp_alloc_a_chunk(stcb, chk);
9177 sctp_m_freem(cookie_ack);
9180 chk->copy_by_ref = 0;
9181 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9182 chk->rec.chunk_id.can_take_data = 1;
9184 chk->send_size = sizeof(struct sctp_chunkhdr);
9185 chk->sent = SCTP_DATAGRAM_UNSENT;
9187 chk->asoc = &stcb->asoc;
9188 chk->data = cookie_ack;
9189 if (chk->asoc->last_control_chunk_from != NULL) {
9190 chk->whoTo = chk->asoc->last_control_chunk_from;
9191 atomic_add_int(&chk->whoTo->ref_count, 1);
9195 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9196 hdr->chunk_type = SCTP_COOKIE_ACK;
9197 hdr->chunk_flags = 0;
9198 hdr->chunk_length = htons(chk->send_size);
9199 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9200 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9201 chk->asoc->ctrl_queue_cnt++;
9207 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9209 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9210 struct mbuf *m_shutdown_ack;
9211 struct sctp_shutdown_ack_chunk *ack_cp;
9212 struct sctp_tmit_chunk *chk;
9214 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9215 if (m_shutdown_ack == NULL) {
9219 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9220 sctp_alloc_a_chunk(stcb, chk);
9223 sctp_m_freem(m_shutdown_ack);
9226 chk->copy_by_ref = 0;
9227 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9228 chk->rec.chunk_id.can_take_data = 1;
9230 chk->send_size = sizeof(struct sctp_chunkhdr);
9231 chk->sent = SCTP_DATAGRAM_UNSENT;
9233 chk->asoc = &stcb->asoc;
9234 chk->data = m_shutdown_ack;
9237 atomic_add_int(&chk->whoTo->ref_count, 1);
9239 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9240 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9241 ack_cp->ch.chunk_flags = 0;
9242 ack_cp->ch.chunk_length = htons(chk->send_size);
9243 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9244 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9245 chk->asoc->ctrl_queue_cnt++;
9250 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9252 /* formulate and queue a SHUTDOWN to the sender */
9253 struct mbuf *m_shutdown;
9254 struct sctp_shutdown_chunk *shutdown_cp;
9255 struct sctp_tmit_chunk *chk;
9257 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9258 if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9259 /* We already have a SHUTDOWN queued. Reuse it. */
9261 sctp_free_remote_addr(chk->whoTo);
9268 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9269 if (m_shutdown == NULL) {
9273 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9274 sctp_alloc_a_chunk(stcb, chk);
9277 sctp_m_freem(m_shutdown);
9280 chk->copy_by_ref = 0;
9281 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9282 chk->rec.chunk_id.can_take_data = 1;
9284 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9285 chk->sent = SCTP_DATAGRAM_UNSENT;
9287 chk->asoc = &stcb->asoc;
9288 chk->data = m_shutdown;
9291 atomic_add_int(&chk->whoTo->ref_count, 1);
9293 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9294 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9295 shutdown_cp->ch.chunk_flags = 0;
9296 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9297 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
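/*
 * The SHUTDOWN chunk carries our current cumulative TSN ack so the
 * peer can release everything that has already been acknowledged.
 */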
9298 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9299 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9300 chk->asoc->ctrl_queue_cnt++;
9302 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9305 atomic_add_int(&chk->whoTo->ref_count, 1);
9307 shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9308 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9309 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9315 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9318 * formulate and queue an ASCONF to the peer. ASCONF parameters
9319 * should be queued on the assoc queue.
9321 struct sctp_tmit_chunk *chk;
9322 struct mbuf *m_asconf;
9325 SCTP_TCB_LOCK_ASSERT(stcb);
9327 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9328 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9329 /* can't send a new one if there is one in flight already */
9333 /* compose an ASCONF chunk, maximum length is PMTU */
9334 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9335 if (m_asconf == NULL) {
9339 sctp_alloc_a_chunk(stcb, chk);
9342 sctp_m_freem(m_asconf);
9346 chk->copy_by_ref = 0;
9347 chk->rec.chunk_id.id = SCTP_ASCONF;
9348 chk->rec.chunk_id.can_take_data = 0;
9349 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9350 chk->data = m_asconf;
9351 chk->send_size = len;
9352 chk->sent = SCTP_DATAGRAM_UNSENT;
9354 chk->asoc = &stcb->asoc;
9357 atomic_add_int(&chk->whoTo->ref_count, 1);
9359 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9360 chk->asoc->ctrl_queue_cnt++;
9365 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9368 * formulate and queue an asconf-ack back to the sender. The asconf-ack
9369 * must be stored in the tcb.
9371 struct sctp_tmit_chunk *chk;
9372 struct sctp_asconf_ack *ack, *latest_ack;
9374 struct sctp_nets *net = NULL;
9376 SCTP_TCB_LOCK_ASSERT(stcb);
9377 /* Get the latest ASCONF-ACK */
9378 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9379 if (latest_ack == NULL) {
9382 if (latest_ack->last_sent_to != NULL &&
9383 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9384 /* we're doing a retransmission */
9385 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9388 if (stcb->asoc.last_control_chunk_from == NULL) {
9389 if (stcb->asoc.alternate) {
9390 net = stcb->asoc.alternate;
9392 net = stcb->asoc.primary_destination;
9395 net = stcb->asoc.last_control_chunk_from;
9400 if (stcb->asoc.last_control_chunk_from == NULL) {
9401 if (stcb->asoc.alternate) {
9402 net = stcb->asoc.alternate;
9404 net = stcb->asoc.primary_destination;
9407 net = stcb->asoc.last_control_chunk_from;
9410 latest_ack->last_sent_to = net;
9412 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9413 if (ack->data == NULL) {
9417 /* copy the asconf_ack */
9418 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9419 if (m_ack == NULL) {
9420 /* couldn't copy it */
9423 #ifdef SCTP_MBUF_LOGGING
9424 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9425 sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9429 sctp_alloc_a_chunk(stcb, chk);
9433 sctp_m_freem(m_ack);
9436 chk->copy_by_ref = 0;
9437 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9438 chk->rec.chunk_id.can_take_data = 1;
9439 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9442 atomic_add_int(&chk->whoTo->ref_count, 1);
9445 chk->send_size = ack->len;
9446 chk->sent = SCTP_DATAGRAM_UNSENT;
9448 chk->asoc = &stcb->asoc;
9450 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9451 chk->asoc->ctrl_queue_cnt++;
9458 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9459 struct sctp_tcb *stcb,
9460 struct sctp_association *asoc,
9461 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9462 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9468 * send out one MTU of retransmission. If fast_retransmit is
9469 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9470 * rwnd. For a Cookie or Asconf in the control chunk queue we
9471 * retransmit them by themselves.
9473 * For data chunks we will pick out the lowest TSN's in the sent_queue
9474 * marked for resend and bundle them all together (up to a MTU of
9475 * destination). The address to send to should have been
9476 * selected/changed where the retransmission was marked (i.e. in FR
9477 * or t3-timeout routines).
9479 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9480 struct sctp_tmit_chunk *chk, *fwd;
9481 struct mbuf *m, *endofchain;
9482 struct sctp_nets *net = NULL;
9483 uint32_t tsns_sent = 0;
9484 int no_fragmentflg, bundle_at, cnt_thru;
9486 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9487 struct sctp_auth_chunk *auth = NULL;
9488 uint32_t auth_offset = 0;
9489 uint16_t auth_keyid;
9490 int override_ok = 1;
9491 int data_auth_reqd = 0;
9494 SCTP_TCB_LOCK_ASSERT(stcb);
9495 tmr_started = ctl_cnt = bundle_at = error = 0;
9500 endofchain = m = NULL;
9501 auth_keyid = stcb->asoc.authinfo.active_keyid;
9502 #ifdef SCTP_AUDITING_ENABLED
9503 sctp_audit_log(0xC3, 1);
9505 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9506 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9507 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9508 asoc->sent_queue_retran_cnt);
9509 asoc->sent_queue_cnt = 0;
9510 asoc->sent_queue_cnt_removeable = 0;
9511 /* send back 0/0 so we enter normal transmission */
9515 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9516 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9517 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9518 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9519 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9522 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9523 if (chk != asoc->str_reset) {
9525 * not eligible for retran if its
9532 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9536 * Add an AUTH chunk, if chunk requires it save the
9537 * offset into the chain for AUTH
9539 if ((auth == NULL) &&
9540 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9541 stcb->asoc.peer_auth_chunks))) {
9542 m = sctp_add_auth_chunk(m, &endofchain,
9543 &auth, &auth_offset,
9545 chk->rec.chunk_id.id);
9546 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9548 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9554 /* do we have control chunks to retransmit? */
9556 /* Start a timer no matter if we succeed or fail */
9557 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9558 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9559 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9560 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9561 chk->snd_count++; /* update our count */
9562 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9563 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9564 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9565 no_fragmentflg, 0, 0,
9566 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9567 chk->whoTo->port, NULL,
9570 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9571 if (error == ENOBUFS) {
9572 asoc->ifp_had_enobuf = 1;
9573 SCTP_STAT_INCR(sctps_lowlevelerr);
9577 asoc->ifp_had_enobuf = 0;
9583 * We don't want to mark the net->sent time here since
9584 * we use this for HB and retrans cannot measure RTT
9586 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9588 chk->sent = SCTP_DATAGRAM_SENT;
9589 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9593 /* Clean up the fwd-tsn list */
9594 sctp_clean_up_ctl(stcb, asoc, so_locked);
9599 * Ok, it is just data retransmission we need to do, or that plus a
9600 * fwd-tsn along with it.
9602 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9603 return (SCTP_RETRAN_DONE);
9605 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) ||
9606 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) {
9607 /* not yet open, resend the cookie and that is it */
9610 #ifdef SCTP_AUDITING_ENABLED
9611 sctp_auditing(20, inp, stcb, NULL);
9613 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9614 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9615 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9616 /* No, not sent to this net or not ready for rtx */
9619 if (chk->data == NULL) {
9620 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9621 chk->rec.data.tsn, chk->snd_count, chk->sent);
9624 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9625 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9626 struct mbuf *op_err;
9627 char msg[SCTP_DIAG_INFO_LEN];
9629 snprintf(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
9630 chk->rec.data.tsn, chk->snd_count);
9631 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
9633 atomic_add_int(&stcb->asoc.refcnt, 1);
9634 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
9636 SCTP_TCB_LOCK(stcb);
9637 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9638 return (SCTP_RETRAN_EXIT);
9640 /* pick up the net */
9642 switch (net->ro._l_addr.sa.sa_family) {
9645 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9650 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9659 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9660 /* No room in peers rwnd */
9663 tsn = asoc->last_acked_seq + 1;
9664 if (tsn == chk->rec.data.tsn) {
9666 * we make a special exception for this
9667 * case. The peer has no rwnd but is missing
9668 * the lowest chunk.. which is probably what
9669 * is holding up the rwnd.
9671 goto one_chunk_around;
9676 if (asoc->peers_rwnd < mtu) {
9678 if ((asoc->peers_rwnd == 0) &&
9679 (asoc->total_flight == 0)) {
9680 chk->window_probe = 1;
9681 chk->whoTo->window_probe = 1;
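/*
 * Zero receive window and nothing in flight: send this single chunk
 * as a window probe so the peer's window can be re-opened.
 */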
9684 #ifdef SCTP_AUDITING_ENABLED
9685 sctp_audit_log(0xC3, 2);
9689 net->fast_retran_ip = 0;
9690 if (chk->rec.data.doing_fast_retransmit == 0) {
9692 * if no FR in progress skip destinations that have
9693 * flight_size > cwnd.
9695 if (net->flight_size >= net->cwnd) {
9700 * Mark the destination net to have FR recovery
9704 net->fast_retran_ip = 1;
9708 * if no AUTH is yet included and this chunk requires it,
9709 * make sure to account for it. We don't apply the size
9710 * until the AUTH chunk is actually added below in case
9711 * there is no room for this chunk.
9713 if (data_auth_reqd && (auth == NULL)) {
9714 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9718 if ((chk->send_size <= (mtu - dmtu)) ||
9719 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9720 /* ok we will add this one */
9721 if (data_auth_reqd) {
9723 m = sctp_add_auth_chunk(m,
9729 auth_keyid = chk->auth_keyid;
9731 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9732 } else if (override_ok) {
9733 auth_keyid = chk->auth_keyid;
9735 } else if (chk->auth_keyid != auth_keyid) {
9736 /* different keyid, so done bundling */
9740 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9742 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9745 /* Do clear IP_DF ? */
9746 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9749 /* update our MTU size */
9750 if (mtu > (chk->send_size + dmtu))
9751 mtu -= (chk->send_size + dmtu);
9754 data_list[bundle_at++] = chk;
9755 if (one_chunk && (asoc->total_flight <= 0)) {
9756 SCTP_STAT_INCR(sctps_windowprobed);
9759 if (one_chunk == 0) {
9761 * now, are there any more forward from chk to pick up?
9764 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9765 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9766 /* Nope, not for retran */
9769 if (fwd->whoTo != net) {
9770 /* Nope, not the net in question */
9773 if (data_auth_reqd && (auth == NULL)) {
9774 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9777 if (fwd->send_size <= (mtu - dmtu)) {
9778 if (data_auth_reqd) {
9780 m = sctp_add_auth_chunk(m,
9786 auth_keyid = fwd->auth_keyid;
9788 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9789 } else if (override_ok) {
9790 auth_keyid = fwd->auth_keyid;
9792 } else if (fwd->auth_keyid != auth_keyid) {
9800 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9802 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9805 /* Do clear IP_DF ? */
9806 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9809 /* update our MTU size */
9810 if (mtu > (fwd->send_size + dmtu))
9811 mtu -= (fwd->send_size + dmtu);
9814 data_list[bundle_at++] = fwd;
9815 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9819 /* can't fit so we are done */
9824 /* Is there something to send for this destination? */
9827 * No matter if we fail or succeed we should start a
9828 * timer. A failure is like a lost IP packet :-)
9830 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9832 * no timer running on this destination
9835 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9838 /* Now let's send it, if there is anything to send :> */
9839 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9840 (struct sockaddr *)&net->ro._l_addr, m,
9841 auth_offset, auth, auth_keyid,
9842 no_fragmentflg, 0, 0,
9843 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9847 /* error, we could not output */
9848 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9849 if (error == ENOBUFS) {
9850 asoc->ifp_had_enobuf = 1;
9851 SCTP_STAT_INCR(sctps_lowlevelerr);
9855 asoc->ifp_had_enobuf = 0;
9862 * We don't want to mark the net->sent time here
9863 * since we use this for HB and retrans cannot measure RTT.
9866 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9868 /* For auto-close */
9870 if (*now_filled == 0) {
9871 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9872 *now = asoc->time_last_sent;
9875 asoc->time_last_sent = *now;
9877 *cnt_out += bundle_at;
9878 #ifdef SCTP_AUDITING_ENABLED
9879 sctp_audit_log(0xC4, bundle_at);
9882 tsns_sent = data_list[0]->rec.data.tsn;
9884 for (i = 0; i < bundle_at; i++) {
9885 SCTP_STAT_INCR(sctps_sendretransdata);
9886 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9888 * When we have revoked data and we
9889 * retransmit it, we clear the revoked
9890 * flag since this flag dictates whether we
9891 * subtracted it from the flight size
9893 if (data_list[i]->rec.data.chunk_was_revoked) {
9894 /* Deflate the cwnd */
9895 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9896 data_list[i]->rec.data.chunk_was_revoked = 0;
9898 data_list[i]->snd_count++;
9899 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9900 /* record the time */
9901 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9902 if (data_list[i]->book_size_scale) {
9904 * need to double the book size on this chunk.
9907 data_list[i]->book_size_scale = 0;
9909 * Since we double the booksize, we
9910 * must also double the output queue
9911 * size, since this gets shrunk when
9912 * we free by this amount.
9914 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9915 data_list[i]->book_size *= 2;
9919 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9920 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9921 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9923 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9924 (uint32_t)(data_list[i]->send_size +
9925 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
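/*
 * Charge the retransmitted bytes (plus the configured per-chunk
 * overhead) against the peer's advertised receive window.
 */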
9927 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9928 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9929 data_list[i]->whoTo->flight_size,
9930 data_list[i]->book_size,
9931 (uint32_t)(uintptr_t)data_list[i]->whoTo,
9932 data_list[i]->rec.data.tsn);
9934 sctp_flight_size_increase(data_list[i]);
9935 sctp_total_flight_increase(stcb, data_list[i]);
9936 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9937 /* SWS sender side engages */
9938 asoc->peers_rwnd = 0;
9941 (data_list[i]->rec.data.doing_fast_retransmit)) {
9942 SCTP_STAT_INCR(sctps_sendfastretrans);
9943 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9944 (tmr_started == 0)) {
9946 * ok we just fast-retrans'd
9947 * the lowest TSN, i.e. the
9948 * first on the list. In
9949 * this case we want to give
9950 * some more time to get a
9951 * SACK back without a
9954 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9955 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
9956 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9960 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9961 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9963 #ifdef SCTP_AUDITING_ENABLED
9964 sctp_auditing(21, inp, stcb, NULL);
9970 if (asoc->sent_queue_retran_cnt <= 0) {
9971 /* all done we have no more to retran */
9972 asoc->sent_queue_retran_cnt = 0;
9976 /* No more room in rwnd */
9979 /* stop the for loop here. we sent out a packet */
9986 sctp_timer_validation(struct sctp_inpcb *inp,
9987 struct sctp_tcb *stcb,
9988 struct sctp_association *asoc)
9990 struct sctp_nets *net;
9992 /* Validate that a timer is running somewhere */
9993 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9994 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9995 /* Here is a timer */
9999 SCTP_TCB_LOCK_ASSERT(stcb);
10000 /* Gak, we did not have a timer somewhere */
10001 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
10002 if (asoc->alternate) {
10003 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
10005 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
10011 sctp_chunk_output(struct sctp_inpcb *inp,
10012 struct sctp_tcb *stcb,
10015 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10021 * Ok this is the generic chunk service queue. We must do the following:
10023 * - See if there are retransmits pending; if so we must do these first.
10025 * - Service the stream queue that is next, moving any
10026 * message (note I must get a complete message i.e.
10027 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning TSN's.
10029 * - Check to see if the cwnd/rwnd allows any output, if so we
10030 * go ahead and formulate and send the low level chunks. Making sure
10031 * to combine any control in the control chunk queue also.
10033 struct sctp_association *asoc;
10034 struct sctp_nets *net;
10035 int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
10036 unsigned int burst_cnt = 0;
10037 struct timeval now;
10038 int now_filled = 0;
10040 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10043 unsigned int tot_frs = 0;
10045 asoc = &stcb->asoc;
10047 /* The Nagle algorithm is only applied when handling a send call. */
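/*
 * With NODELAY off the intent is to hold back small user sends while
 * data is still in flight; see the un_sent check further below.
 */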
10048 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10049 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10057 SCTP_TCB_LOCK_ASSERT(stcb);
10059 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10061 if ((un_sent <= 0) &&
10062 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10063 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10064 (asoc->sent_queue_retran_cnt == 0) &&
10065 (asoc->trigger_reset == 0)) {
10066 /* Nothing to do unless there is something left to be sent */
10070 * Do we have something to send, data or control, AND a sack timer
10071 * running? If so, piggy-back the sack.
10073 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10074 sctp_send_sack(stcb, so_locked);
10075 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
10077 while (asoc->sent_queue_retran_cnt) {
10079 * Ok, it is retransmission time only, we send out only ONE
10080 * packet with a single call off to the retran code.
10082 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10084 * Special hook for handling cookies discarded
10085 * by the peer that carried data. Send the cookie-ack only
10086 * and then the next call will get the retransmissions.
10088 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10090 &now, &now_filled, frag_point, so_locked);
10092 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10093 /* if it's not from a HB then do it */
10095 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10101 * it's from any other place, we don't allow retran
10102 * output (only control)
10107 /* Can't send anymore */
10109 * now let's push out control by calling med-level
10110 * output once. This assures that we WILL send HB's
10113 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10115 &now, &now_filled, frag_point, so_locked);
10116 #ifdef SCTP_AUDITING_ENABLED
10117 sctp_auditing(8, inp, stcb, NULL);
10119 sctp_timer_validation(inp, stcb, asoc);
10124 * The count was off.. retran is not happening so do
10125 * the normal retransmission.
10127 #ifdef SCTP_AUDITING_ENABLED
10128 sctp_auditing(9, inp, stcb, NULL);
10130 if (ret == SCTP_RETRAN_EXIT) {
10135 if (from_where == SCTP_OUTPUT_FROM_T3) {
10136 /* Only one transmission allowed out of a timeout */
10137 #ifdef SCTP_AUDITING_ENABLED
10138 sctp_auditing(10, inp, stcb, NULL);
10140 /* Push out any control */
10141 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10142 &now, &now_filled, frag_point, so_locked);
10145 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10146 /* Hit FR burst limit */
10149 if ((num_out == 0) && (ret == 0)) {
10150 /* No more retrans to send */
10154 #ifdef SCTP_AUDITING_ENABLED
10155 sctp_auditing(12, inp, stcb, NULL);
10157 /* Check for bad destinations, if they exist move chunks around. */
10158 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10159 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10161 * if possible move things off of this address; we
10162 * still may send below due to the dormant state, but
10163 * we try to find an alternate address to send to,
10164 * and if we have one we move all queued data on the
10165 * out wheel to this alternate address.
10167 if (net->ref_count > 1)
10168 sctp_move_chunks_from_net(stcb, net);
10171 * if ((asoc->sat_network) || (net->addr_is_local))
10172 * { burst_limit = asoc->max_burst *
10173 * SCTP_SAT_NETWORK_BURST_INCR; }
10175 if (asoc->max_burst > 0) {
10176 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10177 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10179 * JRS - Use the congestion
10180 * control given in the
10181 * congestion control module
10183 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10185 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10187 SCTP_STAT_INCR(sctps_maxburstqueued);
10189 net->fast_retran_ip = 0;
10191 if (net->flight_size == 0) {
10193 * Should be decaying the
10205 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10206 &reason_code, 0, from_where,
10207 &now, &now_filled, frag_point, so_locked);
10209 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10210 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10211 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10213 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10214 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10215 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10219 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10221 tot_out += num_out;
10223 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10224 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10225 if (num_out == 0) {
10226 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10231 * When the Nagle algorithm is used, look at how
10232 * much is unsent, then if it's smaller than an MTU
10233 * and we have data in flight we stop, except if we
10234 * are handling a fragmented user message.
10236 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
10237 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10238 (stcb->asoc.total_flight > 0)) {
10239 /* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10243 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10244 TAILQ_EMPTY(&asoc->send_queue) &&
10245 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10246 /* Nothing left to send */
10249 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10250 /* Nothing left to send */
10253 } while (num_out &&
10254 ((asoc->max_burst == 0) ||
10255 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10256 (burst_cnt < asoc->max_burst)));
10258 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10259 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10260 SCTP_STAT_INCR(sctps_maxburstqueued);
10261 asoc->burst_limit_applied = 1;
10262 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10263 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10266 asoc->burst_limit_applied = 0;
10269 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10270 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10272 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10276 * Now we need to clean up the control chunk chain if an ECNE is on
10277 * it. It must be marked as UNSENT again so the next call will continue
10278 * to send it until we get a CWR to remove it.
10280 if (stcb->asoc.ecn_echo_cnt_onq)
10281 sctp_fix_ecn_echo(asoc);
10283 if (stcb->asoc.trigger_reset) {
10284 if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) {
10294 struct sctp_inpcb *inp,
10296 struct sockaddr *addr,
10297 struct mbuf *control,
10302 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10306 if (inp->sctp_socket == NULL) {
10307 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10310 return (sctp_sosend(inp->sctp_socket,
10312 (struct uio *)NULL,
10320 send_forward_tsn(struct sctp_tcb *stcb,
10321 struct sctp_association *asoc)
10323 struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10324 struct sctp_forward_tsn_chunk *fwdtsn;
10325 struct sctp_strseq *strseq;
10326 struct sctp_strseq_mid *strseq_m;
10327 uint32_t advance_peer_ack_point;
10328 unsigned int cnt_of_space, i, ovh;
10329 unsigned int space_needed;
10330 unsigned int cnt_of_skipped = 0;
10332 SCTP_TCB_LOCK_ASSERT(stcb);
10333 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10334 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10335 /* mark it to unsent */
10336 chk->sent = SCTP_DATAGRAM_UNSENT;
10337 chk->snd_count = 0;
10338 /* Do we correct its output location? */
10340 sctp_free_remote_addr(chk->whoTo);
10343 goto sctp_fill_in_rest;
10346 /* Ok if we reach here we must build one */
10347 sctp_alloc_a_chunk(stcb, chk);
10351 asoc->fwd_tsn_cnt++;
10352 chk->copy_by_ref = 0;
10354 * We don't do the old thing here since this is not used on the wire
10355 * but only to tell if we are sending a fwd-tsn by the stack during
10356 * output. And if it's an IFORWARD or a FORWARD it is a fwd-tsn.
10358 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10359 chk->rec.chunk_id.can_take_data = 0;
10363 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10364 if (chk->data == NULL) {
10365 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10368 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10369 chk->sent = SCTP_DATAGRAM_UNSENT;
10370 chk->snd_count = 0;
10371 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10372 asoc->ctrl_queue_cnt++;
10375 * Here we go through and fill out the part that deals with
10376 * stream/seq of the ones we skip.
10378 SCTP_BUF_LEN(chk->data) = 0;
10379 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10380 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10381 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10382 /* no more to look at */
10385 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10386 /* We don't report these */
10391 if (asoc->idata_supported) {
10392 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10393 (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10395 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10396 (cnt_of_skipped * sizeof(struct sctp_strseq)));
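/*
 * I-FORWARD-TSN entries (struct sctp_strseq_mid) are larger than
 * plain FORWARD-TSN entries (struct sctp_strseq) because they carry
 * flags and a 32-bit MID instead of a 16-bit SSN.
 */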
10398 cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
10400 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10401 ovh = SCTP_MIN_OVERHEAD;
10403 ovh = SCTP_MIN_V4_OVERHEAD;
10405 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10406 /* trim to a mtu size */
10407 cnt_of_space = asoc->smallest_mtu - ovh;
10409 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10410 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10411 0xff, 0, cnt_of_skipped,
10412 asoc->advanced_peer_ack_point);
10414 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10415 if (cnt_of_space < space_needed) {
10417 * ok we must trim down the chunk by lowering the
10418 * advance peer ack point.
10420 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10421 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10422 0xff, 0xff, cnt_of_space,
10425 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10426 if (asoc->idata_supported) {
10427 cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10429 cnt_of_skipped /= sizeof(struct sctp_strseq);
10432 * Go through and find the TSN that will be the one we report.
10435 at = TAILQ_FIRST(&asoc->sent_queue);
10437 for (i = 0; i < cnt_of_skipped; i++) {
10438 tp1 = TAILQ_NEXT(at, sctp_next);
10445 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10446 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10447 0xff, cnt_of_skipped, at->rec.data.tsn,
10448 asoc->advanced_peer_ack_point);
10452 * last now points to the last one I can report, update the advance peer ack point.
10456 advance_peer_ack_point = last->rec.data.tsn;
10458 if (asoc->idata_supported) {
10459 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10460 cnt_of_skipped * sizeof(struct sctp_strseq_mid);
10462 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10463 cnt_of_skipped * sizeof(struct sctp_strseq);
10466 chk->send_size = space_needed;
10467 /* Setup the chunk */
10468 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10469 fwdtsn->ch.chunk_length = htons(chk->send_size);
10470 fwdtsn->ch.chunk_flags = 0;
10471 if (asoc->idata_supported) {
10472 fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
10474 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10476 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10477 SCTP_BUF_LEN(chk->data) = chk->send_size;
10480 * Move pointer to after the fwdtsn and transfer to the
10483 if (asoc->idata_supported) {
10484 strseq_m = (struct sctp_strseq_mid *)fwdtsn;
10487 strseq = (struct sctp_strseq *)fwdtsn;
10491 * Now populate the strseq list. This is done blindly
10492 * without pulling out duplicate stream info. This is
10493 * inefficient but won't harm the process since the peer will
10494 * look at these in sequence and will thus release anything.
10495 * It could mean we exceed the PMTU and chop off some that
10496 * we could have included.. but this is unlikely (aka 1432/4
10497 * would mean 300+ stream seq's would have to be reported in
10498 * one FWD-TSN). With a bit of work we can later FIX this to
10499 * optimize and pull out duplicates.. but it does add more
10500 * overhead. So for now... not!
10503 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10504 if (i >= cnt_of_skipped) {
10507 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10508 /* We don't report these */
10511 if (at->rec.data.tsn == advance_peer_ack_point) {
10512 at->rec.data.fwd_tsn_cnt = 0;
10514 if (asoc->idata_supported) {
10515 strseq_m->sid = htons(at->rec.data.sid);
10516 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10517 strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
10519 strseq_m->flags = 0;
10521 strseq_m->mid = htonl(at->rec.data.mid);
10524 strseq->sid = htons(at->rec.data.sid);
10525 strseq->ssn = htons((uint16_t)at->rec.data.mid);
10534 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10535 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10541 * Queue up a SACK or NR-SACK in the control queue.
10542 * We must first check to see if a SACK or NR-SACK is
10543 * somehow on the control queue.
10544 * If so, we will take it and remove the old one.
10546 struct sctp_association *asoc;
10547 struct sctp_tmit_chunk *chk, *a_chk;
10548 struct sctp_sack_chunk *sack;
10549 struct sctp_nr_sack_chunk *nr_sack;
10550 struct sctp_gap_ack_block *gap_descriptor;
10551 const struct sack_track *selector;
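/*
 * selector indexes the precomputed sack_array table, which maps each
 * mapping array byte value to its gap ack block edges (at most
 * SCTP_MAX_GAPS_INARRAY per byte).
 */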
10556 int limit_reached = 0;
10557 unsigned int i, siz, j;
10558 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10561 uint32_t highest_tsn;
10566 if (stcb->asoc.nrsack_supported == 1) {
10567 type = SCTP_NR_SELECTIVE_ACK;
10569 type = SCTP_SELECTIVE_ACK;
10572 asoc = &stcb->asoc;
10573 SCTP_TCB_LOCK_ASSERT(stcb);
10574 if (asoc->last_data_chunk_from == NULL) {
10575 /* Hmm we never received anything */
10578 sctp_slide_mapping_arrays(stcb);
10579 sctp_set_rwnd(stcb, asoc);
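/*
 * Mapping arrays compacted and advertised rwnd refreshed; now build
 * the SACK itself.
 */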
10580 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10581 if (chk->rec.chunk_id.id == type) {
10582 /* Hmm, found a sack already on queue, remove it */
10583 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10584 asoc->ctrl_queue_cnt--;
10587 sctp_m_freem(a_chk->data);
10588 a_chk->data = NULL;
10590 if (a_chk->whoTo) {
10591 sctp_free_remote_addr(a_chk->whoTo);
10592 a_chk->whoTo = NULL;
10597 if (a_chk == NULL) {
10598 sctp_alloc_a_chunk(stcb, a_chk);
10599 if (a_chk == NULL) {
10600 /* No memory so we drop the idea, and set a timer */
10601 if (stcb->asoc.delayed_ack) {
10602 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10603 stcb->sctp_ep, stcb, NULL,
10604 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
10605 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10606 stcb->sctp_ep, stcb, NULL);
10608 stcb->asoc.send_sack = 1;
10612 a_chk->copy_by_ref = 0;
10613 a_chk->rec.chunk_id.id = type;
10614 a_chk->rec.chunk_id.can_take_data = 1;
10616 /* Clear our pkt counts */
10617 asoc->data_pkts_seen = 0;
10620 a_chk->asoc = asoc;
10621 a_chk->snd_count = 0;
10622 a_chk->send_size = 0; /* fill in later */
10623 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10624 a_chk->whoTo = NULL;
10626 if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
10628	 * Ok, the destination for the SACK is unreachable; let's see if
10629	 * we can select an alternate to asoc->last_data_chunk_from
10631 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10632 if (a_chk->whoTo == NULL) {
10633 /* Nope, no alternate */
10634 a_chk->whoTo = asoc->last_data_chunk_from;
10637 a_chk->whoTo = asoc->last_data_chunk_from;
10639 if (a_chk->whoTo) {
10640 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10642 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10643 highest_tsn = asoc->highest_tsn_inside_map;
10645 highest_tsn = asoc->highest_tsn_inside_nr_map;
10647 if (highest_tsn == asoc->cumulative_tsn) {
10649 if (type == SCTP_SELECTIVE_ACK) {
10650 space_req = sizeof(struct sctp_sack_chunk);
10652 space_req = sizeof(struct sctp_nr_sack_chunk);
10655 /* gaps get a cluster */
10656 space_req = MCLBYTES;
10658	/* Ok, now let's formulate an mbuf with our SACK */
10659 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
10660 if ((a_chk->data == NULL) ||
10661 (a_chk->whoTo == NULL)) {
10662 /* rats, no mbuf memory */
10664 /* was a problem with the destination */
10665 sctp_m_freem(a_chk->data);
10666 a_chk->data = NULL;
10668 sctp_free_a_chunk(stcb, a_chk, so_locked);
10669 /* sa_ignore NO_NULL_CHK */
10670 if (stcb->asoc.delayed_ack) {
10671 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10672 stcb->sctp_ep, stcb, NULL,
10673 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
10674 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10675 stcb->sctp_ep, stcb, NULL);
10677 stcb->asoc.send_sack = 1;
10681 /* ok, lets go through and fill it in */
10682 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10683 space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
10684 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10685 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10687 limit = mtod(a_chk->data, caddr_t);
10692 if ((asoc->sctp_cmt_on_off > 0) &&
10693 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10695	 * CMT DAC algorithm: if two (i.e., binary 10) packets have been
10696	 * received, then set the high bit to 1, else 0. Reset pkts_rcvd.
10699 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10700 asoc->cmt_dac_pkts_rcvd = 0;
10702 #ifdef SCTP_ASOCLOG_OF_TSNS
10703 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10704 stcb->asoc.cumack_log_atsnt++;
10705 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10706 stcb->asoc.cumack_log_atsnt = 0;
10709	/* reset the reader's interpretation */
10710 stcb->freed_by_sorcv_sincelast = 0;
10712 if (type == SCTP_SELECTIVE_ACK) {
10713 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10715 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10716 if (highest_tsn > asoc->mapping_array_base_tsn) {
10717 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10719 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10723 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10724 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10725 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10726 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10728 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10732 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10735 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10737 if (((type == SCTP_SELECTIVE_ACK) &&
10738 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10739 ((type == SCTP_NR_SELECTIVE_ACK) &&
10740 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10741 /* we have a gap .. maybe */
10742 for (i = 0; i < siz; i++) {
10743 tsn_map = asoc->mapping_array[i];
10744 if (type == SCTP_SELECTIVE_ACK) {
10745 tsn_map |= asoc->nr_mapping_array[i];
10749	 * Clear all bits corresponding to TSNs
10750	 * smaller than or equal to the cumulative TSN.
10752 tsn_map &= (~0U << (1 - offset));
10754 selector = &sack_array[tsn_map];
10755 if (mergeable && selector->right_edge) {
10757 * Backup, left and right edges were ok to
10763 if (selector->num_entries == 0)
10766 for (j = 0; j < selector->num_entries; j++) {
10767 if (mergeable && selector->right_edge) {
10769 * do a merge by NOT setting
10775 * no merge, set the left
10779 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10781 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10784 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10790 if (selector->left_edge) {
10794 if (limit_reached) {
10795	/* Reached the limit, stop */
10801 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10802 (limit_reached == 0)) {
10806 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10807 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10809 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10812 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10815 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10817 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10818 /* we have a gap .. maybe */
10819 for (i = 0; i < siz; i++) {
10820 tsn_map = asoc->nr_mapping_array[i];
10823 * Clear all bits corresponding to
10824 * TSNs smaller or equal to the
10827 tsn_map &= (~0U << (1 - offset));
10829 selector = &sack_array[tsn_map];
10830 if (mergeable && selector->right_edge) {
10832 * Backup, left and right edges were
10835 num_nr_gap_blocks--;
10838 if (selector->num_entries == 0)
10841 for (j = 0; j < selector->num_entries; j++) {
10842 if (mergeable && selector->right_edge) {
10844 * do a merge by NOT
10851 * no merge, set the
10855 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10857 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10858 num_nr_gap_blocks++;
10860 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10866 if (selector->left_edge) {
10870 if (limit_reached) {
10871	/* Reached the limit, stop */
10878 /* now we must add any dups we are going to report. */
10879 if ((limit_reached == 0) && (asoc->numduptsns)) {
10880 dup = (uint32_t *)gap_descriptor;
10881 for (i = 0; i < asoc->numduptsns; i++) {
10882 *dup = htonl(asoc->dup_tsns[i]);
10885 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10890 asoc->numduptsns = 0;
10893	 * now that the chunk is prepared, queue it on the control chunk
10896 if (type == SCTP_SELECTIVE_ACK) {
10897 a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
10898 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10899 num_dups * sizeof(int32_t));
10900 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10901 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10902 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10903 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10904 sack->sack.num_dup_tsns = htons(num_dups);
10905 sack->ch.chunk_type = type;
10906 sack->ch.chunk_flags = flags;
10907 sack->ch.chunk_length = htons(a_chk->send_size);
10909 a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
10910 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10911 num_dups * sizeof(int32_t));
10912 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10913 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10914 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10915 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10916 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10917 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10918 nr_sack->nr_sack.reserved = 0;
10919 nr_sack->ch.chunk_type = type;
10920 nr_sack->ch.chunk_flags = flags;
10921 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10923 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10924 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10925 asoc->ctrl_queue_cnt++;
10926 asoc->send_sack = 0;
10927 SCTP_STAT_INCR(sctps_sendsacks);
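/*
 * Illustrative sketch (not part of the original code): how many bytes of the
 * receive mapping array have to be scanned when the gap-ack blocks above are
 * built.  Each byte of the mapping array covers eight TSNs starting at
 * mapping_array_base_tsn, so the count is a ceiling division.  The TSN
 * wrap-around case handled separately in the code above is ignored here, and
 * the parameter names are local to this example.
 */
static inline unsigned int
example_mapping_bytes_to_scan(uint32_t highest_tsn, uint32_t base_tsn)
{
	/* TSNs to cover, rounded up to a whole byte of the bitmap */
	return ((((highest_tsn - base_tsn) + 1) + 7) / 8);
}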
10932 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10933 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10938 struct mbuf *m_abort, *m, *m_last;
10939 struct mbuf *m_out, *m_end = NULL;
10940 struct sctp_abort_chunk *abort;
10941 struct sctp_auth_chunk *auth = NULL;
10942 struct sctp_nets *net;
10944 uint32_t auth_offset = 0;
10946 uint16_t cause_len, chunk_len, padding_len;
10948 SCTP_TCB_LOCK_ASSERT(stcb);
10950	 * Add an AUTH chunk if this chunk requires it, and save the offset
10951	 * into the chain for AUTH
10953 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10954 stcb->asoc.peer_auth_chunks)) {
10955 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10956 stcb, SCTP_ABORT_ASSOCIATION);
10957 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10961 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
10962 if (m_abort == NULL) {
10964 sctp_m_freem(m_out);
10967 sctp_m_freem(operr);
10971 /* link in any error */
10972 SCTP_BUF_NEXT(m_abort) = operr;
10975 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10976 cause_len += (uint16_t)SCTP_BUF_LEN(m);
10977 if (SCTP_BUF_NEXT(m) == NULL) {
10981 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10982 chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
10983 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
10984 if (m_out == NULL) {
10985 /* NO Auth chunk prepended, so reserve space in front */
10986 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10989 /* Put AUTH chunk at the front of the chain */
10990 SCTP_BUF_NEXT(m_end) = m_abort;
10992 if (stcb->asoc.alternate) {
10993 net = stcb->asoc.alternate;
10995 net = stcb->asoc.primary_destination;
10997 /* Fill in the ABORT chunk header. */
10998 abort = mtod(m_abort, struct sctp_abort_chunk *);
10999 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11000 if (stcb->asoc.peer_vtag == 0) {
11001 /* This happens iff the assoc is in COOKIE-WAIT state. */
11002 vtag = stcb->asoc.my_vtag;
11003 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
11005 vtag = stcb->asoc.peer_vtag;
11006 abort->ch.chunk_flags = 0;
11008 abort->ch.chunk_length = htons(chunk_len);
11009 /* Add padding, if necessary. */
11010 if (padding_len > 0) {
11011 if ((m_last == NULL) ||
11012 (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
11013 sctp_m_freem(m_out);
11017 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11018 (struct sockaddr *)&net->ro._l_addr,
11019 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
11020 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
11021 stcb->asoc.primary_destination->port, NULL,
11024 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11025 if (error == ENOBUFS) {
11026 stcb->asoc.ifp_had_enobuf = 1;
11027 SCTP_STAT_INCR(sctps_lowlevelerr);
11030 stcb->asoc.ifp_had_enobuf = 0;
11032 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
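/*
 * Illustrative sketch (not part of the original code): the padding computation
 * used above when error causes are appended to the ABORT.  SCTP chunks are
 * padded to a multiple of four bytes, so the pad is the distance from
 * chunk_len to the next 32-bit boundary (0 when already aligned); this is the
 * same value as SCTP_SIZE32(chunk_len) - chunk_len.
 */
static inline uint16_t
example_chunk_padding(uint16_t chunk_len)
{
	return ((uint16_t)((4 - (chunk_len % 4)) % 4));
}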
11036 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11037 struct sctp_nets *net,
11040 /* formulate and SEND a SHUTDOWN-COMPLETE */
11041 struct mbuf *m_shutdown_comp;
11042 struct sctp_shutdown_complete_chunk *shutdown_complete;
11047 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11048 if (m_shutdown_comp == NULL) {
11052 if (reflect_vtag) {
11053 flags = SCTP_HAD_NO_TCB;
11054 vtag = stcb->asoc.my_vtag;
11057 vtag = stcb->asoc.peer_vtag;
11059 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11060 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11061 shutdown_complete->ch.chunk_flags = flags;
11062 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11063 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11064 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11065 (struct sockaddr *)&net->ro._l_addr,
11066 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11067 stcb->sctp_ep->sctp_lport, stcb->rport,
11071 SCTP_SO_NOT_LOCKED))) {
11072 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11073 if (error == ENOBUFS) {
11074 stcb->asoc.ifp_had_enobuf = 1;
11075 SCTP_STAT_INCR(sctps_lowlevelerr);
11078 stcb->asoc.ifp_had_enobuf = 0;
11080 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11085 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11086 struct sctphdr *sh, uint32_t vtag,
11087 uint8_t type, struct mbuf *cause,
11088 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11089 uint32_t vrf_id, uint16_t port)
11091 struct mbuf *o_pak;
11093 struct sctphdr *shout;
11094 struct sctp_chunkhdr *ch;
11095 #if defined(INET) || defined(INET6)
11096 struct udphdr *udp;
11098 int ret, len, cause_len, padding_len;
11100 struct sockaddr_in *src_sin, *dst_sin;
11104 struct sockaddr_in6 *src_sin6, *dst_sin6;
11105 struct ip6_hdr *ip6;
11108 /* Compute the length of the cause and add final padding. */
11110 if (cause != NULL) {
11111 struct mbuf *m_at, *m_last = NULL;
11113 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11114 if (SCTP_BUF_NEXT(m_at) == NULL)
11116 cause_len += SCTP_BUF_LEN(m_at);
11118 padding_len = cause_len % 4;
11119 if (padding_len != 0) {
11120 padding_len = 4 - padding_len;
11122 if (padding_len != 0) {
11123 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11124 sctp_m_freem(cause);
11131 /* Get an mbuf for the header. */
11132 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11133 switch (dst->sa_family) {
11136 len += sizeof(struct ip);
11141 len += sizeof(struct ip6_hdr);
11147 #if defined(INET) || defined(INET6)
11149 len += sizeof(struct udphdr);
11152 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11153 if (mout == NULL) {
11155 sctp_m_freem(cause);
11159 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11160 SCTP_BUF_LEN(mout) = len;
11161 SCTP_BUF_NEXT(mout) = cause;
11162 M_SETFIB(mout, fibnum);
11163 mout->m_pkthdr.flowid = mflowid;
11164 M_HASHTYPE_SET(mout, mflowtype);
11171 switch (dst->sa_family) {
11174 src_sin = (struct sockaddr_in *)src;
11175 dst_sin = (struct sockaddr_in *)dst;
11176 ip = mtod(mout, struct ip *);
11177 ip->ip_v = IPVERSION;
11178 ip->ip_hl = (sizeof(struct ip) >> 2);
11180 ip->ip_off = htons(IP_DF);
11182 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11184 ip->ip_p = IPPROTO_UDP;
11186 ip->ip_p = IPPROTO_SCTP;
11188 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11189 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11191 len = sizeof(struct ip);
11192 shout = (struct sctphdr *)((caddr_t)ip + len);
11197 src_sin6 = (struct sockaddr_in6 *)src;
11198 dst_sin6 = (struct sockaddr_in6 *)dst;
11199 ip6 = mtod(mout, struct ip6_hdr *);
11200 ip6->ip6_flow = htonl(0x60000000);
11201 if (V_ip6_auto_flowlabel) {
11202 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11204 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11206 ip6->ip6_nxt = IPPROTO_UDP;
11208 ip6->ip6_nxt = IPPROTO_SCTP;
11210 ip6->ip6_src = dst_sin6->sin6_addr;
11211 ip6->ip6_dst = src_sin6->sin6_addr;
11212 len = sizeof(struct ip6_hdr);
11213 shout = (struct sctphdr *)((caddr_t)ip6 + len);
11218 shout = mtod(mout, struct sctphdr *);
11221 #if defined(INET) || defined(INET6)
11223 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11224 sctp_m_freem(mout);
11227 udp = (struct udphdr *)shout;
11228 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11229 udp->uh_dport = port;
11231 udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
11232 sizeof(struct sctphdr) +
11233 sizeof(struct sctp_chunkhdr) +
11234 cause_len + padding_len));
11235 len += sizeof(struct udphdr);
11236 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11241 shout->src_port = sh->dest_port;
11242 shout->dest_port = sh->src_port;
11243 shout->checksum = 0;
11245 shout->v_tag = htonl(vtag);
11247 shout->v_tag = sh->v_tag;
11249 len += sizeof(struct sctphdr);
11250 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11251 ch->chunk_type = type;
11253 ch->chunk_flags = 0;
11255 ch->chunk_flags = SCTP_HAD_NO_TCB;
11257 ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
11258 len += sizeof(struct sctp_chunkhdr);
11259 len += cause_len + padding_len;
11261 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11262 sctp_m_freem(mout);
11265 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11266 switch (dst->sa_family) {
11271 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11276 ip->ip_len = htons(len);
11278 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11279 SCTP_STAT_INCR(sctps_sendswcrc);
11281 SCTP_ENABLE_UDP_CSUM(o_pak);
11284 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11285 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11286 SCTP_STAT_INCR(sctps_sendhwcrc);
11288 #ifdef SCTP_PACKET_LOGGING
11289 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11290 sctp_packet_log(o_pak);
11293 SCTP_PROBE5(send, NULL, NULL, ip, NULL, shout);
11294 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11299 ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
11301 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11302 SCTP_STAT_INCR(sctps_sendswcrc);
11303 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11304 udp->uh_sum = 0xffff;
11307 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11308 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11309 SCTP_STAT_INCR(sctps_sendhwcrc);
11311 #ifdef SCTP_PACKET_LOGGING
11312 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11313 sctp_packet_log(o_pak);
11316 SCTP_PROBE5(send, NULL, NULL, ip6, NULL, shout);
11317 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11321 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11323 sctp_m_freem(mout);
11324 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11327 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
11329 UDPSTAT_INC(udps_opackets);
11331 SCTP_STAT_INCR(sctps_sendpackets);
11332 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11333 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11335 SCTP_STAT_INCR(sctps_senderrors);
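/*
 * Illustrative sketch (not part of the original code): the header handling for
 * a response sent without an association, as done by sctp_send_resp_msg()
 * above.  The ports of the incoming packet are swapped, and either a known
 * verification tag is used or the peer's tag is reflected together with the
 * T bit (SCTP_HAD_NO_TCB).  The struct and names here are local to this
 * example.
 */
struct example_resp_hdr {
	uint16_t src_port;	/* network byte order */
	uint16_t dst_port;	/* network byte order */
	uint32_t v_tag;		/* network byte order */
	uint8_t	 chunk_flags;
};

static inline void
example_fill_resp_hdr(struct example_resp_hdr *out, uint16_t in_src_port,
    uint16_t in_dst_port, uint32_t in_v_tag, uint32_t known_vtag)
{
	out->src_port = in_dst_port;	/* reply goes back the way it came */
	out->dst_port = in_src_port;
	if (known_vtag != 0) {
		out->v_tag = htonl(known_vtag);
		out->chunk_flags = 0;
	} else {
		out->v_tag = in_v_tag;		/* reflect the peer's tag */
		out->chunk_flags = SCTP_HAD_NO_TCB;
	}
}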
11341 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11342 struct sctphdr *sh,
11343 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11344 uint32_t vrf_id, uint16_t port)
11346 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11347 mflowtype, mflowid, fibnum,
11352 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11353 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11358 struct sctp_tmit_chunk *chk;
11359 struct sctp_heartbeat_chunk *hb;
11360 struct timeval now;
11362 SCTP_TCB_LOCK_ASSERT(stcb);
11366 (void)SCTP_GETTIME_TIMEVAL(&now);
11367 switch (net->ro._l_addr.sa.sa_family) {
11379 sctp_alloc_a_chunk(stcb, chk);
11381 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11385 chk->copy_by_ref = 0;
11386 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11387 chk->rec.chunk_id.can_take_data = 1;
11389 chk->asoc = &stcb->asoc;
11390 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11392 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11393 if (chk->data == NULL) {
11394 sctp_free_a_chunk(stcb, chk, so_locked);
11397 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11398 SCTP_BUF_LEN(chk->data) = chk->send_size;
11399 chk->sent = SCTP_DATAGRAM_UNSENT;
11400 chk->snd_count = 0;
11402 atomic_add_int(&chk->whoTo->ref_count, 1);
11403	/* Now we have an mbuf that we can fill in with the details */
11404 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11405 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11406 /* fill out chunk header */
11407 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11408 hb->ch.chunk_flags = 0;
11409 hb->ch.chunk_length = htons(chk->send_size);
11410 /* Fill out hb parameter */
11411 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11412 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11413 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11414 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
11415	/* Did our user request this one? Put it in */
11416 hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
11417 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11418 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11420	 * we only take from the entropy pool if the address is not confirmed.
11423 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11424 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11426 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11427 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11429 switch (net->ro._l_addr.sa.sa_family) {
11432 memcpy(hb->heartbeat.hb_info.address,
11433 &net->ro._l_addr.sin.sin_addr,
11434 sizeof(net->ro._l_addr.sin.sin_addr));
11439 memcpy(hb->heartbeat.hb_info.address,
11440 &net->ro._l_addr.sin6.sin6_addr,
11441 sizeof(net->ro._l_addr.sin6.sin6_addr));
11446 sctp_m_freem(chk->data);
11449 sctp_free_a_chunk(stcb, chk, so_locked);
11453 net->hb_responded = 0;
11454 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11455 stcb->asoc.ctrl_queue_cnt++;
11456 SCTP_STAT_INCR(sctps_sendheartbeat);
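/*
 * Illustrative sketch (not part of the original code): the heartbeat parameter
 * built above carries the send time in time_value_1/time_value_2 so that the
 * RTT can be computed when the HEARTBEAT-ACK echoes it back.  The actual RTT
 * handling lives in the input path; this only shows the arithmetic, with
 * parameter names local to this example.
 */
static inline int64_t
example_hb_rtt_usec(uint32_t sent_sec, uint32_t sent_usec,
    uint32_t now_sec, uint32_t now_usec)
{
	return (((int64_t)now_sec - (int64_t)sent_sec) * 1000000 +
	    ((int64_t)now_usec - (int64_t)sent_usec));
}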
11461 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11464 struct sctp_association *asoc;
11465 struct sctp_ecne_chunk *ecne;
11466 struct sctp_tmit_chunk *chk;
11471 asoc = &stcb->asoc;
11472 SCTP_TCB_LOCK_ASSERT(stcb);
11473 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11474 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11475	/* found a previous ECN_ECHO, update it if needed */
11476 uint32_t cnt, ctsn;
11478 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11479 ctsn = ntohl(ecne->tsn);
11480 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11481 ecne->tsn = htonl(high_tsn);
11482 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11484 cnt = ntohl(ecne->num_pkts_since_cwr);
11486 ecne->num_pkts_since_cwr = htonl(cnt);
11490	/* nope, could not find one to update, so we must build one */
11491 sctp_alloc_a_chunk(stcb, chk);
11495 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11496 chk->copy_by_ref = 0;
11497 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11498 chk->rec.chunk_id.can_take_data = 0;
11500 chk->asoc = &stcb->asoc;
11501 chk->send_size = sizeof(struct sctp_ecne_chunk);
11502 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11503 if (chk->data == NULL) {
11504 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11507 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11508 SCTP_BUF_LEN(chk->data) = chk->send_size;
11509 chk->sent = SCTP_DATAGRAM_UNSENT;
11510 chk->snd_count = 0;
11512 atomic_add_int(&chk->whoTo->ref_count, 1);
11514 stcb->asoc.ecn_echo_cnt_onq++;
11515 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11516 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11517 ecne->ch.chunk_flags = 0;
11518 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11519 ecne->tsn = htonl(high_tsn);
11520 ecne->num_pkts_since_cwr = htonl(1);
11521 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11522 asoc->ctrl_queue_cnt++;
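/*
 * Illustrative sketch (not part of the original code): the kind of wrap-safe
 * comparison that SCTP_TSN_GT() performs when deciding whether to update the
 * TSN carried in a queued ECN-ECHO above.  TSNs are 32-bit serial numbers, so
 * a plain ">" misbehaves across the 2^32 wrap; comparing the signed difference
 * handles that.  This is a common idiom shown for illustration, not the
 * macro's literal definition.
 */
static inline int
example_tsn_gt(uint32_t a, uint32_t b)
{
	return ((int32_t)(a - b) > 0);
}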
11526 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11527 struct mbuf *m, int len, int iphlen, int bad_crc)
11529 struct sctp_association *asoc;
11530 struct sctp_pktdrop_chunk *drp;
11531 struct sctp_tmit_chunk *chk;
11537 struct sctp_chunkhdr *ch, chunk_buf;
11538 unsigned int chk_length;
11543 asoc = &stcb->asoc;
11544 SCTP_TCB_LOCK_ASSERT(stcb);
11545 if (asoc->pktdrop_supported == 0) {
11547 * peer must declare support before I send one.
11551 if (stcb->sctp_socket == NULL) {
11554 sctp_alloc_a_chunk(stcb, chk);
11558 chk->copy_by_ref = 0;
11559 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11560 chk->rec.chunk_id.can_take_data = 1;
11563 chk->send_size = len;
11564 /* Validate that we do not have an ABORT in here. */
11565 offset = iphlen + sizeof(struct sctphdr);
11566 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11567 sizeof(*ch), (uint8_t *)&chunk_buf);
11568 while (ch != NULL) {
11569 chk_length = ntohs(ch->chunk_length);
11570 if (chk_length < sizeof(*ch)) {
11571 /* break to abort land */
11574 switch (ch->chunk_type) {
11575 case SCTP_PACKET_DROPPED:
11576 case SCTP_ABORT_ASSOCIATION:
11577 case SCTP_INITIATION_ACK:
11579	 * We don't respond with a PKT-DROP to an ABORT
11580	 * or a PKT-DROP. We also do not respond to an
11581	 * INIT-ACK, because we can't know if the initiation
11582	 * tag is correct or not.
11584 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11589 offset += SCTP_SIZE32(chk_length);
11590 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11591 sizeof(*ch), (uint8_t *)&chunk_buf);
11594 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11595 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11597 * only send 1 mtu worth, trim off the excess on the end.
11600 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11603 chk->asoc = &stcb->asoc;
11604 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11605 if (chk->data == NULL) {
11607 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11610 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11611 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11613 sctp_m_freem(chk->data);
11617 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11618 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11619 chk->book_size_scale = 0;
11621 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11622 drp->trunc_len = htons(fullsz);
11624	 * Len is already adjusted to size minus overhead above; take
11625	 * the pkt_drop chunk itself out of it as well.
11627 chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
11628 len = chk->send_size;
11630 /* no truncation needed */
11631 drp->ch.chunk_flags = 0;
11632 drp->trunc_len = htons(0);
11635 drp->ch.chunk_flags |= SCTP_BADCRC;
11637 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11638 SCTP_BUF_LEN(chk->data) = chk->send_size;
11639 chk->sent = SCTP_DATAGRAM_UNSENT;
11640 chk->snd_count = 0;
11642 /* we should hit here */
11644 atomic_add_int(&chk->whoTo->ref_count, 1);
11648 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11649 drp->ch.chunk_length = htons(chk->send_size);
11650 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11654 drp->bottle_bw = htonl(spc);
11655 if (asoc->my_rwnd) {
11656 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11657 asoc->size_on_all_streams +
11658 asoc->my_rwnd_control_len +
11659 stcb->sctp_socket->so_rcv.sb_cc);
11662	 * If my rwnd is 0, possibly from mbuf depletion as well as
11663	 * space used, tell the peer there is NO space, i.e. onq == bw
11665 drp->current_onq = htonl(spc);
11669 m_copydata(m, iphlen, len, (caddr_t)datap);
11670 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11671 asoc->ctrl_queue_cnt++;
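/*
 * Illustrative sketch (not part of the original code): the truncation decision
 * made above when reflecting a dropped packet.  At most one MTU (bounded by a
 * cluster) may be echoed back, minus the protocol overhead (the real code also
 * accounts for the PKT-DROP chunk header itself); anything beyond that is
 * trimmed and reported via trunc_len.  Parameter names are local to this
 * example.
 */
static inline int
example_pktdrop_reflect_len(int dropped_len, int smallest_mtu, int max_cluster,
    int overhead)
{
	int limit;

	limit = (smallest_mtu < max_cluster ? smallest_mtu : max_cluster) - overhead;
	return (dropped_len > limit ? limit : dropped_len);
}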
11675 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11677 struct sctp_association *asoc;
11678 struct sctp_cwr_chunk *cwr;
11679 struct sctp_tmit_chunk *chk;
11681 SCTP_TCB_LOCK_ASSERT(stcb);
11685 asoc = &stcb->asoc;
11686 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11687 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11689	 * found a previous CWR queued to the same destination;
11690	 * update it if needed
11694 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11695 ctsn = ntohl(cwr->tsn);
11696 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11697 cwr->tsn = htonl(high_tsn);
11699 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11700 /* Make sure override is carried */
11701 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11706 sctp_alloc_a_chunk(stcb, chk);
11710 chk->copy_by_ref = 0;
11711 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11712 chk->rec.chunk_id.can_take_data = 1;
11714 chk->asoc = &stcb->asoc;
11715 chk->send_size = sizeof(struct sctp_cwr_chunk);
11716 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11717 if (chk->data == NULL) {
11718 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11721 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11722 SCTP_BUF_LEN(chk->data) = chk->send_size;
11723 chk->sent = SCTP_DATAGRAM_UNSENT;
11724 chk->snd_count = 0;
11726 atomic_add_int(&chk->whoTo->ref_count, 1);
11727 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11728 cwr->ch.chunk_type = SCTP_ECN_CWR;
11729 cwr->ch.chunk_flags = override;
11730 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11731 cwr->tsn = htonl(high_tsn);
11732 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11733 asoc->ctrl_queue_cnt++;
11737 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
11738 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11740 uint16_t len, old_len, i;
11741 struct sctp_stream_reset_out_request *req_out;
11742 struct sctp_chunkhdr *ch;
11744 int number_entries = 0;
11746 ch = mtod(chk->data, struct sctp_chunkhdr *);
11747 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11748 /* get to new offset for the param. */
11749 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11750 /* now how long will this param be? */
11751 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11752 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11753 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11754 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11758 if (number_entries == 0) {
11761 if (number_entries == stcb->asoc.streamoutcnt) {
11762 number_entries = 0;
11764 if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
11765 number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
11767 len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11768 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11769 req_out->ph.param_length = htons(len);
11770 req_out->request_seq = htonl(seq);
11771 req_out->response_seq = htonl(resp_seq);
11772 req_out->send_reset_at_tsn = htonl(last_sent);
11774 if (number_entries) {
11775 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11776 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11777 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11778 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11779 req_out->list_of_streams[at] = htons(i);
11781 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11782 if (at >= number_entries) {
11788 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11789 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11792 if (SCTP_SIZE32(len) > len) {
11794 * Need to worry about the pad we may end up adding to the
11795 * end. This is easy since the struct is either aligned to 4
11796 * bytes or 2 bytes off.
11798 req_out->list_of_streams[number_entries] = 0;
11800 /* now fix the chunk length */
11801 ch->chunk_length = htons(len + old_len);
11802 chk->book_size = len + old_len;
11803 chk->book_size_scale = 0;
11804 chk->send_size = SCTP_SIZE32(chk->book_size);
11805 SCTP_BUF_LEN(chk->data) = chk->send_size;
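/*
 * Illustrative sketch (not part of the original code): the length bookkeeping
 * done above when an outgoing stream-reset request is appended to the chunk.
 * The parameter carries one 16-bit stream number per listed stream, so with an
 * odd number of entries the parameter ends 2 bytes short of a 32-bit boundary
 * and a pad word is written.  The 16-byte fixed header size used here is an
 * assumption for illustration only.
 */
static inline uint16_t
example_reset_out_param_len(unsigned int number_entries)
{
	const uint16_t fixed_header = 16;	/* assumed request header size */
	uint16_t len;

	len = (uint16_t)(fixed_header + 2 * number_entries);
	/* the chunk is then padded with SCTP_SIZE32(len) - len bytes */
	return (len);
}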
11810 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11811 int number_entries, uint16_t *list,
11814 uint16_t len, old_len, i;
11815 struct sctp_stream_reset_in_request *req_in;
11816 struct sctp_chunkhdr *ch;
11818 ch = mtod(chk->data, struct sctp_chunkhdr *);
11819 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11821 /* get to new offset for the param. */
11822 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11823 /* now how long will this param be? */
11824 len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11825 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11826 req_in->ph.param_length = htons(len);
11827 req_in->request_seq = htonl(seq);
11828 if (number_entries) {
11829 for (i = 0; i < number_entries; i++) {
11830 req_in->list_of_streams[i] = htons(list[i]);
11833 if (SCTP_SIZE32(len) > len) {
11835 * Need to worry about the pad we may end up adding to the
11836 * end. This is easy since the struct is either aligned to 4
11837 * bytes or 2 bytes off.
11839 req_in->list_of_streams[number_entries] = 0;
11841 /* now fix the chunk length */
11842 ch->chunk_length = htons(len + old_len);
11843 chk->book_size = len + old_len;
11844 chk->book_size_scale = 0;
11845 chk->send_size = SCTP_SIZE32(chk->book_size);
11846 SCTP_BUF_LEN(chk->data) = chk->send_size;
11851 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11854 uint16_t len, old_len;
11855 struct sctp_stream_reset_tsn_request *req_tsn;
11856 struct sctp_chunkhdr *ch;
11858 ch = mtod(chk->data, struct sctp_chunkhdr *);
11859 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11861 /* get to new offset for the param. */
11862 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11863 /* now how long will this param be? */
11864 len = sizeof(struct sctp_stream_reset_tsn_request);
11865 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11866 req_tsn->ph.param_length = htons(len);
11867 req_tsn->request_seq = htonl(seq);
11869 /* now fix the chunk length */
11870 ch->chunk_length = htons(len + old_len);
11871 chk->send_size = len + old_len;
11872 chk->book_size = SCTP_SIZE32(chk->send_size);
11873 chk->book_size_scale = 0;
11874 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11879 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11880 uint32_t resp_seq, uint32_t result)
11882 uint16_t len, old_len;
11883 struct sctp_stream_reset_response *resp;
11884 struct sctp_chunkhdr *ch;
11886 ch = mtod(chk->data, struct sctp_chunkhdr *);
11887 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11889 /* get to new offset for the param. */
11890 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11891 /* now how long will this param be? */
11892 len = sizeof(struct sctp_stream_reset_response);
11893 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11894 resp->ph.param_length = htons(len);
11895 resp->response_seq = htonl(resp_seq);
11896	resp->result = htonl(result);
11898 /* now fix the chunk length */
11899 ch->chunk_length = htons(len + old_len);
11900 chk->book_size = len + old_len;
11901 chk->book_size_scale = 0;
11902 chk->send_size = SCTP_SIZE32(chk->book_size);
11903 SCTP_BUF_LEN(chk->data) = chk->send_size;
11908 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
11909 struct sctp_stream_reset_list *ent,
11912 struct sctp_association *asoc;
11913 struct sctp_tmit_chunk *chk;
11914 struct sctp_chunkhdr *ch;
11916 asoc = &stcb->asoc;
11919	 * Reset our last reset action to the new one IP -> response
11920	 * (probably PERFORMED). This assures that if we fail to send, a
11921	 * retransmission from the peer will get the new response.
11923 asoc->last_reset_action[0] = response;
11924 if (asoc->stream_reset_outstanding) {
11927 sctp_alloc_a_chunk(stcb, chk);
11929 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11932 chk->copy_by_ref = 0;
11933 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11934 chk->rec.chunk_id.can_take_data = 0;
11936 chk->asoc = &stcb->asoc;
11937 chk->book_size = sizeof(struct sctp_chunkhdr);
11938 chk->send_size = SCTP_SIZE32(chk->book_size);
11939 chk->book_size_scale = 0;
11940 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11941 if (chk->data == NULL) {
11942 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11943 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11946 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11947 /* setup chunk parameters */
11948 chk->sent = SCTP_DATAGRAM_UNSENT;
11949 chk->snd_count = 0;
11950 if (stcb->asoc.alternate) {
11951 chk->whoTo = stcb->asoc.alternate;
11953 chk->whoTo = stcb->asoc.primary_destination;
11955 ch = mtod(chk->data, struct sctp_chunkhdr *);
11956 ch->chunk_type = SCTP_STREAM_RESET;
11957 ch->chunk_flags = 0;
11958 ch->chunk_length = htons(chk->book_size);
11959 atomic_add_int(&chk->whoTo->ref_count, 1);
11960 SCTP_BUF_LEN(chk->data) = chk->send_size;
11961 sctp_add_stream_reset_result(chk, ent->seq, response);
11962 /* insert the chunk for sending */
11963 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11966 asoc->ctrl_queue_cnt++;
11970 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11971 uint32_t resp_seq, uint32_t result,
11972 uint32_t send_una, uint32_t recv_next)
11974 uint16_t len, old_len;
11975 struct sctp_stream_reset_response_tsn *resp;
11976 struct sctp_chunkhdr *ch;
11978 ch = mtod(chk->data, struct sctp_chunkhdr *);
11979 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11981 /* get to new offset for the param. */
11982 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11983 /* now how long will this param be? */
11984 len = sizeof(struct sctp_stream_reset_response_tsn);
11985 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11986 resp->ph.param_length = htons(len);
11987 resp->response_seq = htonl(resp_seq);
11988 resp->result = htonl(result);
11989 resp->senders_next_tsn = htonl(send_una);
11990 resp->receivers_next_tsn = htonl(recv_next);
11992 /* now fix the chunk length */
11993 ch->chunk_length = htons(len + old_len);
11994 chk->book_size = len + old_len;
11995 chk->send_size = SCTP_SIZE32(chk->book_size);
11996 chk->book_size_scale = 0;
11997 SCTP_BUF_LEN(chk->data) = chk->send_size;
12002 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
12006 uint16_t len, old_len;
12007 struct sctp_chunkhdr *ch;
12008 struct sctp_stream_reset_add_strm *addstr;
12010 ch = mtod(chk->data, struct sctp_chunkhdr *);
12011 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12013 /* get to new offset for the param. */
12014 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12015 /* now how long will this param be? */
12016 len = sizeof(struct sctp_stream_reset_add_strm);
12019 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
12020 addstr->ph.param_length = htons(len);
12021 addstr->request_seq = htonl(seq);
12022 addstr->number_of_streams = htons(adding);
12023 addstr->reserved = 0;
12025 /* now fix the chunk length */
12026 ch->chunk_length = htons(len + old_len);
12027 chk->send_size = len + old_len;
12028 chk->book_size = SCTP_SIZE32(chk->send_size);
12029 chk->book_size_scale = 0;
12030 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12035 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12039 uint16_t len, old_len;
12040 struct sctp_chunkhdr *ch;
12041 struct sctp_stream_reset_add_strm *addstr;
12043 ch = mtod(chk->data, struct sctp_chunkhdr *);
12044 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12046 /* get to new offset for the param. */
12047 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12048 /* now how long will this param be? */
12049 len = sizeof(struct sctp_stream_reset_add_strm);
12051 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12052 addstr->ph.param_length = htons(len);
12053 addstr->request_seq = htonl(seq);
12054 addstr->number_of_streams = htons(adding);
12055 addstr->reserved = 0;
12057 /* now fix the chunk length */
12058 ch->chunk_length = htons(len + old_len);
12059 chk->send_size = len + old_len;
12060 chk->book_size = SCTP_SIZE32(chk->send_size);
12061 chk->book_size_scale = 0;
12062 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12067 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
12069 struct sctp_association *asoc;
12070 struct sctp_tmit_chunk *chk;
12071 struct sctp_chunkhdr *ch;
12074 asoc = &stcb->asoc;
12075 asoc->trigger_reset = 0;
12076 if (asoc->stream_reset_outstanding) {
12079 sctp_alloc_a_chunk(stcb, chk);
12081 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12084 chk->copy_by_ref = 0;
12085 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12086 chk->rec.chunk_id.can_take_data = 0;
12088 chk->asoc = &stcb->asoc;
12089 chk->book_size = sizeof(struct sctp_chunkhdr);
12090 chk->send_size = SCTP_SIZE32(chk->book_size);
12091 chk->book_size_scale = 0;
12092 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12093 if (chk->data == NULL) {
12094 sctp_free_a_chunk(stcb, chk, so_locked);
12095 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12098 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12100 /* setup chunk parameters */
12101 chk->sent = SCTP_DATAGRAM_UNSENT;
12102 chk->snd_count = 0;
12103 if (stcb->asoc.alternate) {
12104 chk->whoTo = stcb->asoc.alternate;
12106 chk->whoTo = stcb->asoc.primary_destination;
12108 ch = mtod(chk->data, struct sctp_chunkhdr *);
12109 ch->chunk_type = SCTP_STREAM_RESET;
12110 ch->chunk_flags = 0;
12111 ch->chunk_length = htons(chk->book_size);
12112 atomic_add_int(&chk->whoTo->ref_count, 1);
12113 SCTP_BUF_LEN(chk->data) = chk->send_size;
12114 seq = stcb->asoc.str_reset_seq_out;
12115 if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12117 asoc->stream_reset_outstanding++;
12119 m_freem(chk->data);
12121 sctp_free_a_chunk(stcb, chk, so_locked);
12124 asoc->str_reset = chk;
12125 /* insert the chunk for sending */
12126 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12129 asoc->ctrl_queue_cnt++;
12131 if (stcb->asoc.send_sack) {
12132 sctp_send_sack(stcb, so_locked);
12134 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12139 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12140 uint16_t number_entries, uint16_t *list,
12141 uint8_t send_in_req,
12142 uint8_t send_tsn_req,
12143 uint8_t add_stream,
12145 uint16_t adding_i, uint8_t peer_asked)
12147 struct sctp_association *asoc;
12148 struct sctp_tmit_chunk *chk;
12149 struct sctp_chunkhdr *ch;
12150 int can_send_out_req = 0;
12153 asoc = &stcb->asoc;
12154 if (asoc->stream_reset_outstanding) {
12156 * Already one pending, must get ACK back to clear the flag.
12158 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12161 if ((send_in_req == 0) && (send_tsn_req == 0) &&
12162 (add_stream == 0)) {
12163 /* nothing to do */
12164 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12167 if (send_tsn_req && send_in_req) {
12168 /* error, can't do that */
12169 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12171 } else if (send_in_req) {
12172 can_send_out_req = 1;
12174 if (number_entries > (MCLBYTES -
12175 SCTP_MIN_OVERHEAD -
12176 sizeof(struct sctp_chunkhdr) -
12177 sizeof(struct sctp_stream_reset_out_request)) /
12178 sizeof(uint16_t)) {
12179 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12182 sctp_alloc_a_chunk(stcb, chk);
12184 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12187 chk->copy_by_ref = 0;
12188 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12189 chk->rec.chunk_id.can_take_data = 0;
12191 chk->asoc = &stcb->asoc;
12192 chk->book_size = sizeof(struct sctp_chunkhdr);
12193 chk->send_size = SCTP_SIZE32(chk->book_size);
12194 chk->book_size_scale = 0;
12195 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12196 if (chk->data == NULL) {
12197 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12198 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12201 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12203 /* setup chunk parameters */
12204 chk->sent = SCTP_DATAGRAM_UNSENT;
12205 chk->snd_count = 0;
12206 if (stcb->asoc.alternate) {
12207 chk->whoTo = stcb->asoc.alternate;
12209 chk->whoTo = stcb->asoc.primary_destination;
12211 atomic_add_int(&chk->whoTo->ref_count, 1);
12212 ch = mtod(chk->data, struct sctp_chunkhdr *);
12213 ch->chunk_type = SCTP_STREAM_RESET;
12214 ch->chunk_flags = 0;
12215 ch->chunk_length = htons(chk->book_size);
12216 SCTP_BUF_LEN(chk->data) = chk->send_size;
12218 seq = stcb->asoc.str_reset_seq_out;
12219 if (can_send_out_req) {
12222 ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12225 asoc->stream_reset_outstanding++;
12228 if ((add_stream & 1) &&
12229 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12230 /* Need to allocate more */
12231 struct sctp_stream_out *oldstream;
12232 struct sctp_stream_queue_pending *sp, *nsp;
12234 #if defined(SCTP_DETAILED_STR_STATS)
12238 oldstream = stcb->asoc.strmout;
12239 /* get some more */
12240 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12241 (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12243 if (stcb->asoc.strmout == NULL) {
12246 stcb->asoc.strmout = oldstream;
12247 /* Turn off the bit */
12248 x = add_stream & 0xfe;
12253	 * Ok, now we proceed with copying the old outgoing stream state
12254	 * and initializing the new entries.
12256 SCTP_TCB_SEND_LOCK(stcb);
12257 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12258 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12259 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12260 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12261 stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
12262 stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
12263 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12264 stcb->asoc.strmout[i].sid = i;
12265 stcb->asoc.strmout[i].state = oldstream[i].state;
12266 /* FIX ME FIX ME */
12268 * This should be a SS_COPY operation FIX ME STREAM
12271 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
12272 /* now anything on those queues? */
12273 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12274 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12275 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12279 /* now the new streams */
12280 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
12281 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12282 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12283 stcb->asoc.strmout[i].chunks_on_queues = 0;
12284 #if defined(SCTP_DETAILED_STR_STATS)
12285 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12286 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12287 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12290 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12291 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12293 stcb->asoc.strmout[i].next_mid_ordered = 0;
12294 stcb->asoc.strmout[i].next_mid_unordered = 0;
12295 stcb->asoc.strmout[i].sid = i;
12296 stcb->asoc.strmout[i].last_msg_incomplete = 0;
12297 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
12298 stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
12300 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
12301 SCTP_FREE(oldstream, SCTP_M_STRMO);
12302 SCTP_TCB_SEND_UNLOCK(stcb);
12305 if ((add_stream & 1) && (adding_o > 0)) {
12306 asoc->strm_pending_add_size = adding_o;
12307 asoc->peer_req_out = peer_asked;
12308 sctp_add_an_out_stream(chk, seq, adding_o);
12310 asoc->stream_reset_outstanding++;
12312 if ((add_stream & 2) && (adding_i > 0)) {
12313 sctp_add_an_in_stream(chk, seq, adding_i);
12315 asoc->stream_reset_outstanding++;
12318 sctp_add_stream_reset_in(chk, number_entries, list, seq);
12320 asoc->stream_reset_outstanding++;
12322 if (send_tsn_req) {
12323 sctp_add_stream_reset_tsn(chk, seq);
12324 asoc->stream_reset_outstanding++;
12326 asoc->str_reset = chk;
12327 /* insert the chunk for sending */
12328 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12331 asoc->ctrl_queue_cnt++;
12332 if (stcb->asoc.send_sack) {
12333 sctp_send_sack(stcb, SCTP_SO_LOCKED);
12335 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
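/*
 * Illustrative sketch (not part of the original code): the grow-and-copy
 * pattern used above when new outgoing streams are added.  A larger array is
 * allocated, the existing per-stream state is carried over, and the new tail
 * entries start out as closed streams; the real code additionally re-homes any
 * queued messages and frees the old array.  Types and names here are local to
 * this example.
 */
struct example_stream {
	uint16_t sid;
	int	 state;		/* 0 == closed for this sketch */
};

static inline void
example_grow_streams(const struct example_stream *old_arr, uint16_t old_cnt,
    struct example_stream *new_arr, uint16_t new_cnt)
{
	uint16_t i;

	for (i = 0; i < old_cnt; i++)
		new_arr[i] = old_arr[i];	/* carry existing state over */
	for (i = old_cnt; i < new_cnt; i++) {
		new_arr[i].sid = i;		/* new streams start out closed */
		new_arr[i].state = 0;
	}
}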
12340 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
12341 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12342 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12343 uint32_t vrf_id, uint16_t port)
12345 /* Don't respond to an ABORT with an ABORT. */
12346 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
12348 sctp_m_freem(cause);
12351 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
12352 mflowtype, mflowid, fibnum,
12358 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
12359 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12360 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12361 uint32_t vrf_id, uint16_t port)
12363 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
12364 mflowtype, mflowid, fibnum,
12369 static struct mbuf *
12370 sctp_copy_resume(struct uio *uio,
12372 int user_marks_eor,
12375 struct mbuf **new_tail)
12379 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12380 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12382 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12385 *sndout = m_length(m, NULL);
12386 *new_tail = m_last(m);
12392 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12396 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12398 if (sp->data == NULL) {
12399 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12403 sp->tail_mbuf = m_last(sp->data);
12409 static struct sctp_stream_queue_pending *
12410 sctp_copy_it_in(struct sctp_tcb *stcb,
12411 struct sctp_association *asoc,
12412 struct sctp_sndrcvinfo *srcv,
12414 struct sctp_nets *net,
12415 ssize_t max_send_len,
12416 int user_marks_eor,
12421	 * This routine must be very careful in its work. Protocol
12422	 * processing is up and running, so care must be taken to spl...()
12423	 * when you need to do something that may affect the stcb/asoc. The
12424	 * sb is locked, however. When data is copied the protocol processing
12425	 * should be enabled, since this is a slower operation...
12427 struct sctp_stream_queue_pending *sp = NULL;
12431 /* Now can we send this? */
12432 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
12433 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12434 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12435 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12436 /* got data while shutting down */
12437 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12438 *error = ECONNRESET;
12441 sctp_alloc_a_strmoq(stcb, sp);
12443 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12448 sp->sender_all_done = 0;
12449 sp->sinfo_flags = srcv->sinfo_flags;
12450 sp->timetolive = srcv->sinfo_timetolive;
12451 sp->ppid = srcv->sinfo_ppid;
12452 sp->context = srcv->sinfo_context;
12454 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12456 sp->sid = srcv->sinfo_stream;
12457 sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
12458 if ((sp->length == (uint32_t)uio->uio_resid) &&
12459 ((user_marks_eor == 0) ||
12460 (srcv->sinfo_flags & SCTP_EOF) ||
12461 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12462 sp->msg_is_complete = 1;
12464 sp->msg_is_complete = 0;
12466 sp->sender_all_done = 0;
12467 sp->some_taken = 0;
12468 sp->put_last_out = 0;
12469 resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
12470 sp->data = sp->tail_mbuf = NULL;
12471 if (sp->length == 0) {
12474 if (srcv->sinfo_keynumber_valid) {
12475 sp->auth_keyid = srcv->sinfo_keynumber;
12477 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12479 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12480 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12481 sp->holds_key_ref = 1;
12483 *error = sctp_copy_one(sp, uio, resv_in_first);
12486 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12489 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12491 atomic_add_int(&sp->net->ref_count, 1);
12495 sctp_set_prsctp_policy(sp);
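/*
 * Illustrative sketch (not part of the original code): the completeness test
 * used in sctp_copy_it_in() above.  A queued message is complete when the
 * whole user buffer fit into this copy and either explicit EOR marking is off
 * or the sender flagged EOF/EOR.  Parameter names are local to this example.
 */
static inline int
example_msg_is_complete(size_t copied, size_t resid, int user_marks_eor,
    int eof_flag, int eor_flag)
{
	return ((copied == resid) &&
	    ((user_marks_eor == 0) || eof_flag || (user_marks_eor && eor_flag)));
}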
12503 sctp_sosend(struct socket *so,
12504 struct sockaddr *addr,
12507 struct mbuf *control,
12512 int error, use_sndinfo = 0;
12513 struct sctp_sndrcvinfo sndrcvninfo;
12514 struct sockaddr *addr_to_use;
12515 #if defined(INET) && defined(INET6)
12516 struct sockaddr_in sin;
12520	/* process cmsg snd/rcv info (maybe an assoc-id) */
12521 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12522 sizeof(sndrcvninfo))) {
12527 addr_to_use = addr;
12528 #if defined(INET) && defined(INET6)
12529 if ((addr) && (addr->sa_family == AF_INET6)) {
12530 struct sockaddr_in6 *sin6;
12532 sin6 = (struct sockaddr_in6 *)addr;
12533 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12534 in6_sin6_2_sin(&sin, sin6);
12535 addr_to_use = (struct sockaddr *)&sin;
12539 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12542 use_sndinfo ? &sndrcvninfo : NULL
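/*
 * Illustrative sketch (not part of the original code): what the V4-mapped
 * handling in sctp_sosend() above boils down to.  in6_sin6_2_sin() rewrites a
 * sockaddr_in6 holding ::ffff:a.b.c.d into a sockaddr_in; the embedded IPv4
 * address is simply the last four bytes of the 16-byte IPv6 address.  The
 * function name here is local to this example.
 */
static inline uint32_t
example_v4mapped_to_v4(const uint8_t *addr6_bytes)
{
	uint32_t a4;

	memcpy(&a4, addr6_bytes + 12, sizeof(a4));
	return (a4);	/* already in network byte order */
}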
12550 sctp_lower_sosend(struct socket *so,
12551 struct sockaddr *addr,
12553 struct mbuf *i_pak,
12554 struct mbuf *control,
12556 struct sctp_sndrcvinfo *srcv
12561 ssize_t sndlen = 0, max_len, local_add_more;
12563 struct mbuf *top = NULL;
12564 int queue_only = 0, queue_only_for_init = 0;
12565 int free_cnt_applied = 0;
12567 int now_filled = 0;
12568 unsigned int inqueue_bytes = 0;
12569 struct sctp_block_entry be;
12570 struct sctp_inpcb *inp;
12571 struct sctp_tcb *stcb = NULL;
12572 struct timeval now;
12573 struct sctp_nets *net;
12574 struct sctp_association *asoc;
12575 struct sctp_inpcb *t_inp;
12576 int user_marks_eor;
12577 int create_lock_applied = 0;
12578 int nagle_applies = 0;
12579 int some_on_control = 0;
12580 int got_all_of_the_send = 0;
12581 int hold_tcblock = 0;
12582 int non_blocking = 0;
12583 ssize_t local_soresv = 0;
12585 uint16_t sinfo_flags;
12586 sctp_assoc_t sinfo_assoc_id;
12593 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12595 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12598 SCTP_RELEASE_PKT(i_pak);
12602 if ((uio == NULL) && (i_pak == NULL)) {
12603 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12606 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12607 atomic_add_int(&inp->total_sends, 1);
12609 if (uio->uio_resid < 0) {
12610 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12613 sndlen = uio->uio_resid;
12615 top = SCTP_HEADER_TO_CHAIN(i_pak);
12616 sndlen = SCTP_HEADER_LEN(i_pak);
12618 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zu\n",
12621 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12622 SCTP_IS_LISTENING(inp)) {
12623 /* The listener can NOT send */
12624 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12629	 * Pre-screen the address: if one is given, the sin-len
12630	 * must be set correctly!
12633 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12635 switch (raddr->sa.sa_family) {
12638 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12639 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12643 port = raddr->sin.sin_port;
12648 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12649 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12653 port = raddr->sin6.sin6_port;
12657 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12658 error = EAFNOSUPPORT;
12665 sinfo_flags = srcv->sinfo_flags;
12666 sinfo_assoc_id = srcv->sinfo_assoc_id;
12667 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12668 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12669 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12673 if (srcv->sinfo_flags)
12674 SCTP_STAT_INCR(sctps_sends_with_flags);
12676 sinfo_flags = inp->def_send.sinfo_flags;
12677 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12679 if (flags & MSG_EOR) {
12680 sinfo_flags |= SCTP_EOR;
12682 if (flags & MSG_EOF) {
12683 sinfo_flags |= SCTP_EOF;
12685 if (sinfo_flags & SCTP_SENDALL) {
12686	/* it's a sendall */
12687 error = sctp_sendall(inp, uio, top, srcv);
12691 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12692 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12696 /* now we must find the assoc */
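/*
 * Three lookup paths follow: a connected one-to-one (TCP-style) socket
 * simply uses its single association; an explicit sinfo_assoc_id is
 * resolved with sctp_findassociation_ep_asocid(); otherwise we look the
 * association up by destination address, possibly creating it
 * implicitly further below.
 */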
12697 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12698 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12699 SCTP_INP_RLOCK(inp);
12700 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12702 SCTP_TCB_LOCK(stcb);
12705 SCTP_INP_RUNLOCK(inp);
12706 } else if (sinfo_assoc_id) {
12707 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
12708 if (stcb != NULL) {
12713 * Since we did not use findep we must
12714 * increment it, and if we don't find a tcb
12717 SCTP_INP_WLOCK(inp);
12718 SCTP_INP_INCR_REF(inp);
12719 SCTP_INP_WUNLOCK(inp);
12720 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12721 if (stcb == NULL) {
12722 SCTP_INP_WLOCK(inp);
12723 SCTP_INP_DECR_REF(inp);
12724 SCTP_INP_WUNLOCK(inp);
12729 if ((stcb == NULL) && (addr)) {
12730 /* Possible implicit send? */
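/*
 * A send to an address with no existing association on a one-to-many
 * socket sets one up implicitly: take the ASOC_CREATE lock, re-check
 * under it, and if still not found fall through to sctp_aloc_assoc()
 * below, after which the INIT is queued.
 */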
12731 SCTP_ASOC_CREATE_LOCK(inp);
12732 create_lock_applied = 1;
12733 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12734 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12735 /* Should I really unlock ? */
12736 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12741 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12742 (addr->sa_family == AF_INET6)) {
12743 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12747 SCTP_INP_WLOCK(inp);
12748 SCTP_INP_INCR_REF(inp);
12749 SCTP_INP_WUNLOCK(inp);
12750 /* With the lock applied look again */
12751 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12752 #if defined(INET) || defined(INET6)
12753 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12754 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12757 if (stcb == NULL) {
12758 SCTP_INP_WLOCK(inp);
12759 SCTP_INP_DECR_REF(inp);
12760 SCTP_INP_WUNLOCK(inp);
12767 if (t_inp != inp) {
12768 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12773 if (stcb == NULL) {
12774 if (addr == NULL) {
12775 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12779 /* We must go ahead and start the INIT process */
12782 if ((sinfo_flags & SCTP_ABORT) ||
12783 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12785 * User asks to abort a non-existent assoc,
12786 * or EOF a non-existent assoc with no data
12788 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12792 /* get an asoc/stcb struct */
12793 vrf_id = inp->def_vrf_id;
12795 if (create_lock_applied == 0) {
12796 panic("Error, should hold create lock and I don't?");
12799 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12800 inp->sctp_ep.pre_open_stream_count,
12803 SCTP_INITIALIZE_AUTH_PARAMS);
12804 if (stcb == NULL) {
12805 /* Error is set up for us in the call */
12808 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12809 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12811 * Set the connected flag so we can queue
12814 soisconnecting(so);
12817 if (create_lock_applied) {
12818 SCTP_ASOC_CREATE_UNLOCK(inp);
12819 create_lock_applied = 0;
12821 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12824 * Turn on queue only flag to prevent data from
12828 asoc = &stcb->asoc;
12829 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
12830 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12833 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12834 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
12835 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
12841 /* out with the INIT */
12842 queue_only_for_init = 1;
12844 * we may want to dig in after this call and adjust the MTU
12845 * value. It defaulted to 1500 (constant) but the ro
12846 * structure may now have an update and thus we may need to
12847 * change it BEFORE we append the message.
12851 asoc = &stcb->asoc;
12852 if (srcv == NULL) {
12853 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12854 sinfo_flags = srcv->sinfo_flags;
12855 if (flags & MSG_EOR) {
12856 sinfo_flags |= SCTP_EOR;
12858 if (flags & MSG_EOF) {
12859 sinfo_flags |= SCTP_EOF;
12862 if (sinfo_flags & SCTP_ADDR_OVER) {
12864 net = sctp_findnet(stcb, addr);
12867 if ((net == NULL) ||
12868 ((port != 0) && (port != stcb->rport))) {
12869 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12874 if (stcb->asoc.alternate) {
12875 net = stcb->asoc.alternate;
12877 net = stcb->asoc.primary_destination;
12880 atomic_add_int(&stcb->total_sends, 1);
12881 /* Keep the stcb from being freed under our feet */
12882 atomic_add_int(&asoc->refcnt, 1);
12883 free_cnt_applied = 1;
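/*
 * From here on the association cannot disappear underneath us.  Next,
 * enforce the no-fragmentation option (the whole message must fit in
 * the smallest path MTU) and, for non-blocking sockets, fail with
 * EWOULDBLOCK up front if the send buffer cannot hold the message.
 */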
12885 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12886 if (sndlen > (ssize_t)asoc->smallest_mtu) {
12887 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12892 if (SCTP_SO_IS_NBIO(so)
12893 || (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0
12897 /* would we block? */
12898 if (non_blocking) {
12901 if (hold_tcblock == 0) {
12902 SCTP_TCB_LOCK(stcb);
12905 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
12906 if (user_marks_eor == 0) {
12911 if ((SCTP_SB_LIMIT_SND(so) < (amount + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12912 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12913 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12914 if (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(so))
12917 error = EWOULDBLOCK;
12920 stcb->asoc.sb_send_resv += (uint32_t)sndlen;
12921 SCTP_TCB_UNLOCK(stcb);
12924 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12926 local_soresv = sndlen;
12927 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12928 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12929 error = ECONNRESET;
12932 if (create_lock_applied) {
12933 SCTP_ASOC_CREATE_UNLOCK(inp);
12934 create_lock_applied = 0;
12936 /* Is the stream no. valid? */
12937 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12938 /* Invalid stream number */
12939 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12943 if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
12944 (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
12946 * Can't queue any data while stream reset is underway.
12948 if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
12953 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
12956 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
12957 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
12960 /* we are now done with all control */
12962 sctp_m_freem(control);
12965 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
12966 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12967 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12968 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12969 if (sinfo_flags & SCTP_ABORT) {
12972 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12973 error = ECONNRESET;
12977 /* Ok, we will attempt a msgsnd :> */
12979 p->td_ru.ru_msgsnd++;
12981 /* Are we aborting? */
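/*
 * SCTP_ABORT: any user data supplied here becomes the payload of a
 * SCTP_CAUSE_USER_INITIATED_ABT error cause carried with the ABORT
 * chunk, so it has to travel in one packet; oversized requests are
 * rejected with EMSGSIZE or truncated to what the smallest path MTU
 * allows.
 */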
12982 if (sinfo_flags & SCTP_ABORT) {
12984 ssize_t tot_demand, tot_out = 0, max_out;
12986 SCTP_STAT_INCR(sctps_sends_with_abort);
12987 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
12988 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
12989 /* It has to be up before we abort */
12990 /* how big is the user-initiated abort? */
12991 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12995 if (hold_tcblock) {
12996 SCTP_TCB_UNLOCK(stcb);
13000 struct mbuf *cntm = NULL;
13002 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
13004 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
13005 tot_out += SCTP_BUF_LEN(cntm);
13009 /* Must fit in an MTU */
13011 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
13012 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
13014 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13018 mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_WAITOK, 1, MT_DATA);
13021 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
13025 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
13026 max_out -= sizeof(struct sctp_abort_msg);
13027 if (tot_out > max_out) {
13031 struct sctp_paramhdr *ph;
13033 /* now move forward the data pointer */
13034 ph = mtod(mm, struct sctp_paramhdr *);
13035 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
13036 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
13038 SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr));
13040 error = uiomove((caddr_t)ph, (int)tot_out, uio);
13043 * Here, if we can't get the user's data, we
13044 * still abort; we just don't get to
13045 * send the user's note :-0
13052 SCTP_BUF_NEXT(mm) = top;
13056 if (hold_tcblock == 0) {
13057 SCTP_TCB_LOCK(stcb);
13059 atomic_add_int(&stcb->asoc.refcnt, -1);
13060 free_cnt_applied = 0;
13061 /* release this lock, otherwise we hang on ourselves */
13062 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
13063 /* now relock the stcb so everything is sane */
13067 * In this case top is already chained to mm; avoid a double
13068 * free, since we free it below if top != NULL and the driver
13069 * would free it after sending the packet out
13076 /* Calculate the maximum we can send */
13077 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13078 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13079 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13083 if (hold_tcblock) {
13084 SCTP_TCB_UNLOCK(stcb);
13087 if (asoc->strmout == NULL) {
13088 /* huh? software error */
13089 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13094 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
13095 if ((user_marks_eor == 0) &&
13096 (sndlen > (ssize_t)SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13097 /* It will NEVER fit */
13098 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13102 if ((uio == NULL) && user_marks_eor) {
13104 * We do not support eeor mode for
13105 * sending with mbuf chains (like sendfile).
13107 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13112 if (user_marks_eor) {
13113 local_add_more = (ssize_t)min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13116 * For non-eeor the whole message must fit in
13117 * the socket send buffer.
13119 local_add_more = sndlen;
13122 if (non_blocking) {
13123 goto skip_preblock;
13125 if (((max_len <= local_add_more) &&
13126 ((ssize_t)SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13128 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13129 /* No room right now! */
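/*
 * Block here until enough send-buffer space frees up: register a
 * block_entry so an incoming ABORT can wake us with an error, then
 * sbwait() on so_snd and re-check the limits each time we wake.
 */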
13130 SOCKBUF_LOCK(&so->so_snd);
13131 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13132 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13133 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13134 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %zd) || (%d+%d > %d)\n",
13135 (unsigned int)SCTP_SB_LIMIT_SND(so),
13138 stcb->asoc.stream_queue_cnt,
13139 stcb->asoc.chunks_on_out_queue,
13140 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13141 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13142 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
13145 stcb->block_entry = &be;
13146 error = sbwait(&so->so_snd);
13147 stcb->block_entry = NULL;
13148 if (error || so->so_error || be.error) {
13151 error = so->so_error;
13156 SOCKBUF_UNLOCK(&so->so_snd);
13159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13160 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13161 asoc, stcb->asoc.total_output_queue_size);
13163 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13164 SOCKBUF_UNLOCK(&so->so_snd);
13167 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13169 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13170 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13174 SOCKBUF_UNLOCK(&so->so_snd);
13178 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13182 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
13183 * case. NOTE: uio will be NULL when top/mbuf is passed
13186 if (sinfo_flags & SCTP_EOF) {
13187 got_all_of_the_send = 1;
13190 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13196 struct sctp_stream_queue_pending *sp;
13197 struct sctp_stream_out *strm;
13200 SCTP_TCB_SEND_LOCK(stcb);
13201 if ((asoc->stream_locked) &&
13202 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13203 SCTP_TCB_SEND_UNLOCK(stcb);
13204 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13208 SCTP_TCB_SEND_UNLOCK(stcb);
13210 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
13211 if (strm->last_msg_incomplete == 0) {
13213 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13217 SCTP_TCB_SEND_LOCK(stcb);
13218 if (sp->msg_is_complete) {
13219 strm->last_msg_incomplete = 0;
13220 asoc->stream_locked = 0;
13223 * Just got locked to this guy in case of an
13226 strm->last_msg_incomplete = 1;
13227 if (stcb->asoc.idata_supported == 0) {
13228 asoc->stream_locked = 1;
13229 asoc->stream_locked_on = srcv->sinfo_stream;
13231 sp->sender_all_done = 0;
13233 sctp_snd_sb_alloc(stcb, sp->length);
13234 atomic_add_int(&asoc->stream_queue_cnt, 1);
13235 if (sinfo_flags & SCTP_UNORDERED) {
13236 SCTP_STAT_INCR(sctps_sends_with_unord);
13238 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13239 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13240 SCTP_TCB_SEND_UNLOCK(stcb);
13242 SCTP_TCB_SEND_LOCK(stcb);
13243 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13244 SCTP_TCB_SEND_UNLOCK(stcb);
13246 /* ???? Huh ??? last msg is gone */
13248 panic("Warning: Last msg marked incomplete, yet nothing left?");
13250 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13251 strm->last_msg_incomplete = 0;
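/*
 * Main copy loop: keep pulling data from uio in pieces bounded by the
 * free send-buffer space, appending each piece to the pending stream
 * queue entry (sp) and blocking again when the buffer fills.
 */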
13257 while (uio->uio_resid > 0) {
13258 /* How much room do we have? */
13259 struct mbuf *new_tail, *mm;
13261 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13262 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
13263 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13267 if ((max_len > (ssize_t)SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13268 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13269 (uio->uio_resid && (uio->uio_resid <= max_len))) {
13272 if (hold_tcblock) {
13273 SCTP_TCB_UNLOCK(stcb);
13276 mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail);
13277 if ((mm == NULL) || error) {
13283 /* Update the mbuf and count */
13284 SCTP_TCB_SEND_LOCK(stcb);
13285 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13287 * we need to get out. Peer probably
13291 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
13292 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13293 error = ECONNRESET;
13295 SCTP_TCB_SEND_UNLOCK(stcb);
13298 if (sp->tail_mbuf) {
13299 /* tack it to the end */
13300 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13301 sp->tail_mbuf = new_tail;
13303 /* A stolen mbuf */
13305 sp->tail_mbuf = new_tail;
13307 sctp_snd_sb_alloc(stcb, sndout);
13308 atomic_add_int(&sp->length, sndout);
13310 if (sinfo_flags & SCTP_SACK_IMMEDIATELY) {
13311 sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
13314 /* Did we reach EOR? */
13315 if ((uio->uio_resid == 0) &&
13316 ((user_marks_eor == 0) ||
13317 (sinfo_flags & SCTP_EOF) ||
13318 (user_marks_eor && (sinfo_flags & SCTP_EOR)))) {
13319 sp->msg_is_complete = 1;
13321 sp->msg_is_complete = 0;
13323 SCTP_TCB_SEND_UNLOCK(stcb);
13325 if (uio->uio_resid == 0) {
13330 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
13332 * This is ugly but we must assure locking
13335 if (hold_tcblock == 0) {
13336 SCTP_TCB_LOCK(stcb);
13339 sctp_prune_prsctp(stcb, asoc, srcv, (int)sndlen);
13340 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13341 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
13342 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13348 SCTP_TCB_UNLOCK(stcb);
13351 /* wait for space now */
13352 if (non_blocking) {
13353 /* Non-blocking I/O in place; get out */
13356 /* What about the INIT, send it maybe */
13357 if (queue_only_for_init) {
13358 if (hold_tcblock == 0) {
13359 SCTP_TCB_LOCK(stcb);
13362 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13363 /* a collision took us forward? */
13366 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13367 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13371 if ((net->flight_size > net->cwnd) &&
13372 (asoc->sctp_cmt_on_off == 0)) {
13373 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13375 } else if (asoc->ifp_had_enobuf) {
13376 SCTP_STAT_INCR(sctps_ifnomemqueued);
13377 if (net->flight_size > (2 * net->mtu)) {
13380 asoc->ifp_had_enobuf = 0;
13382 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
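/* un_sent is the amount queued for this association but not yet in flight. */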
13383 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13384 (stcb->asoc.total_flight > 0) &&
13385 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13386 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13389 * Ok, Nagle is set on and we have data outstanding.
13390 * Don't send anything and let SACKs drive out the
13391 * data unless we have a "full" segment to send.
13393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13394 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13396 SCTP_STAT_INCR(sctps_naglequeued);
13399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13400 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13401 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13403 SCTP_STAT_INCR(sctps_naglesent);
13406 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13408 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13409 nagle_applies, un_sent);
13410 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13411 stcb->asoc.total_flight,
13412 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13414 if (queue_only_for_init)
13415 queue_only_for_init = 0;
13416 if ((queue_only == 0) && (nagle_applies == 0)) {
13418 * need to start chunk output
13419 * before blocking. Note that if
13420 * a lock is already applied, then
13421 * the input via the net is happening
13422 * and I don't need to start output :-D
13424 if (hold_tcblock == 0) {
13425 if (SCTP_TCB_TRYLOCK(stcb)) {
13427 sctp_chunk_output(inp,
13429 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13432 sctp_chunk_output(inp,
13434 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13437 if (hold_tcblock == 1) {
13438 SCTP_TCB_UNLOCK(stcb);
13441 SOCKBUF_LOCK(&so->so_snd);
13443 * This is a bit strange, but I think it will
13444 * work. The total_output_queue_size is locked and
13445 * protected by the TCB_LOCK, which we just released.
13446 * There is a race that can occur between releasing it
13447 * above, and me getting the socket lock, where sacks
13448 * come in but we have not put the SB_WAIT on the
13449 * so_snd buffer to get the wakeup. After the LOCK
13450 * is applied the sack_processing will also need to
13451 * LOCK the so->so_snd to do the actual sowwakeup(). So
13452 * once we have the socket buffer lock if we recheck the
13453 * size we KNOW we will get to sleep safely with the
13454 * wakeup flag in place.
13456 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13457 if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
13458 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13459 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13460 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13461 asoc, uio->uio_resid);
13464 stcb->block_entry = &be;
13465 error = sbwait(&so->so_snd);
13466 stcb->block_entry = NULL;
13468 if (error || so->so_error || be.error) {
13471 error = so->so_error;
13476 SOCKBUF_UNLOCK(&so->so_snd);
13480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13481 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13482 asoc, stcb->asoc.total_output_queue_size);
13485 SOCKBUF_UNLOCK(&so->so_snd);
13486 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13490 SCTP_TCB_SEND_LOCK(stcb);
13491 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13492 SCTP_TCB_SEND_UNLOCK(stcb);
13496 if (sp->msg_is_complete == 0) {
13497 strm->last_msg_incomplete = 1;
13498 if (stcb->asoc.idata_supported == 0) {
13499 asoc->stream_locked = 1;
13500 asoc->stream_locked_on = srcv->sinfo_stream;
13503 sp->sender_all_done = 1;
13504 strm->last_msg_incomplete = 0;
13505 asoc->stream_locked = 0;
13508 SCTP_PRINTF("Huh no sp TSNH?\n");
13509 strm->last_msg_incomplete = 0;
13510 asoc->stream_locked = 0;
13512 SCTP_TCB_SEND_UNLOCK(stcb);
13513 if (uio->uio_resid == 0) {
13514 got_all_of_the_send = 1;
13517 /* We send in a 0, since we do NOT have any locks */
13518 error = sctp_msg_append(stcb, net, top, srcv, 0);
13520 if (sinfo_flags & SCTP_EOF) {
13522 * This should only happen for Panda for the mbuf
13523 * send case, which does NOT yet support EEOR mode.
13524 * Thus, we can just set this flag to do the proper
13527 got_all_of_the_send = 1;
13535 if ((sinfo_flags & SCTP_EOF) &&
13536 (got_all_of_the_send == 1)) {
13537 SCTP_STAT_INCR(sctps_sends_with_eof);
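/*
 * SCTP_EOF with all data queued: if nothing is left on the send or
 * sent queues, start a graceful shutdown now (SHUTDOWN-SENT plus the
 * shutdown timers); otherwise mark SHUTDOWN_PENDING (or
 * PARTIAL_MSG_LEFT) so the shutdown proceeds once the queues drain.
 */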
13539 if (hold_tcblock == 0) {
13540 SCTP_TCB_LOCK(stcb);
13543 if (TAILQ_EMPTY(&asoc->send_queue) &&
13544 TAILQ_EMPTY(&asoc->sent_queue) &&
13545 sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
13546 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13549 /* there is nothing queued to send, so I'm done... */
13550 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
13551 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13552 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13553 struct sctp_nets *netp;
13555 /* only send SHUTDOWN the first time through */
13556 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13557 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13559 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
13560 sctp_stop_timers_for_shutdown(stcb);
13561 if (stcb->asoc.alternate) {
13562 netp = stcb->asoc.alternate;
13564 netp = stcb->asoc.primary_destination;
13566 sctp_send_shutdown(stcb, netp);
13567 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13569 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13570 asoc->primary_destination);
13574 * we still have (or just got) data to send, so set
13578 * XXX sockets draft says that SCTP_EOF should be
13579 * sent with no data. Currently, we will allow user
13580 * data to be sent first and move to
13583 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
13584 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13585 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13586 if (hold_tcblock == 0) {
13587 SCTP_TCB_LOCK(stcb);
13590 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13591 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
13593 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
13594 if (TAILQ_EMPTY(&asoc->send_queue) &&
13595 TAILQ_EMPTY(&asoc->sent_queue) &&
13596 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13597 struct mbuf *op_err;
13598 char msg[SCTP_DIAG_INFO_LEN];
13601 if (free_cnt_applied) {
13602 atomic_add_int(&stcb->asoc.refcnt, -1);
13603 free_cnt_applied = 0;
13605 snprintf(msg, sizeof(msg),
13606 "%s:%d at %s", __FILE__, __LINE__, __func__);
13607 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
13609 sctp_abort_an_association(stcb->sctp_ep, stcb,
13610 op_err, SCTP_SO_LOCKED);
13612 * now relock the stcb so everything
13619 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13620 asoc->primary_destination);
13621 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13626 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13627 some_on_control = 1;
13629 if (queue_only_for_init) {
13630 if (hold_tcblock == 0) {
13631 SCTP_TCB_LOCK(stcb);
13634 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13635 /* a collision took us forward? */
13638 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13639 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13643 if ((net->flight_size > net->cwnd) &&
13644 (stcb->asoc.sctp_cmt_on_off == 0)) {
13645 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13647 } else if (asoc->ifp_had_enobuf) {
13648 SCTP_STAT_INCR(sctps_ifnomemqueued);
13649 if (net->flight_size > (2 * net->mtu)) {
13652 asoc->ifp_had_enobuf = 0;
13654 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
13655 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13656 (stcb->asoc.total_flight > 0) &&
13657 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13658 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13660 * Ok, Nagle is set on and we have data outstanding.
13661 * Don't send anything and let SACKs drive out the
13662 * data unless we have a "full" segment to send.
13664 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13665 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13667 SCTP_STAT_INCR(sctps_naglequeued);
13670 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13671 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13672 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13674 SCTP_STAT_INCR(sctps_naglesent);
13677 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13678 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13679 nagle_applies, un_sent);
13680 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13681 stcb->asoc.total_flight,
13682 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13684 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13685 /* we can attempt to send too. */
13686 if (hold_tcblock == 0) {
13688 * If there is activity recv'ing sacks no need to
13691 if (SCTP_TCB_TRYLOCK(stcb)) {
13692 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13696 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13698 } else if ((queue_only == 0) &&
13699 (stcb->asoc.peers_rwnd == 0) &&
13700 (stcb->asoc.total_flight == 0)) {
13701 /* We get to have a probe outstanding */
13702 if (hold_tcblock == 0) {
13704 SCTP_TCB_LOCK(stcb);
13706 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13707 } else if (some_on_control) {
13708 int num_out, reason, frag_point;
13710 /* Here we do control only */
13711 if (hold_tcblock == 0) {
13713 SCTP_TCB_LOCK(stcb);
13715 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13716 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13717 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13719 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13720 queue_only, stcb->asoc.peers_rwnd, un_sent,
13721 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13722 stcb->asoc.total_output_queue_size, error);
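/*
 * Common exit path: give back any reserved send-buffer space, drop the
 * create lock, the TCB lock and the refcount taken above, and free any
 * control or data mbufs we did not consume.
 */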
13727 if (local_soresv && stcb) {
13728 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13730 if (create_lock_applied) {
13731 SCTP_ASOC_CREATE_UNLOCK(inp);
13733 if ((stcb) && hold_tcblock) {
13734 SCTP_TCB_UNLOCK(stcb);
13736 if (stcb && free_cnt_applied) {
13737 atomic_add_int(&stcb->asoc.refcnt, -1);
13741 if (mtx_owned(&stcb->tcb_mtx)) {
13742 panic("Leaving with tcb mtx owned?");
13744 if (mtx_owned(&stcb->tcb_send_mtx)) {
13745 panic("Leaving with tcb send mtx owned?");
13753 sctp_m_freem(control);
13760 * generate an AUTHentication chunk, if required
13763 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13764 struct sctp_auth_chunk **auth_ret, uint32_t *offset,
13765 struct sctp_tcb *stcb, uint8_t chunk)
13767 struct mbuf *m_auth;
13768 struct sctp_auth_chunk *auth;
13772 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13776 if (stcb->asoc.auth_supported == 0) {
13779 /* does the requested chunk require auth? */
13780 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13783 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
13784 if (m_auth == NULL) {
13788 /* reserve some space if this will be the first mbuf */
13790 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13791 /* fill in the AUTH chunk details */
13792 auth = mtod(m_auth, struct sctp_auth_chunk *);
13793 memset(auth, 0, sizeof(*auth));
13794 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13795 auth->ch.chunk_flags = 0;
13796 chunk_len = sizeof(*auth) +
13797 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13798 auth->ch.chunk_length = htons(chunk_len);
13799 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13800 /* key id and hmac digest will be computed and filled in upon send */
13802 /* save the offset where the auth was inserted into the chain */
13804 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13805 *offset += SCTP_BUF_LEN(cn);
13808 /* update length and return pointer to the auth chunk */
13809 SCTP_BUF_LEN(m_auth) = chunk_len;
13810 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13811 if (auth_ret != NULL)
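/*
 * Roughly: walk the ND prefix list for the prefix covering src6 and
 * check whether the gateway of the cached route is one of the routers
 * advertising that prefix, i.e. whether the chosen next hop actually
 * matches this IPv6 source address.
 */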
13819 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
13821 struct nd_prefix *pfx = NULL;
13822 struct nd_pfxrouter *pfxrtr = NULL;
13823 struct sockaddr_in6 gw6;
13825 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13828 /* get prefix entry of address */
13830 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13831 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13833 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13834 &src6->sin6_addr, &pfx->ndpr_mask))
13837 /* no prefix entry in the prefix list */
13840 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13841 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13845 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13846 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13848 /* search for an installed gateway in the prefix entry */
13849 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13850 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13851 gw6.sin6_family = AF_INET6;
13852 gw6.sin6_len = sizeof(struct sockaddr_in6);
13853 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13854 sizeof(struct in6_addr));
13855 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13856 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13857 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13858 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13859 if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
13861 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13866 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
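/*
 * IPv4 counterpart of the check above: mask both the candidate source
 * address and the route's gateway with the interface netmask and
 * consider them a match when they land on the same subnet.
 */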
13872 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
13875 struct sockaddr_in *sin, *mask;
13876 struct ifaddr *ifa;
13877 struct in_addr srcnetaddr, gwnetaddr;
13879 if (ro == NULL || ro->ro_rt == NULL ||
13880 sifa->address.sa.sa_family != AF_INET) {
13883 ifa = (struct ifaddr *)sifa->ifa;
13884 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13885 sin = &sifa->address.sin;
13886 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13887 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13888 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13889 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13891 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13892 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13893 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13894 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13895 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13896 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {