2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_auth.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_asconf.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_bsd_addr.h>
51 #include <netinet/sctp_input.h>
52 #include <netinet/sctp_crc32.h>
53 #if defined(INET) || defined(INET6)
54 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <machine/in_cksum.h>
61 #define SCTP_MAX_GAPS_INARRAY 4
63 uint8_t right_edge; /* mergeable on the right edge */
64 uint8_t left_edge; /* mergeable on the left edge */
67 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
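/*
 * How to read the table below: each sack_track entry describes one byte
 * of the received-TSN mapping array. Judging from the initializers,
 * bit 0 of the byte is the lowest TSN offset in that byte and bit 7 the
 * highest: right_edge is set exactly when bit 0 is set and left_edge
 * exactly when bit 7 is set, i.e. when the first or last run of set
 * bits may merge with a run in the neighbouring byte. num_entries is
 * the number of runs of consecutive set bits (the gap-ack blocks this
 * byte contributes), which for an 8-bit value never exceeds
 * SCTP_MAX_GAPS_INARRAY (4).
 */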
70 const struct sack_track sack_array[256] = {
71 {0, 0, 0, 0, /* 0x00 */
78 {1, 0, 1, 0, /* 0x01 */
85 {0, 0, 1, 0, /* 0x02 */
92 {1, 0, 1, 0, /* 0x03 */
99 {0, 0, 1, 0, /* 0x04 */
106 {1, 0, 2, 0, /* 0x05 */
113 {0, 0, 1, 0, /* 0x06 */
120 {1, 0, 1, 0, /* 0x07 */
127 {0, 0, 1, 0, /* 0x08 */
134 {1, 0, 2, 0, /* 0x09 */
141 {0, 0, 2, 0, /* 0x0a */
148 {1, 0, 2, 0, /* 0x0b */
155 {0, 0, 1, 0, /* 0x0c */
162 {1, 0, 2, 0, /* 0x0d */
169 {0, 0, 1, 0, /* 0x0e */
176 {1, 0, 1, 0, /* 0x0f */
183 {0, 0, 1, 0, /* 0x10 */
190 {1, 0, 2, 0, /* 0x11 */
197 {0, 0, 2, 0, /* 0x12 */
204 {1, 0, 2, 0, /* 0x13 */
211 {0, 0, 2, 0, /* 0x14 */
218 {1, 0, 3, 0, /* 0x15 */
225 {0, 0, 2, 0, /* 0x16 */
232 {1, 0, 2, 0, /* 0x17 */
239 {0, 0, 1, 0, /* 0x18 */
246 {1, 0, 2, 0, /* 0x19 */
253 {0, 0, 2, 0, /* 0x1a */
260 {1, 0, 2, 0, /* 0x1b */
267 {0, 0, 1, 0, /* 0x1c */
274 {1, 0, 2, 0, /* 0x1d */
281 {0, 0, 1, 0, /* 0x1e */
288 {1, 0, 1, 0, /* 0x1f */
295 {0, 0, 1, 0, /* 0x20 */
302 {1, 0, 2, 0, /* 0x21 */
309 {0, 0, 2, 0, /* 0x22 */
316 {1, 0, 2, 0, /* 0x23 */
323 {0, 0, 2, 0, /* 0x24 */
330 {1, 0, 3, 0, /* 0x25 */
337 {0, 0, 2, 0, /* 0x26 */
344 {1, 0, 2, 0, /* 0x27 */
351 {0, 0, 2, 0, /* 0x28 */
358 {1, 0, 3, 0, /* 0x29 */
365 {0, 0, 3, 0, /* 0x2a */
372 {1, 0, 3, 0, /* 0x2b */
379 {0, 0, 2, 0, /* 0x2c */
386 {1, 0, 3, 0, /* 0x2d */
393 {0, 0, 2, 0, /* 0x2e */
400 {1, 0, 2, 0, /* 0x2f */
407 {0, 0, 1, 0, /* 0x30 */
414 {1, 0, 2, 0, /* 0x31 */
421 {0, 0, 2, 0, /* 0x32 */
428 {1, 0, 2, 0, /* 0x33 */
435 {0, 0, 2, 0, /* 0x34 */
442 {1, 0, 3, 0, /* 0x35 */
449 {0, 0, 2, 0, /* 0x36 */
456 {1, 0, 2, 0, /* 0x37 */
463 {0, 0, 1, 0, /* 0x38 */
470 {1, 0, 2, 0, /* 0x39 */
477 {0, 0, 2, 0, /* 0x3a */
484 {1, 0, 2, 0, /* 0x3b */
491 {0, 0, 1, 0, /* 0x3c */
498 {1, 0, 2, 0, /* 0x3d */
505 {0, 0, 1, 0, /* 0x3e */
512 {1, 0, 1, 0, /* 0x3f */
519 {0, 0, 1, 0, /* 0x40 */
526 {1, 0, 2, 0, /* 0x41 */
533 {0, 0, 2, 0, /* 0x42 */
540 {1, 0, 2, 0, /* 0x43 */
547 {0, 0, 2, 0, /* 0x44 */
554 {1, 0, 3, 0, /* 0x45 */
561 {0, 0, 2, 0, /* 0x46 */
568 {1, 0, 2, 0, /* 0x47 */
575 {0, 0, 2, 0, /* 0x48 */
582 {1, 0, 3, 0, /* 0x49 */
589 {0, 0, 3, 0, /* 0x4a */
596 {1, 0, 3, 0, /* 0x4b */
603 {0, 0, 2, 0, /* 0x4c */
610 {1, 0, 3, 0, /* 0x4d */
617 {0, 0, 2, 0, /* 0x4e */
624 {1, 0, 2, 0, /* 0x4f */
631 {0, 0, 2, 0, /* 0x50 */
638 {1, 0, 3, 0, /* 0x51 */
645 {0, 0, 3, 0, /* 0x52 */
652 {1, 0, 3, 0, /* 0x53 */
659 {0, 0, 3, 0, /* 0x54 */
666 {1, 0, 4, 0, /* 0x55 */
673 {0, 0, 3, 0, /* 0x56 */
680 {1, 0, 3, 0, /* 0x57 */
687 {0, 0, 2, 0, /* 0x58 */
694 {1, 0, 3, 0, /* 0x59 */
701 {0, 0, 3, 0, /* 0x5a */
708 {1, 0, 3, 0, /* 0x5b */
715 {0, 0, 2, 0, /* 0x5c */
722 {1, 0, 3, 0, /* 0x5d */
729 {0, 0, 2, 0, /* 0x5e */
736 {1, 0, 2, 0, /* 0x5f */
743 {0, 0, 1, 0, /* 0x60 */
750 {1, 0, 2, 0, /* 0x61 */
757 {0, 0, 2, 0, /* 0x62 */
764 {1, 0, 2, 0, /* 0x63 */
771 {0, 0, 2, 0, /* 0x64 */
778 {1, 0, 3, 0, /* 0x65 */
785 {0, 0, 2, 0, /* 0x66 */
792 {1, 0, 2, 0, /* 0x67 */
799 {0, 0, 2, 0, /* 0x68 */
806 {1, 0, 3, 0, /* 0x69 */
813 {0, 0, 3, 0, /* 0x6a */
820 {1, 0, 3, 0, /* 0x6b */
827 {0, 0, 2, 0, /* 0x6c */
834 {1, 0, 3, 0, /* 0x6d */
841 {0, 0, 2, 0, /* 0x6e */
848 {1, 0, 2, 0, /* 0x6f */
855 {0, 0, 1, 0, /* 0x70 */
862 {1, 0, 2, 0, /* 0x71 */
869 {0, 0, 2, 0, /* 0x72 */
876 {1, 0, 2, 0, /* 0x73 */
883 {0, 0, 2, 0, /* 0x74 */
890 {1, 0, 3, 0, /* 0x75 */
897 {0, 0, 2, 0, /* 0x76 */
904 {1, 0, 2, 0, /* 0x77 */
911 {0, 0, 1, 0, /* 0x78 */
918 {1, 0, 2, 0, /* 0x79 */
925 {0, 0, 2, 0, /* 0x7a */
932 {1, 0, 2, 0, /* 0x7b */
939 {0, 0, 1, 0, /* 0x7c */
946 {1, 0, 2, 0, /* 0x7d */
953 {0, 0, 1, 0, /* 0x7e */
960 {1, 0, 1, 0, /* 0x7f */
967 {0, 1, 1, 0, /* 0x80 */
974 {1, 1, 2, 0, /* 0x81 */
981 {0, 1, 2, 0, /* 0x82 */
988 {1, 1, 2, 0, /* 0x83 */
995 {0, 1, 2, 0, /* 0x84 */
1002 {1, 1, 3, 0, /* 0x85 */
1009 {0, 1, 2, 0, /* 0x86 */
1016 {1, 1, 2, 0, /* 0x87 */
1023 {0, 1, 2, 0, /* 0x88 */
1030 {1, 1, 3, 0, /* 0x89 */
1037 {0, 1, 3, 0, /* 0x8a */
1044 {1, 1, 3, 0, /* 0x8b */
1051 {0, 1, 2, 0, /* 0x8c */
1058 {1, 1, 3, 0, /* 0x8d */
1065 {0, 1, 2, 0, /* 0x8e */
1072 {1, 1, 2, 0, /* 0x8f */
1079 {0, 1, 2, 0, /* 0x90 */
1086 {1, 1, 3, 0, /* 0x91 */
1093 {0, 1, 3, 0, /* 0x92 */
1100 {1, 1, 3, 0, /* 0x93 */
1107 {0, 1, 3, 0, /* 0x94 */
1114 {1, 1, 4, 0, /* 0x95 */
1121 {0, 1, 3, 0, /* 0x96 */
1128 {1, 1, 3, 0, /* 0x97 */
1135 {0, 1, 2, 0, /* 0x98 */
1142 {1, 1, 3, 0, /* 0x99 */
1149 {0, 1, 3, 0, /* 0x9a */
1156 {1, 1, 3, 0, /* 0x9b */
1163 {0, 1, 2, 0, /* 0x9c */
1170 {1, 1, 3, 0, /* 0x9d */
1177 {0, 1, 2, 0, /* 0x9e */
1184 {1, 1, 2, 0, /* 0x9f */
1191 {0, 1, 2, 0, /* 0xa0 */
1198 {1, 1, 3, 0, /* 0xa1 */
1205 {0, 1, 3, 0, /* 0xa2 */
1212 {1, 1, 3, 0, /* 0xa3 */
1219 {0, 1, 3, 0, /* 0xa4 */
1226 {1, 1, 4, 0, /* 0xa5 */
1233 {0, 1, 3, 0, /* 0xa6 */
1240 {1, 1, 3, 0, /* 0xa7 */
1247 {0, 1, 3, 0, /* 0xa8 */
1254 {1, 1, 4, 0, /* 0xa9 */
1261 {0, 1, 4, 0, /* 0xaa */
1268 {1, 1, 4, 0, /* 0xab */
1275 {0, 1, 3, 0, /* 0xac */
1282 {1, 1, 4, 0, /* 0xad */
1289 {0, 1, 3, 0, /* 0xae */
1296 {1, 1, 3, 0, /* 0xaf */
1303 {0, 1, 2, 0, /* 0xb0 */
1310 {1, 1, 3, 0, /* 0xb1 */
1317 {0, 1, 3, 0, /* 0xb2 */
1324 {1, 1, 3, 0, /* 0xb3 */
1331 {0, 1, 3, 0, /* 0xb4 */
1338 {1, 1, 4, 0, /* 0xb5 */
1345 {0, 1, 3, 0, /* 0xb6 */
1352 {1, 1, 3, 0, /* 0xb7 */
1359 {0, 1, 2, 0, /* 0xb8 */
1366 {1, 1, 3, 0, /* 0xb9 */
1373 {0, 1, 3, 0, /* 0xba */
1380 {1, 1, 3, 0, /* 0xbb */
1387 {0, 1, 2, 0, /* 0xbc */
1394 {1, 1, 3, 0, /* 0xbd */
1401 {0, 1, 2, 0, /* 0xbe */
1408 {1, 1, 2, 0, /* 0xbf */
1415 {0, 1, 1, 0, /* 0xc0 */
1422 {1, 1, 2, 0, /* 0xc1 */
1429 {0, 1, 2, 0, /* 0xc2 */
1436 {1, 1, 2, 0, /* 0xc3 */
1443 {0, 1, 2, 0, /* 0xc4 */
1450 {1, 1, 3, 0, /* 0xc5 */
1457 {0, 1, 2, 0, /* 0xc6 */
1464 {1, 1, 2, 0, /* 0xc7 */
1471 {0, 1, 2, 0, /* 0xc8 */
1478 {1, 1, 3, 0, /* 0xc9 */
1485 {0, 1, 3, 0, /* 0xca */
1492 {1, 1, 3, 0, /* 0xcb */
1499 {0, 1, 2, 0, /* 0xcc */
1506 {1, 1, 3, 0, /* 0xcd */
1513 {0, 1, 2, 0, /* 0xce */
1520 {1, 1, 2, 0, /* 0xcf */
1527 {0, 1, 2, 0, /* 0xd0 */
1534 {1, 1, 3, 0, /* 0xd1 */
1541 {0, 1, 3, 0, /* 0xd2 */
1548 {1, 1, 3, 0, /* 0xd3 */
1555 {0, 1, 3, 0, /* 0xd4 */
1562 {1, 1, 4, 0, /* 0xd5 */
1569 {0, 1, 3, 0, /* 0xd6 */
1576 {1, 1, 3, 0, /* 0xd7 */
1583 {0, 1, 2, 0, /* 0xd8 */
1590 {1, 1, 3, 0, /* 0xd9 */
1597 {0, 1, 3, 0, /* 0xda */
1604 {1, 1, 3, 0, /* 0xdb */
1611 {0, 1, 2, 0, /* 0xdc */
1618 {1, 1, 3, 0, /* 0xdd */
1625 {0, 1, 2, 0, /* 0xde */
1632 {1, 1, 2, 0, /* 0xdf */
1639 {0, 1, 1, 0, /* 0xe0 */
1646 {1, 1, 2, 0, /* 0xe1 */
1653 {0, 1, 2, 0, /* 0xe2 */
1660 {1, 1, 2, 0, /* 0xe3 */
1667 {0, 1, 2, 0, /* 0xe4 */
1674 {1, 1, 3, 0, /* 0xe5 */
1681 {0, 1, 2, 0, /* 0xe6 */
1688 {1, 1, 2, 0, /* 0xe7 */
1695 {0, 1, 2, 0, /* 0xe8 */
1702 {1, 1, 3, 0, /* 0xe9 */
1709 {0, 1, 3, 0, /* 0xea */
1716 {1, 1, 3, 0, /* 0xeb */
1723 {0, 1, 2, 0, /* 0xec */
1730 {1, 1, 3, 0, /* 0xed */
1737 {0, 1, 2, 0, /* 0xee */
1744 {1, 1, 2, 0, /* 0xef */
1751 {0, 1, 1, 0, /* 0xf0 */
1758 {1, 1, 2, 0, /* 0xf1 */
1765 {0, 1, 2, 0, /* 0xf2 */
1772 {1, 1, 2, 0, /* 0xf3 */
1779 {0, 1, 2, 0, /* 0xf4 */
1786 {1, 1, 3, 0, /* 0xf5 */
1793 {0, 1, 2, 0, /* 0xf6 */
1800 {1, 1, 2, 0, /* 0xf7 */
1807 {0, 1, 1, 0, /* 0xf8 */
1814 {1, 1, 2, 0, /* 0xf9 */
1821 {0, 1, 2, 0, /* 0xfa */
1828 {1, 1, 2, 0, /* 0xfb */
1835 {0, 1, 1, 0, /* 0xfc */
1842 {1, 1, 2, 0, /* 0xfd */
1849 {0, 1, 1, 0, /* 0xfe */
1856 {1, 1, 1, 0, /* 0xff */
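/*
 * Worked example for the table above (illustrative): 0x55 is binary
 * 01010101, so bits 0, 2, 4 and 6 are set. That gives four separate
 * runs of set bits, hence num_entries is 4 (the SCTP_MAX_GAPS_INARRAY
 * maximum); bit 0 is set, so right_edge is 1; bit 7 is clear, so
 * left_edge is 0. This matches the {1, 0, 4, 0, ...} entry for 0x55.
 * Likewise 0xff, a single run touching both edges, yields {1, 1, 1, 0, ...}.
 */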
1867 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1868 struct sctp_scoping *scope,
1871 if ((scope->loopback_scope == 0) &&
1872 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1874 * skip loopback if not in scope
1878 switch (ifa->address.sa.sa_family) {
1881 if (scope->ipv4_addr_legal) {
1882 struct sockaddr_in *sin;
1884 sin = &ifa->address.sin;
1885 if (sin->sin_addr.s_addr == 0) {
1886 /* not in scope, unspecified */
1889 if ((scope->ipv4_local_scope == 0) &&
1890 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1891 /* private address not in scope */
1901 if (scope->ipv6_addr_legal) {
1902 struct sockaddr_in6 *sin6;
1905 * Must update the flags, bummer, which means any
1906 * IFA locks must now be applied HERE <->
1909 sctp_gather_internal_ifa_flags(ifa);
1911 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1914 /* ok to use deprecated addresses? */
1915 sin6 = &ifa->address.sin6;
1916 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1917 /* skip unspecified addresses */
1920 if ( /* (local_scope == 0) && */
1921 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1924 if ((scope->site_scope == 0) &&
1925 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1939 static struct mbuf *
1940 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1942 #if defined(INET) || defined(INET6)
1943 struct sctp_paramhdr *paramh;
1948 switch (ifa->address.sa.sa_family) {
1951 plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1956 plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1962 #if defined(INET) || defined(INET6)
1963 if (M_TRAILINGSPACE(m) >= plen) {
1964 /* easy side we just drop it on the end */
1965 paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1968 /* Need more space */
1970 while (SCTP_BUF_NEXT(mret) != NULL) {
1971 mret = SCTP_BUF_NEXT(mret);
1973 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
1974 if (SCTP_BUF_NEXT(mret) == NULL) {
1975 /* We are hosed, can't add more addresses */
1978 mret = SCTP_BUF_NEXT(mret);
1979 paramh = mtod(mret, struct sctp_paramhdr *);
1981 /* now add the parameter */
1982 switch (ifa->address.sa.sa_family) {
1986 struct sctp_ipv4addr_param *ipv4p;
1987 struct sockaddr_in *sin;
1989 sin = &ifa->address.sin;
1990 ipv4p = (struct sctp_ipv4addr_param *)paramh;
1991 paramh->param_type = htons(SCTP_IPV4_ADDRESS);
1992 paramh->param_length = htons(plen);
1993 ipv4p->addr = sin->sin_addr.s_addr;
1994 SCTP_BUF_LEN(mret) += plen;
2001 struct sctp_ipv6addr_param *ipv6p;
2002 struct sockaddr_in6 *sin6;
2004 sin6 = &ifa->address.sin6;
2005 ipv6p = (struct sctp_ipv6addr_param *)paramh;
2006 paramh->param_type = htons(SCTP_IPV6_ADDRESS);
2007 paramh->param_length = htons(plen);
2008 memcpy(ipv6p->addr, &sin6->sin6_addr,
2009 sizeof(ipv6p->addr));
2010 /* clear embedded scope in the address */
2011 in6_clearscope((struct in6_addr *)ipv6p->addr);
2012 SCTP_BUF_LEN(mret) += plen;
2028 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2029 struct sctp_scoping *scope,
2030 struct mbuf *m_at, int cnt_inits_to,
2031 uint16_t *padding_len, uint16_t *chunk_len)
2033 struct sctp_vrf *vrf = NULL;
2034 int cnt, limit_out = 0, total_count;
2037 vrf_id = inp->def_vrf_id;
2038 SCTP_IPI_ADDR_RLOCK();
2039 vrf = sctp_find_vrf(vrf_id);
2041 SCTP_IPI_ADDR_RUNLOCK();
2044 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2045 struct sctp_ifa *sctp_ifap;
2046 struct sctp_ifn *sctp_ifnp;
2049 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2051 cnt = SCTP_ADDRESS_LIMIT;
2054 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2055 if ((scope->loopback_scope == 0) &&
2056 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2058 * Skip loopback devices if loopback_scope
2063 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2065 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2066 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2067 &sctp_ifap->address.sin.sin_addr) != 0)) {
2072 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2073 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2074 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2078 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2081 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2085 if (cnt > SCTP_ADDRESS_LIMIT) {
2089 if (cnt > SCTP_ADDRESS_LIMIT) {
2096 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2098 if ((scope->loopback_scope == 0) &&
2099 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2101 * Skip loopback devices if
2102 * loopback_scope not set
2106 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2108 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2109 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2110 &sctp_ifap->address.sin.sin_addr) != 0)) {
2115 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2116 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2117 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2121 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2124 if (sctp_is_address_in_scope(sctp_ifap,
2128 if ((chunk_len != NULL) &&
2129 (padding_len != NULL) &&
2130 (*padding_len > 0)) {
2131 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2132 SCTP_BUF_LEN(m_at) += *padding_len;
2133 *chunk_len += *padding_len;
2136 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2147 if (total_count > SCTP_ADDRESS_LIMIT) {
2148 /* No more addresses */
2156 struct sctp_laddr *laddr;
2159 /* First, how many? */
2160 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2161 if (laddr->ifa == NULL) {
2164 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2166 * Address being deleted by the system, don't
2170 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2172 * Address being deleted on this ep, don't
2177 if (sctp_is_address_in_scope(laddr->ifa,
2184 * To get through a NAT we only list addresses if we have
2185 * more than one. That way if you just bind a single address
2186 * we let the source of the init dictate our address.
2190 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2191 if (laddr->ifa == NULL) {
2194 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2197 if (sctp_is_address_in_scope(laddr->ifa,
2201 if ((chunk_len != NULL) &&
2202 (padding_len != NULL) &&
2203 (*padding_len > 0)) {
2204 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2205 SCTP_BUF_LEN(m_at) += *padding_len;
2206 *chunk_len += *padding_len;
2209 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2211 if (cnt >= SCTP_ADDRESS_LIMIT) {
2217 SCTP_IPI_ADDR_RUNLOCK();
2221 static struct sctp_ifa *
2222 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2223 uint8_t dest_is_loop,
2224 uint8_t dest_is_priv,
2227 uint8_t dest_is_global = 0;
2229 /* dest_is_priv is true if destination is a private address */
2230 /* dest_is_loop is true if destination is a loopback address */
2233 * Here we determine if it's a preferred address. A preferred address
2234 * means it is the same scope or higher scope than the destination.
2235 * L = loopback, P = private, G = global
2236 * -----------------------------------------
2237 * src | dest | result
2238 * -----------------------------------------
2239 * L | L | yes
2240 * -----------------------------------------
2241 * P | L | yes-v4 no-v6
2242 * -----------------------------------------
2243 * G | L | yes-v4 no-v6
2244 * -----------------------------------------
2245 * L | P | no
2246 * -----------------------------------------
2247 * P | P | yes
2248 * -----------------------------------------
2249 * G | P | no
2250 * -----------------------------------------
2251 * L | G | no
2252 * -----------------------------------------
2253 * P | G | no
2254 * -----------------------------------------
2255 * G | G | yes
2256 * -----------------------------------------
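 *
 * For example, the "P | G -> no" row says that a private source is not
 * preferred for a global destination; that row corresponds to the
 * (ifa->src_is_priv) && (dest_is_global) rejection further below.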
2259 if (ifa->address.sa.sa_family != fam) {
2260 /* forget mis-matched family */
2263 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2266 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2267 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2268 /* Ok the address may be ok */
2270 if (fam == AF_INET6) {
2271 /* ok to use deprecated addresses? no, let's not! */
2272 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2273 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2276 if (ifa->src_is_priv && !ifa->src_is_loop) {
2278 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2282 if (ifa->src_is_glob) {
2284 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2291 * Now that we know what is what, implement our table. This could in
2292 * theory be done slicker (it used to be), but this is
2293 * straightforward and easier to validate :-)
2295 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2296 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2297 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2298 dest_is_loop, dest_is_priv, dest_is_global);
2300 if ((ifa->src_is_loop) && (dest_is_priv)) {
2301 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2304 if ((ifa->src_is_glob) && (dest_is_priv)) {
2305 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2308 if ((ifa->src_is_loop) && (dest_is_global)) {
2309 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2312 if ((ifa->src_is_priv) && (dest_is_global)) {
2313 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2316 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2317 /* it's a preferred address */
2321 static struct sctp_ifa *
2322 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2323 uint8_t dest_is_loop,
2324 uint8_t dest_is_priv,
2327 uint8_t dest_is_global = 0;
2330 * Here we determine if it's an acceptable address. An acceptable
2331 * address means it is the same scope or higher scope, but we can
2332 * allow for NAT, which means it's ok to have a global dest and a
2335 * L = loopback, P = private, G = global
2336 * -----------------------------------------
2337 * src | dest | result
2338 * -----------------------------------------
2339 * L | L | yes
2340 * -----------------------------------------
2341 * P | L | yes-v4 no-v6
2342 * -----------------------------------------
2343 * G | L | yes
2344 * -----------------------------------------
2345 * L | P | no
2346 * -----------------------------------------
2347 * P | P | yes
2348 * -----------------------------------------
2349 * G | P | yes - May not work
2350 * -----------------------------------------
2351 * L | G | no
2352 * -----------------------------------------
2353 * P | G | yes - May not work
2354 * -----------------------------------------
2355 * G | G | yes
2356 * -----------------------------------------
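 *
 * Note the difference from the "preferred" table above: to allow for
 * NAT, a global source for a private destination and a private source
 * for a global destination are acceptable here (marked "May not work"),
 * even though neither is ever preferred.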
2359 if (ifa->address.sa.sa_family != fam) {
2360 /* forget non-matching family */
2361 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2362 ifa->address.sa.sa_family, fam);
2365 /* Ok the address may be ok */
2366 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2367 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2368 dest_is_loop, dest_is_priv);
2369 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2373 if (fam == AF_INET6) {
2374 /* ok to use deprecated addresses? */
2375 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2378 if (ifa->src_is_priv) {
2379 /* Special case, linklocal to loop */
2386 * Now that we know what is what, implement our table. This could in
2387 * theory be done slicker (it used to be), but this is
2388 * straightforward and easier to validate :-)
2390 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2393 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2396 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2399 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2402 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2403 /* it's an acceptable address */
2408 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2410 struct sctp_laddr *laddr;
2413 /* There are no restrictions, no TCB :-) */
2416 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2417 if (laddr->ifa == NULL) {
2418 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2422 if (laddr->ifa == ifa) {
2423 /* Yes it is on the list */
2432 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2434 struct sctp_laddr *laddr;
2438 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2439 if (laddr->ifa == NULL) {
2440 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2444 if ((laddr->ifa == ifa) && laddr->action == 0)
2453 static struct sctp_ifa *
2454 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2457 int non_asoc_addr_ok,
2458 uint8_t dest_is_priv,
2459 uint8_t dest_is_loop,
2462 struct sctp_laddr *laddr, *starting_point;
2465 struct sctp_ifn *sctp_ifn;
2466 struct sctp_ifa *sctp_ifa, *sifa;
2467 struct sctp_vrf *vrf;
2470 vrf = sctp_find_vrf(vrf_id);
2474 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2475 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2476 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2478 * First question: is the ifn we will emit on in our list? If so, we
2479 * want such an address. Note that we first look for a preferred
2483 /* is a preferred one on the interface we route out? */
2484 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2486 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2487 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2488 &sctp_ifa->address.sin.sin_addr) != 0)) {
2493 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2494 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2495 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2499 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2500 (non_asoc_addr_ok == 0))
2502 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2507 if (sctp_is_addr_in_ep(inp, sifa)) {
2508 atomic_add_int(&sifa->refcount, 1);
2514 * Ok, now we need to find one on the list of addresses. We
2515 * can't get one on the emitting interface, so let's first find a
2516 * preferred one. If not that, an acceptable one; otherwise... we
2519 starting_point = inp->next_addr_touse;
2521 if (inp->next_addr_touse == NULL) {
2522 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2525 for (laddr = inp->next_addr_touse; laddr;
2526 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2527 if (laddr->ifa == NULL) {
2528 /* address has been removed */
2531 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2532 /* address is being deleted */
2535 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2539 atomic_add_int(&sifa->refcount, 1);
2542 if (resettotop == 0) {
2543 inp->next_addr_touse = NULL;
2547 inp->next_addr_touse = starting_point;
2550 if (inp->next_addr_touse == NULL) {
2551 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2555 /* ok, what about an acceptable address in the inp */
2556 for (laddr = inp->next_addr_touse; laddr;
2557 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2558 if (laddr->ifa == NULL) {
2559 /* address has been removed */
2562 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2563 /* address is being deleted */
2566 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2570 atomic_add_int(&sifa->refcount, 1);
2573 if (resettotop == 0) {
2574 inp->next_addr_touse = NULL;
2575 goto once_again_too;
2579 * no address bound can be a source for the destination we are in
2587 static struct sctp_ifa *
2588 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2589 struct sctp_tcb *stcb,
2592 uint8_t dest_is_priv,
2593 uint8_t dest_is_loop,
2594 int non_asoc_addr_ok,
2597 struct sctp_laddr *laddr, *starting_point;
2599 struct sctp_ifn *sctp_ifn;
2600 struct sctp_ifa *sctp_ifa, *sifa;
2601 uint8_t start_at_beginning = 0;
2602 struct sctp_vrf *vrf;
2606 * first question, is the ifn we will emit on in our list, if so, we
2609 vrf = sctp_find_vrf(vrf_id);
2613 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2614 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2615 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2618 * first question, is the ifn we will emit on in our list? If so,
2619 * we want that one. First we look for a preferred. Second, we go
2620 * for an acceptable.
2623 /* first try for a preferred address on the ep */
2624 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2626 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2627 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2628 &sctp_ifa->address.sin.sin_addr) != 0)) {
2633 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2634 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2635 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2639 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2641 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2642 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2645 if (((non_asoc_addr_ok == 0) &&
2646 (sctp_is_addr_restricted(stcb, sifa))) ||
2647 (non_asoc_addr_ok &&
2648 (sctp_is_addr_restricted(stcb, sifa)) &&
2649 (!sctp_is_addr_pending(stcb, sifa)))) {
2650 /* on the no-no list */
2653 atomic_add_int(&sifa->refcount, 1);
2657 /* next try for an acceptable address on the ep */
2658 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2660 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2661 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2662 &sctp_ifa->address.sin.sin_addr) != 0)) {
2667 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2668 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2669 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2673 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2675 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2676 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2679 if (((non_asoc_addr_ok == 0) &&
2680 (sctp_is_addr_restricted(stcb, sifa))) ||
2681 (non_asoc_addr_ok &&
2682 (sctp_is_addr_restricted(stcb, sifa)) &&
2683 (!sctp_is_addr_pending(stcb, sifa)))) {
2684 /* on the no-no list */
2687 atomic_add_int(&sifa->refcount, 1);
2694 * If we can't find one like that then we must look at all bound
2695 * addresses and pick one: first a preferred one, then an acceptable one.
2697 starting_point = stcb->asoc.last_used_address;
2699 if (stcb->asoc.last_used_address == NULL) {
2700 start_at_beginning = 1;
2701 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2703 /* search beginning with the last used address */
2704 for (laddr = stcb->asoc.last_used_address; laddr;
2705 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2706 if (laddr->ifa == NULL) {
2707 /* address has been removed */
2710 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2711 /* address is being deleted */
2714 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2717 if (((non_asoc_addr_ok == 0) &&
2718 (sctp_is_addr_restricted(stcb, sifa))) ||
2719 (non_asoc_addr_ok &&
2720 (sctp_is_addr_restricted(stcb, sifa)) &&
2721 (!sctp_is_addr_pending(stcb, sifa)))) {
2722 /* on the no-no list */
2725 stcb->asoc.last_used_address = laddr;
2726 atomic_add_int(&sifa->refcount, 1);
2729 if (start_at_beginning == 0) {
2730 stcb->asoc.last_used_address = NULL;
2731 goto sctp_from_the_top;
2733 /* now try for any higher scope than the destination */
2734 stcb->asoc.last_used_address = starting_point;
2735 start_at_beginning = 0;
2737 if (stcb->asoc.last_used_address == NULL) {
2738 start_at_beginning = 1;
2739 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2741 /* search beginning with the last used address */
2742 for (laddr = stcb->asoc.last_used_address; laddr;
2743 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2744 if (laddr->ifa == NULL) {
2745 /* address has been removed */
2748 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2749 /* address is being deleted */
2752 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2756 if (((non_asoc_addr_ok == 0) &&
2757 (sctp_is_addr_restricted(stcb, sifa))) ||
2758 (non_asoc_addr_ok &&
2759 (sctp_is_addr_restricted(stcb, sifa)) &&
2760 (!sctp_is_addr_pending(stcb, sifa)))) {
2761 /* on the no-no list */
2764 stcb->asoc.last_used_address = laddr;
2765 atomic_add_int(&sifa->refcount, 1);
2768 if (start_at_beginning == 0) {
2769 stcb->asoc.last_used_address = NULL;
2770 goto sctp_from_the_top2;
2775 static struct sctp_ifa *
2776 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2777 struct sctp_inpcb *inp,
2778 struct sctp_tcb *stcb,
2779 int non_asoc_addr_ok,
2780 uint8_t dest_is_loop,
2781 uint8_t dest_is_priv,
2787 struct sctp_ifa *ifa, *sifa;
2788 int num_eligible_addr = 0;
2790 struct sockaddr_in6 sin6, lsa6;
2792 if (fam == AF_INET6) {
2793 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2794 (void)sa6_recoverscope(&sin6);
2797 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2799 if ((ifa->address.sa.sa_family == AF_INET) &&
2800 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2801 &ifa->address.sin.sin_addr) != 0)) {
2806 if ((ifa->address.sa.sa_family == AF_INET6) &&
2807 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2808 &ifa->address.sin6.sin6_addr) != 0)) {
2812 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2813 (non_asoc_addr_ok == 0))
2815 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2820 if (fam == AF_INET6 &&
2822 sifa->src_is_loop && sifa->src_is_priv) {
2824 * don't allow fe80::1 to be a src on loop ::1, we
2825 * don't list it to the peer so we will get an
2830 if (fam == AF_INET6 &&
2831 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2832 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2834 * link-local <-> link-local must belong to the same
2837 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2838 (void)sa6_recoverscope(&lsa6);
2839 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2846 * Check if the IPv6 address matches the next-hop. In the
2847 * mobile case, an old IPv6 address may not be deleted from
2848 * the interface, so the interface can have both previous and
2849 * new addresses. We should use the one corresponding to the
2850 * next-hop. (by micchie)
2853 if (stcb && fam == AF_INET6 &&
2854 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2855 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2862 /* Avoid topologically incorrect IPv4 address */
2863 if (stcb && fam == AF_INET &&
2864 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2865 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2871 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2874 if (((non_asoc_addr_ok == 0) &&
2875 (sctp_is_addr_restricted(stcb, sifa))) ||
2876 (non_asoc_addr_ok &&
2877 (sctp_is_addr_restricted(stcb, sifa)) &&
2878 (!sctp_is_addr_pending(stcb, sifa)))) {
2880 * It is restricted for some reason..
2881 * probably not yet added.
2886 if (num_eligible_addr >= addr_wanted) {
2889 num_eligible_addr++;
2896 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2897 struct sctp_inpcb *inp,
2898 struct sctp_tcb *stcb,
2899 int non_asoc_addr_ok,
2900 uint8_t dest_is_loop,
2901 uint8_t dest_is_priv,
2904 struct sctp_ifa *ifa, *sifa;
2905 int num_eligible_addr = 0;
2907 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2909 if ((ifa->address.sa.sa_family == AF_INET) &&
2910 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2911 &ifa->address.sin.sin_addr) != 0)) {
2916 if ((ifa->address.sa.sa_family == AF_INET6) &&
2918 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2919 &ifa->address.sin6.sin6_addr) != 0)) {
2923 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2924 (non_asoc_addr_ok == 0)) {
2927 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2933 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2936 if (((non_asoc_addr_ok == 0) &&
2937 (sctp_is_addr_restricted(stcb, sifa))) ||
2938 (non_asoc_addr_ok &&
2939 (sctp_is_addr_restricted(stcb, sifa)) &&
2940 (!sctp_is_addr_pending(stcb, sifa)))) {
2942 * It is restricted for some reason..
2943 * probably not yet added.
2948 num_eligible_addr++;
2950 return (num_eligible_addr);
2953 static struct sctp_ifa *
2954 sctp_choose_boundall(struct sctp_inpcb *inp,
2955 struct sctp_tcb *stcb,
2956 struct sctp_nets *net,
2959 uint8_t dest_is_priv,
2960 uint8_t dest_is_loop,
2961 int non_asoc_addr_ok,
2964 int cur_addr_num = 0, num_preferred = 0;
2966 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2967 struct sctp_ifa *sctp_ifa, *sifa;
2969 struct sctp_vrf *vrf;
2975 * For boundall we can use any address in the association.
2976 * If non_asoc_addr_ok is set we can use any address (at least in
2977 * theory). So we look for preferred addresses first. If we find one,
2978 * we use it. Otherwise we next try to get an address on the
2979 * interface, which we should be able to do (unless non_asoc_addr_ok
2980 * is false and we are routed out that way). In these cases where we
2981 * can't use the address of the interface we go through all the
2982 * ifn's looking for an address we can use and fill that in. Punting
2983 * means we send back address 0, which will probably cause problems
2984 * actually since then IP will fill in the address of the route ifn,
2985 * which means we probably already rejected it.. i.e. here comes an
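 *
 * Rough order of the plans implemented below: plan A tries a preferred
 * address on the interface the route emits on; plan B walks every
 * interface in the VRF looking for a preferred address; plan C falls
 * back to an address that is merely acceptable on the emit interface;
 * plan D takes any acceptable address on any interface, retrying once
 * with IPv4 private addresses allowed if nothing was found.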
2988 vrf = sctp_find_vrf(vrf_id);
2992 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2993 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2994 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2995 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2996 if (sctp_ifn == NULL) {
2997 /* ?? We don't have this guy ?? */
2998 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2999 goto bound_all_plan_b;
3001 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
3002 ifn_index, sctp_ifn->ifn_name);
3005 cur_addr_num = net->indx_of_eligible_next_to_use;
3007 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3012 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3013 num_preferred, sctp_ifn->ifn_name);
3014 if (num_preferred == 0) {
3016 * no eligible addresses, we must use some other interface
3017 * address if we can find one.
3019 goto bound_all_plan_b;
3022 * Ok, we have num_eligible_addr set with how many we can use; this
3023 * may vary from call to call due to addresses being deprecated
3026 if (cur_addr_num >= num_preferred) {
3030 * select the nth address from the list (where cur_addr_num is the
3031 * nth) and 0 is the first one, 1 is the second one etc...
3033 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3035 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3036 dest_is_priv, cur_addr_num, fam, ro);
3038 /* if sctp_ifa is NULL something changed??, fall to plan b. */
3040 atomic_add_int(&sctp_ifa->refcount, 1);
3042 /* save off the index of the next one we will want */
3043 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3048 * plan_b: Look at all interfaces and find a preferred address. If
3049 * no preferred fall through to plan_c.
3052 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3053 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3054 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3055 sctp_ifn->ifn_name);
3056 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3057 /* wrong base scope */
3058 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3061 if ((sctp_ifn == looked_at) && looked_at) {
3062 /* already looked at this guy */
3063 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3066 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3067 dest_is_loop, dest_is_priv, fam);
3068 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3069 "Found ifn:%p %d preferred source addresses\n",
3070 ifn, num_preferred);
3071 if (num_preferred == 0) {
3072 /* None on this interface. */
3073 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3076 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3077 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3078 num_preferred, (void *)sctp_ifn, cur_addr_num);
3081 * Ok, we have num_eligible_addr set with how many we can
3082 * use; this may vary from call to call due to addresses
3083 * being deprecated, etc.
3085 if (cur_addr_num >= num_preferred) {
3088 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3089 dest_is_priv, cur_addr_num, fam, ro);
3093 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3094 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3096 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3097 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3098 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3099 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3101 atomic_add_int(&sifa->refcount, 1);
3105 again_with_private_addresses_allowed:
3107 /* plan_c: do we have an acceptable address on the emit interface */
3109 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3110 if (emit_ifn == NULL) {
3111 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3114 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3115 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3117 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3118 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3119 &sctp_ifa->address.sin.sin_addr) != 0)) {
3120 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3125 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3126 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3127 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3128 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3132 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3133 (non_asoc_addr_ok == 0)) {
3134 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3137 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3140 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3144 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3145 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3149 if (((non_asoc_addr_ok == 0) &&
3150 (sctp_is_addr_restricted(stcb, sifa))) ||
3151 (non_asoc_addr_ok &&
3152 (sctp_is_addr_restricted(stcb, sifa)) &&
3153 (!sctp_is_addr_pending(stcb, sifa)))) {
3155 * It is restricted for some reason..
3156 * probably not yet added.
3158 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3163 atomic_add_int(&sifa->refcount, 1);
3168 * plan_d: We are in trouble. No preferred address on the emit
3169 * interface, and not even a preferred address on any interface. Go
3170 * out and see if we can find an acceptable address somewhere
3171 * amongst all interfaces.
3173 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3174 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3175 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3176 /* wrong base scope */
3179 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3181 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3182 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3183 &sctp_ifa->address.sin.sin_addr) != 0)) {
3188 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3189 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3190 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3194 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3195 (non_asoc_addr_ok == 0))
3197 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3203 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3207 if (((non_asoc_addr_ok == 0) &&
3208 (sctp_is_addr_restricted(stcb, sifa))) ||
3209 (non_asoc_addr_ok &&
3210 (sctp_is_addr_restricted(stcb, sifa)) &&
3211 (!sctp_is_addr_pending(stcb, sifa)))) {
3213 * It is restricted for some
3214 * reason.. probably not yet added.
3225 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3226 stcb->asoc.scope.ipv4_local_scope = 1;
3228 goto again_with_private_addresses_allowed;
3229 } else if (retried == 1) {
3230 stcb->asoc.scope.ipv4_local_scope = 0;
3238 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3239 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3240 /* wrong base scope */
3243 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3244 struct sctp_ifa *tmp_sifa;
3247 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3248 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3249 &sctp_ifa->address.sin.sin_addr) != 0)) {
3254 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3255 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3256 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3260 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3261 (non_asoc_addr_ok == 0))
3263 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3266 if (tmp_sifa == NULL) {
3269 if (tmp_sifa == sifa) {
3273 if (sctp_is_address_in_scope(tmp_sifa,
3274 &stcb->asoc.scope, 0) == 0) {
3277 if (((non_asoc_addr_ok == 0) &&
3278 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3279 (non_asoc_addr_ok &&
3280 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3281 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3291 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3292 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3293 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3298 atomic_add_int(&sifa->refcount, 1);
3306 /* tcb may be NULL */
3308 sctp_source_address_selection(struct sctp_inpcb *inp,
3309 struct sctp_tcb *stcb,
3311 struct sctp_nets *net,
3312 int non_asoc_addr_ok, uint32_t vrf_id)
3314 struct sctp_ifa *answer;
3315 uint8_t dest_is_priv, dest_is_loop;
3318 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3321 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3326 * - Find the route if needed, cache if I can.
3327 * - Look at the interface address in the route. Is it in the bound list? If so we
3328 * have the best source.
3329 * - If not we must rotate amongst the addresses.
3333 * Do we need to pay attention to scope? We can have a private address
3334 * or a global address we are sourcing from or sending to. So if we draw
3336 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3338 * ------------------------------------------
3339 * source * dest * result
3340 * -----------------------------------------
3341 * <a> Private * Global * NAT
3342 * -----------------------------------------
3343 * <b> Private * Private * No problem
3344 * -----------------------------------------
3345 * <c> Global * Private * Huh, How will this work?
3346 * -----------------------------------------
3347 * <d> Global * Global * No Problem
3348 *------------------------------------------
3349 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3351 *------------------------------------------
3352 * source * dest * result
3353 * -----------------------------------------
3354 * <a> Linklocal * Global *
3355 * -----------------------------------------
3356 * <b> Linklocal * Linklocal * No problem
3357 * -----------------------------------------
3358 * <c> Global * Linklocal * Huh, How will this work?
3359 * -----------------------------------------
3360 * <d> Global * Global * No Problem
3361 *------------------------------------------
3362 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3364 * And then we add to that what happens if there are multiple addresses
3365 * assigned to an interface. Remember, the ifa on an ifn is a linked
3366 * list of addresses. So one interface can have more than one IP
3367 * address. What happens if we have both a private and a global
3368 * address? Do we then use the context of the destination to sort out
3369 * which one is best? And what about NATs: sending P->G may get you a NAT
3370 * translation, or should you select the G that's on the interface in
3375 * - count the number of addresses on the interface.
3376 * - if it is one, no problem except case <c>.
3377 * For <a> we will assume a NAT out there.
3378 * - if there are more than one, then we need to worry about scope P
3379 * or G. We should prefer G -> G and P -> P if possible.
3380 * Then as a secondary fall back to mixed types G->P being a last
3382 * - The above all works for bound-all, but for bound-specific we need to
3383 * use the same concept, only considering the bound
3384 * addresses. If the bound set is NOT assigned to the interface then
3385 * we must rotate amongst the bound addresses.
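 *
 * A concrete example of the rules above: for a private (RFC 1918) IPv4
 * destination, dest_is_priv is set below and the selection routines
 * prefer a private source over a global one; for a loopback
 * destination, dest_is_loop is set and only loopback-scoped sources
 * are preferred.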
3387 if (ro->ro_rt == NULL) {
3389 * Need a route to cache.
3391 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3393 if (ro->ro_rt == NULL) {
3396 fam = ro->ro_dst.sa_family;
3397 dest_is_priv = dest_is_loop = 0;
3398 /* Setup our scopes for the destination */
3402 /* Scope based on outbound address */
3403 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3406 /* mark it as local */
3407 net->addr_is_local = 1;
3409 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3416 /* Scope based on outbound address */
3417 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3418 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3420 * If the address is a loopback address, which
3421 * consists of "::1" OR "fe80::1%lo0", we are
3422 * loopback scope. But we don't use dest_is_priv
3423 * (link local addresses).
3427 /* mark it as local */
3428 net->addr_is_local = 1;
3430 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3436 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3437 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3438 SCTP_IPI_ADDR_RLOCK();
3439 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3443 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3444 dest_is_priv, dest_is_loop,
3445 non_asoc_addr_ok, fam);
3446 SCTP_IPI_ADDR_RUNLOCK();
3453 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3454 vrf_id, dest_is_priv,
3456 non_asoc_addr_ok, fam);
3458 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3463 SCTP_IPI_ADDR_RUNLOCK();
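/*
 * sctp_find_cmsg() walks the control mbuf chain looking for a cmsg of
 * the requested type at level IPPROTO_SCTP and copies its payload into
 * "data"; for SCTP_SNDRCV it also synthesizes a sctp_sndrcvinfo from
 * SCTP_SNDINFO, SCTP_PRINFO and SCTP_AUTHINFO cmsgs. As an illustration
 * (a user-space sketch, not code from this file), the layout it parses
 * is the one produced by the standard CMSG macros:
 *
 *	char buf[CMSG_SPACE(sizeof(struct sctp_sndinfo))];
 *	struct msghdr msg = { .msg_control = buf, .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = IPPROTO_SCTP;
 *	cmsg->cmsg_type = SCTP_SNDINFO;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndinfo));
 *	((struct sctp_sndinfo *)CMSG_DATA(cmsg))->snd_sid = 1;
 */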
3468 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3471 struct sctp_sndinfo sndinfo;
3472 struct sctp_prinfo prinfo;
3473 struct sctp_authinfo authinfo;
3474 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3478 * Independent of how many mbufs, find the c_type inside the control
3479 * structure and copy out the data.
3482 tot_len = SCTP_BUF_LEN(control);
3483 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3484 rem_len = tot_len - off;
3485 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3486 /* There is not enough room for one more. */
3489 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3490 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3491 /* We don't have a complete CMSG header. */
3494 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3495 /* We don't have the complete CMSG. */
3498 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3499 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3500 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3501 ((c_type == cmh.cmsg_type) ||
3502 ((c_type == SCTP_SNDRCV) &&
3503 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3504 (cmh.cmsg_type == SCTP_PRINFO) ||
3505 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3506 if (c_type == cmh.cmsg_type) {
3507 if (cpsize > INT_MAX) {
3510 if (cmsg_data_len < (int)cpsize) {
3513 /* It is exactly what we want. Copy it out. */
3514 m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data);
3517 struct sctp_sndrcvinfo *sndrcvinfo;
3519 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3521 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3524 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3526 switch (cmh.cmsg_type) {
3528 if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
3531 m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3532 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3533 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3534 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3535 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3536 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3539 if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
3542 m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3543 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3544 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3546 sndrcvinfo->sinfo_timetolive = 0;
3548 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3551 if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
3554 m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3555 sndrcvinfo->sinfo_keynumber_valid = 1;
3556 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
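/*
 * sctp_process_cmsgs_for_init() applies cmsgs supplied along with an
 * implicit association setup: an SCTP_INIT cmsg overrides the stream
 * counts and INIT limits of the new association (growing the outgoing
 * stream array if needed), while SCTP_DSTADDRV4 and SCTP_DSTADDRV6
 * cmsgs add further remote addresses, rejecting wildcard, broadcast
 * and multicast destinations.
 */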
3569 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3572 struct sctp_initmsg initmsg;
3574 struct sockaddr_in sin;
3577 struct sockaddr_in6 sin6;
3579 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3581 tot_len = SCTP_BUF_LEN(control);
3582 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3583 rem_len = tot_len - off;
3584 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3585 /* There is not enough room for one more. */
3589 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3590 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3591 /* We don't have a complete CMSG header. */
3595 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3596 /* We don't have the complete CMSG. */
3600 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3601 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3602 if (cmh.cmsg_level == IPPROTO_SCTP) {
3603 switch (cmh.cmsg_type) {
3605 if (cmsg_data_len < (int)sizeof(struct sctp_initmsg)) {
3609 m_copydata(control, cmsg_data_off, sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3610 if (initmsg.sinit_max_attempts)
3611 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3612 if (initmsg.sinit_num_ostreams)
3613 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3614 if (initmsg.sinit_max_instreams)
3615 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3616 if (initmsg.sinit_max_init_timeo)
3617 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3618 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3619 struct sctp_stream_out *tmp_str;
3621 #if defined(SCTP_DETAILED_STR_STATS)
3625 /* Default is NOT correct */
3626 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3627 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3628 SCTP_TCB_UNLOCK(stcb);
3629 SCTP_MALLOC(tmp_str,
3630 struct sctp_stream_out *,
3631 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3633 SCTP_TCB_LOCK(stcb);
3634 if (tmp_str != NULL) {
3635 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3636 stcb->asoc.strmout = tmp_str;
3637 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3639 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3641 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3642 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3643 stcb->asoc.strmout[i].chunks_on_queues = 0;
3644 stcb->asoc.strmout[i].next_mid_ordered = 0;
3645 stcb->asoc.strmout[i].next_mid_unordered = 0;
3646 #if defined(SCTP_DETAILED_STR_STATS)
3647 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3648 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3649 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3652 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3653 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3655 stcb->asoc.strmout[i].sid = i;
3656 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3657 stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3658 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3663 case SCTP_DSTADDRV4:
3664 if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3668 memset(&sin, 0, sizeof(struct sockaddr_in));
3669 sin.sin_family = AF_INET;
3670 sin.sin_len = sizeof(struct sockaddr_in);
3671 sin.sin_port = stcb->rport;
3672 m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3673 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3674 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3675 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3679 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3680 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3687 case SCTP_DSTADDRV6:
3688 if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3692 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3693 sin6.sin6_family = AF_INET6;
3694 sin6.sin6_len = sizeof(struct sockaddr_in6);
3695 sin6.sin6_port = stcb->rport;
3696 m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3697 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3698 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3703 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3704 in6_sin6_2_sin(&sin, &sin6);
3705 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3706 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3707 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3711 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3712 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3718 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3719 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3733 static struct sctp_tcb *
3734 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3736 struct mbuf *control,
3737 struct sctp_nets **net_p,
3741 struct sctp_tcb *stcb;
3742 struct sockaddr *addr;
3744 struct sockaddr_in sin;
3747 struct sockaddr_in6 sin6;
3749 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3751 tot_len = SCTP_BUF_LEN(control);
3752 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3753 rem_len = tot_len - off;
3754 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3755 /* There is not enough room for one more. */
3759 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3760 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3761 /* We don't have a complete CMSG header. */
3765 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3766 /* We don't have the complete CMSG. */
3770 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3771 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3772 if (cmh.cmsg_level == IPPROTO_SCTP) {
3773 switch (cmh.cmsg_type) {
3775 case SCTP_DSTADDRV4:
3776 if (cmsg_data_len < (int)sizeof(struct in_addr)) {
3780 memset(&sin, 0, sizeof(struct sockaddr_in));
3781 sin.sin_family = AF_INET;
3782 sin.sin_len = sizeof(struct sockaddr_in);
3783 sin.sin_port = port;
3784 m_copydata(control, cmsg_data_off, sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3785 addr = (struct sockaddr *)&sin;
3789 case SCTP_DSTADDRV6:
3790 if (cmsg_data_len < (int)sizeof(struct in6_addr)) {
3794 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3795 sin6.sin6_family = AF_INET6;
3796 sin6.sin6_len = sizeof(struct sockaddr_in6);
3797 sin6.sin6_port = port;
3798 m_copydata(control, cmsg_data_off, sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3800 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3801 in6_sin6_2_sin(&sin, &sin6);
3802 addr = (struct sockaddr *)&sin;
3805 addr = (struct sockaddr *)&sin6;
3813 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
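/*
 * sctp_add_cookie() builds the state-cookie parameter as an mbuf chain:
 * a parameter header followed by the sctp_state_cookie, then verbatim
 * copies of the received INIT and of the INIT-ACK being sent, and
 * finally space for the signature, which is zeroed here and filled in
 * later through the returned "signature" pointer. The parameter length
 * is patched once the total cookie size is known.
 */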
3823 static struct mbuf *
3824 sctp_add_cookie(struct mbuf *init, int init_offset,
3825 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3827 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3828 struct sctp_state_cookie *stc;
3829 struct sctp_paramhdr *ph;
3834 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3835 sizeof(struct sctp_paramhdr)), 0,
3836 M_NOWAIT, 1, MT_DATA);
3840 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3841 if (copy_init == NULL) {
3845 #ifdef SCTP_MBUF_LOGGING
3846 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3847 sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3850 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3852 if (copy_initack == NULL) {
3854 sctp_m_freem(copy_init);
3857 #ifdef SCTP_MBUF_LOGGING
3858 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3859 sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3862 /* easy side we just drop it on the end */
3863 ph = mtod(mret, struct sctp_paramhdr *);
3864 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3865 sizeof(struct sctp_paramhdr);
3866 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3867 sizeof(struct sctp_paramhdr));
3868 ph->param_type = htons(SCTP_STATE_COOKIE);
3869 ph->param_length = 0; /* fill in at the end */
3870 /* Fill in the stc cookie data */
3871 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3873 /* tack the INIT and then the INIT-ACK onto the chain */
3875 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3876 cookie_sz += SCTP_BUF_LEN(m_at);
3877 if (SCTP_BUF_NEXT(m_at) == NULL) {
3878 SCTP_BUF_NEXT(m_at) = copy_init;
3882 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3883 cookie_sz += SCTP_BUF_LEN(m_at);
3884 if (SCTP_BUF_NEXT(m_at) == NULL) {
3885 SCTP_BUF_NEXT(m_at) = copy_initack;
3889 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3890 cookie_sz += SCTP_BUF_LEN(m_at);
3891 if (SCTP_BUF_NEXT(m_at) == NULL) {
3895 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
3897 /* no space, so free the entire chain */
3901 SCTP_BUF_LEN(sig) = 0;
3902 SCTP_BUF_NEXT(m_at) = sig;
3904 foo = (uint8_t *)(mtod(sig, caddr_t)+sig_offset);
3905 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3907 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3908 cookie_sz += SCTP_SIGNATURE_SIZE;
3909 ph->param_length = htons(cookie_sz);
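/*
 * Rough sketch of the state-cookie parameter assembled above (sizes are
 * illustrative; the INIT and INIT-ACK copies keep whatever lengths were
 * actually exchanged):
 *
 *   +------------------------------+ <- mret
 *   | sctp_paramhdr (STATE COOKIE) |   param_length = cookie_sz
 *   | sctp_state_cookie (stc_in)   |
 *   +------------------------------+ <- copy_init, chained
 *   | complete INIT chunk          |
 *   +------------------------------+ <- copy_initack, chained
 *   | complete INIT-ACK chunk      |
 *   +------------------------------+ <- sig, chained
 *   | SCTP_SIGNATURE_SIZE zeroes   |   HMAC is filled in later by the
 *   +------------------------------+   caller through *signature
 */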
3915 sctp_get_ect(struct sctp_tcb *stcb)
3917 if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
3918 return (SCTP_ECT0_BIT);
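/*
 * The ECT(0) codepoint returned here is OR'd into the IPv4 TOS byte or
 * the IPv6 traffic class by the output path below ("tos_value |=
 * sctp_get_ect(stcb)"), so packets only carry an ECN-capable marking
 * when the association negotiated ECN support.
 */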
3924 #if defined(INET) || defined(INET6)
3926 sctp_handle_no_route(struct sctp_tcb *stcb,
3927 struct sctp_nets *net,
3930 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3933 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3934 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3935 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3936 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3937 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
3938 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3942 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3943 net->dest_state &= ~SCTP_ADDR_PF;
3947 if (net == stcb->asoc.primary_destination) {
3948 /* need a new primary */
3949 struct sctp_nets *alt;
3951 alt = sctp_find_alternate_net(stcb, net, 0);
3953 if (stcb->asoc.alternate) {
3954 sctp_free_remote_addr(stcb->asoc.alternate);
3956 stcb->asoc.alternate = alt;
3957 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3958 if (net->ro._s_addr) {
3959 sctp_free_ifa(net->ro._s_addr);
3960 net->ro._s_addr = NULL;
3962 net->src_addr_selected = 0;
3971 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3972 struct sctp_tcb *stcb, /* may be NULL */
3973 struct sctp_nets *net,
3974 struct sockaddr *to,
3976 uint32_t auth_offset,
3977 struct sctp_auth_chunk *auth,
3978 uint16_t auth_keyid,
3979 int nofragment_flag,
3986 union sctp_sockstore *over_addr,
3987 uint8_t mflowtype, uint32_t mflowid,
3988 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3989 int so_locked SCTP_UNUSED
3995 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3997 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3998 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3999 * - fill in the HMAC digest of any AUTH chunk in the packet.
4000 * - calculate and fill in the SCTP checksum.
4001 * - prepend an IP address header.
4002 * - if boundall use INADDR_ANY.
4003 * - if boundspecific do source address selection.
4004 * - set fragmentation option for ipV4.
4005 * - On return from IP output, check/adjust mtu size of output
4006 * interface and smallest_mtu size as well.
4008 /* Will need ifdefs around this */
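/*
 * Condensed sketch of the per-address-family work below (error paths
 * and statistics omitted):
 *
 *   1. Resolve a route and a source address: cached in net->ro when a
 *      destination net is supplied, a temporary route on the stack
 *      otherwise.
 *   2. Prepend the IPv4 or IPv6 header, plus a UDP header when the
 *      packet is UDP-encapsulated (port != 0).
 *   3. Fill in the SCTP common header, then either compute the CRC32c
 *      in software (UDP encapsulation) or flag the mbuf for checksum
 *      offload.
 *   4. Hand the packet to SCTP_IP_OUTPUT()/SCTP_IP6_OUTPUT() and
 *      afterwards update MTU bookkeeping against the route.
 */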
4010 struct sctphdr *sctphdr;
4013 #if defined(INET) || defined(INET6)
4016 #if defined(INET) || defined(INET6)
4018 sctp_route_t *ro = NULL;
4019 struct udphdr *udp = NULL;
4022 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4023 struct socket *so = NULL;
4026 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4027 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4031 #if defined(INET) || defined(INET6)
4033 vrf_id = stcb->asoc.vrf_id;
4035 vrf_id = inp->def_vrf_id;
4038 /* fill in the HMAC digest for any AUTH chunk in the packet */
4039 if ((auth != NULL) && (stcb != NULL)) {
4040 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4044 tos_value = net->dscp;
4046 tos_value = stcb->asoc.default_dscp;
4048 tos_value = inp->sctp_ep.default_dscp;
4051 switch (to->sa_family) {
4055 struct ip *ip = NULL;
4056 sctp_route_t iproute;
4059 len = SCTP_MIN_V4_OVERHEAD;
4061 len += sizeof(struct udphdr);
4063 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4066 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4069 SCTP_ALIGN_TO_END(newm, len);
4070 SCTP_BUF_LEN(newm) = len;
4071 SCTP_BUF_NEXT(newm) = m;
4074 m->m_pkthdr.flowid = net->flowid;
4075 M_HASHTYPE_SET(m, net->flowtype);
4077 m->m_pkthdr.flowid = mflowid;
4078 M_HASHTYPE_SET(m, mflowtype);
4080 packet_length = sctp_calculate_len(m);
4081 ip = mtod(m, struct ip *);
4082 ip->ip_v = IPVERSION;
4083 ip->ip_hl = (sizeof(struct ip) >> 2);
4084 if (tos_value == 0) {
4086 * In particular, this means it is not set
4087 * at the SCTP layer, so use the value from the IP layer.
4090 tos_value = inp->ip_inp.inp.inp_ip_tos;
4094 tos_value |= sctp_get_ect(stcb);
4096 if ((nofragment_flag) && (port == 0)) {
4097 ip->ip_off = htons(IP_DF);
4099 ip->ip_off = htons(0);
4101 /* FreeBSD has a function for ip_id's */
4104 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4105 ip->ip_len = htons(packet_length);
4106 ip->ip_tos = tos_value;
4108 ip->ip_p = IPPROTO_UDP;
4110 ip->ip_p = IPPROTO_SCTP;
4115 memset(&iproute, 0, sizeof(iproute));
4116 memcpy(&ro->ro_dst, to, to->sa_len);
4118 ro = (sctp_route_t *)&net->ro;
4120 /* Now the address selection part */
4121 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4123 /* call the routine to select the src address */
4124 if (net && out_of_asoc_ok == 0) {
4125 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4126 sctp_free_ifa(net->ro._s_addr);
4127 net->ro._s_addr = NULL;
4128 net->src_addr_selected = 0;
4134 if (net->src_addr_selected == 0) {
4135 /* Cache the source address */
4136 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4139 net->src_addr_selected = 1;
4141 if (net->ro._s_addr == NULL) {
4142 /* No route to host */
4143 net->src_addr_selected = 0;
4144 sctp_handle_no_route(stcb, net, so_locked);
4145 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4147 return (EHOSTUNREACH);
4149 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4151 if (over_addr == NULL) {
4152 struct sctp_ifa *_lsrc;
4154 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4158 if (_lsrc == NULL) {
4159 sctp_handle_no_route(stcb, net, so_locked);
4160 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4162 return (EHOSTUNREACH);
4164 ip->ip_src = _lsrc->address.sin.sin_addr;
4165 sctp_free_ifa(_lsrc);
4167 ip->ip_src = over_addr->sin.sin_addr;
4168 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4172 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4173 sctp_handle_no_route(stcb, net, so_locked);
4174 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4176 return (EHOSTUNREACH);
4178 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4179 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4180 udp->uh_dport = port;
4181 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
4183 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
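/*
 * Only the pseudo-header portion of the UDP checksum is seeded here;
 * completing the checksum over the encapsulated datagram is deferred
 * (see the SCTP_ENABLE_UDP_CSUM() call further below), the usual split
 * for pseudo-header based checksums.
 */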
4187 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4189 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4192 sctphdr->src_port = src_port;
4193 sctphdr->dest_port = dest_port;
4194 sctphdr->v_tag = v_tag;
4195 sctphdr->checksum = 0;
4198 * If source address selection fails and we find no
4199 * route then the ip_output should fail as well with
4200 * a NO_ROUTE_TO_HOST type error. We probably should
4201 * catch that somewhere and abort the association
4202 * right away (assuming this is an INIT being sent).
4204 if (ro->ro_rt == NULL) {
4206 * src addr selection failed to find a route
4207 * (or valid source addr), so we can't get
4208 * there from here (yet)!
4210 sctp_handle_no_route(stcb, net, so_locked);
4211 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4213 return (EHOSTUNREACH);
4215 if (ro != &iproute) {
4216 memcpy(&iproute, ro, sizeof(*ro));
4218 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4219 (uint32_t)(ntohl(ip->ip_src.s_addr)));
4220 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4221 (uint32_t)(ntohl(ip->ip_dst.s_addr)));
4222 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4225 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4226 /* failed to prepend data, give up */
4227 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4231 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4233 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4234 SCTP_STAT_INCR(sctps_sendswcrc);
4236 SCTP_ENABLE_UDP_CSUM(o_pak);
4239 m->m_pkthdr.csum_flags = CSUM_SCTP;
4240 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4241 SCTP_STAT_INCR(sctps_sendhwcrc);
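/*
 * Checksum strategy: when the packet is tunneled over UDP the CRC32c
 * is computed in software, since offload engines generally cannot
 * reach an SCTP header hidden behind a UDP header; otherwise the
 * CSUM_SCTP flag asks the stack or NIC to fill in the checksum at
 * offsetof(struct sctphdr, checksum).
 */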
4243 #ifdef SCTP_PACKET_LOGGING
4244 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4245 sctp_packet_log(o_pak);
4247 /* send it out. table id is taken from stcb */
4248 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4249 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4250 so = SCTP_INP_SO(inp);
4251 SCTP_SOCKET_UNLOCK(so, 0);
4254 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4255 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4256 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4257 atomic_add_int(&stcb->asoc.refcnt, 1);
4258 SCTP_TCB_UNLOCK(stcb);
4259 SCTP_SOCKET_LOCK(so, 0);
4260 SCTP_TCB_LOCK(stcb);
4261 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4265 UDPSTAT_INC(udps_opackets);
4267 SCTP_STAT_INCR(sctps_sendpackets);
4268 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4270 SCTP_STAT_INCR(sctps_senderrors);
4272 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4274 /* free tempy routes */
4277 if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4278 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4281 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4284 mtu -= sizeof(struct udphdr);
4286 if (mtu < net->mtu) {
4287 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4288 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4293 } else if (ro->ro_rt == NULL) {
4294 /* route was freed */
4295 if (net->ro._s_addr &&
4296 net->src_addr_selected) {
4297 sctp_free_ifa(net->ro._s_addr);
4298 net->ro._s_addr = NULL;
4300 net->src_addr_selected = 0;
4309 uint32_t flowlabel, flowinfo;
4310 struct ip6_hdr *ip6h;
4311 struct route_in6 ip6route;
4313 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4315 struct sockaddr_in6 lsa6_storage;
4317 u_short prev_port = 0;
4321 flowlabel = net->flowlabel;
4323 flowlabel = stcb->asoc.default_flowlabel;
4325 flowlabel = inp->sctp_ep.default_flowlabel;
4327 if (flowlabel == 0) {
4329 * In particular, this means it is not set
4330 * at the SCTP layer, so use the value from the IP layer.
4333 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4335 flowlabel &= 0x000fffff;
4336 len = SCTP_MIN_OVERHEAD;
4338 len += sizeof(struct udphdr);
4340 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4343 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4346 SCTP_ALIGN_TO_END(newm, len);
4347 SCTP_BUF_LEN(newm) = len;
4348 SCTP_BUF_NEXT(newm) = m;
4351 m->m_pkthdr.flowid = net->flowid;
4352 M_HASHTYPE_SET(m, net->flowtype);
4354 m->m_pkthdr.flowid = mflowid;
4355 M_HASHTYPE_SET(m, mflowtype);
4357 packet_length = sctp_calculate_len(m);
4359 ip6h = mtod(m, struct ip6_hdr *);
4360 /* protect *sin6 from overwrite */
4361 sin6 = (struct sockaddr_in6 *)to;
4365 /* KAME hack: embed scopeid */
4366 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4367 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4372 memset(&ip6route, 0, sizeof(ip6route));
4373 ro = (sctp_route_t *)&ip6route;
4374 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4376 ro = (sctp_route_t *)&net->ro;
4379 * We assume here that inp_flow is in host byte
4380 * order within the TCB!
4382 if (tos_value == 0) {
4384 * In particular, this means it is not set
4385 * at the SCTP layer, so use the value from the IP layer.
4388 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4392 tos_value |= sctp_get_ect(stcb);
4396 flowinfo |= tos_value;
4398 flowinfo |= flowlabel;
4399 ip6h->ip6_flow = htonl(flowinfo);
4401 ip6h->ip6_nxt = IPPROTO_UDP;
4403 ip6h->ip6_nxt = IPPROTO_SCTP;
4405 ip6h->ip6_plen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4406 ip6h->ip6_dst = sin6->sin6_addr;
4409 * Add SRC address selection here: we can only reuse
4410 * to a limited degree the kame src-addr-sel, since
4411 * we can try their selection but it may not be bound.
4414 memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
4415 lsa6_tmp.sin6_family = AF_INET6;
4416 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4418 if (net && out_of_asoc_ok == 0) {
4419 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4420 sctp_free_ifa(net->ro._s_addr);
4421 net->ro._s_addr = NULL;
4422 net->src_addr_selected = 0;
4428 if (net->src_addr_selected == 0) {
4429 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4430 /* KAME hack: embed scopeid */
4431 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4432 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4436 /* Cache the source address */
4437 net->ro._s_addr = sctp_source_address_selection(inp,
4443 (void)sa6_recoverscope(sin6);
4444 net->src_addr_selected = 1;
4446 if (net->ro._s_addr == NULL) {
4447 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4448 net->src_addr_selected = 0;
4449 sctp_handle_no_route(stcb, net, so_locked);
4450 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4452 return (EHOSTUNREACH);
4454 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4456 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4457 /* KAME hack: embed scopeid */
4458 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4459 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4463 if (over_addr == NULL) {
4464 struct sctp_ifa *_lsrc;
4466 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4470 if (_lsrc == NULL) {
4471 sctp_handle_no_route(stcb, net, so_locked);
4472 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4474 return (EHOSTUNREACH);
4476 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4477 sctp_free_ifa(_lsrc);
4479 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4480 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4482 (void)sa6_recoverscope(sin6);
4484 lsa6->sin6_port = inp->sctp_lport;
4486 if (ro->ro_rt == NULL) {
4488 * src addr selection failed to find a route
4489 * (or valid source addr), so we can't get there from here (yet)!
4492 sctp_handle_no_route(stcb, net, so_locked);
4493 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4495 return (EHOSTUNREACH);
4498 * XXX: sa6 may not have a valid sin6_scope_id in
4499 * the non-SCOPEDROUTING case.
4501 memset(&lsa6_storage, 0, sizeof(lsa6_storage));
4502 lsa6_storage.sin6_family = AF_INET6;
4503 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4504 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4505 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4506 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4511 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4512 lsa6_storage.sin6_port = inp->sctp_lport;
4513 lsa6 = &lsa6_storage;
4514 ip6h->ip6_src = lsa6->sin6_addr;
4517 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4518 sctp_handle_no_route(stcb, net, so_locked);
4519 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4521 return (EHOSTUNREACH);
4523 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4524 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4525 udp->uh_dport = port;
4526 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4528 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4530 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4533 sctphdr->src_port = src_port;
4534 sctphdr->dest_port = dest_port;
4535 sctphdr->v_tag = v_tag;
4536 sctphdr->checksum = 0;
4539 * We set the hop limit now since there is a good
4540 * chance that our ro pointer is now filled
4542 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4543 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4546 /* Copy to be sure something bad is not happening */
4547 sin6->sin6_addr = ip6h->ip6_dst;
4548 lsa6->sin6_addr = ip6h->ip6_src;
4551 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4552 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4553 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4554 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4555 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4557 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4559 * preserve the port and scope for link local send
4562 prev_scope = sin6->sin6_scope_id;
4563 prev_port = sin6->sin6_port;
4566 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4567 /* failed to prepend data, give up */
4569 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4572 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4574 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4575 SCTP_STAT_INCR(sctps_sendswcrc);
4576 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4577 udp->uh_sum = 0xffff;
4580 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4581 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4582 SCTP_STAT_INCR(sctps_sendhwcrc);
4584 /* send it out. table id is taken from stcb */
4585 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4586 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4587 so = SCTP_INP_SO(inp);
4588 SCTP_SOCKET_UNLOCK(so, 0);
4591 #ifdef SCTP_PACKET_LOGGING
4592 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4593 sctp_packet_log(o_pak);
4595 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4596 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4597 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4598 atomic_add_int(&stcb->asoc.refcnt, 1);
4599 SCTP_TCB_UNLOCK(stcb);
4600 SCTP_SOCKET_LOCK(so, 0);
4601 SCTP_TCB_LOCK(stcb);
4602 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4606 /* for link local this must be done */
4607 sin6->sin6_scope_id = prev_scope;
4608 sin6->sin6_port = prev_port;
4610 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4612 UDPSTAT_INC(udps_opackets);
4614 SCTP_STAT_INCR(sctps_sendpackets);
4615 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4617 SCTP_STAT_INCR(sctps_senderrors);
4620 /* Now if we had a temp route free it */
4624 * PMTU check versus smallest asoc MTU goes here
4627 if (ro->ro_rt == NULL) {
4628 /* Route was freed */
4629 if (net->ro._s_addr &&
4630 net->src_addr_selected) {
4631 sctp_free_ifa(net->ro._s_addr);
4632 net->ro._s_addr = NULL;
4634 net->src_addr_selected = 0;
4636 if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4637 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4640 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4643 mtu -= sizeof(struct udphdr);
4645 if (mtu < net->mtu) {
4646 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4647 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4653 if (ND_IFINFO(ifp)->linkmtu &&
4654 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4655 sctp_mtu_size_reset(inp,
4657 ND_IFINFO(ifp)->linkmtu);
4665 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4666 ((struct sockaddr *)to)->sa_family);
4668 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4675 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4676 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4681 struct mbuf *m, *m_last;
4682 struct sctp_nets *net;
4683 struct sctp_init_chunk *init;
4684 struct sctp_supported_addr_param *sup_addr;
4685 struct sctp_adaptation_layer_indication *ali;
4686 struct sctp_supported_chunk_types_param *pr_supported;
4687 struct sctp_paramhdr *ph;
4688 int cnt_inits_to = 0;
4690 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4692 /* INIT's always go to the primary (and usually ONLY address) */
4693 net = stcb->asoc.primary_destination;
4695 net = TAILQ_FIRST(&stcb->asoc.nets);
4700 /* we confirm any address we send an INIT to */
4701 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4702 (void)sctp_set_primary_addr(stcb, NULL, net);
4704 /* we confirm any address we send an INIT to */
4705 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4707 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4709 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4711 * special hook: if we are sending to a link local address, it will
4712 * not show up in our private address count.
4714 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4718 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4719 /* This case should not happen */
4720 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4723 /* start the INIT timer */
4724 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4726 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
4728 /* No memory, INIT timer will re-attempt. */
4729 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4732 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
4734 /* Now lets put the chunk header in place */
4735 init = mtod(m, struct sctp_init_chunk *);
4736 /* now the chunk header */
4737 init->ch.chunk_type = SCTP_INITIATION;
4738 init->ch.chunk_flags = 0;
4739 /* fill in later from mbuf we build */
4740 init->ch.chunk_length = 0;
4741 /* place in my tag */
4742 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4743 /* set up some of the credits. */
4744 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4745 SCTP_MINIMAL_RWND));
4746 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4747 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4748 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
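/*
 * The fixed portion of the INIT is now in place (initiate_tag, a_rwnd,
 * stream counts, initial TSN).  Everything below appends optional
 * parameters at mtod(m, caddr_t)+chunk_len, keeping chunk_len and
 * padding_len in step so every parameter starts on a 32-bit boundary.
 */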
4750 /* Adaptation layer indication parameter */
4751 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4752 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
4753 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4754 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4755 ali->ph.param_length = htons(parameter_len);
4756 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
4757 chunk_len += parameter_len;
4761 if (stcb->asoc.ecn_supported == 1) {
4762 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4763 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4764 ph->param_type = htons(SCTP_ECN_CAPABLE);
4765 ph->param_length = htons(parameter_len);
4766 chunk_len += parameter_len;
4769 /* PR-SCTP supported parameter */
4770 if (stcb->asoc.prsctp_supported == 1) {
4771 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4772 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4773 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4774 ph->param_length = htons(parameter_len);
4775 chunk_len += parameter_len;
4778 /* Add NAT friendly parameter. */
4779 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4780 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4781 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4782 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4783 ph->param_length = htons(parameter_len);
4784 chunk_len += parameter_len;
4787 /* And now tell the peer which extensions we support */
4789 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4790 if (stcb->asoc.prsctp_supported == 1) {
4791 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4792 if (stcb->asoc.idata_supported) {
4793 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
4796 if (stcb->asoc.auth_supported == 1) {
4797 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4799 if (stcb->asoc.asconf_supported == 1) {
4800 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4801 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4803 if (stcb->asoc.reconfig_supported == 1) {
4804 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4806 if (stcb->asoc.idata_supported) {
4807 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
4809 if (stcb->asoc.nrsack_supported == 1) {
4810 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4812 if (stcb->asoc.pktdrop_supported == 1) {
4813 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4816 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4817 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4818 pr_supported->ph.param_length = htons(parameter_len);
4819 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4820 chunk_len += parameter_len;
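/*
 * Worked example of the padding bookkeeping used throughout, assuming
 * a 4-byte parameter header: with three extension chunk types listed,
 * parameter_len = 4 + 3 = 7, SCTP_SIZE32(7) = 8, so padding_len = 1.
 * The pad bytes are not written here; the next parameter (or the final
 * padding pass) zero-fills them before advancing chunk_len.
 */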
4822 /* add authentication parameters */
4823 if (stcb->asoc.auth_supported) {
4824 /* attach RANDOM parameter, if available */
4825 if (stcb->asoc.authinfo.random != NULL) {
4826 struct sctp_auth_random *randp;
4828 if (padding_len > 0) {
4829 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4830 chunk_len += padding_len;
4833 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4834 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4835 /* random key already contains the header */
4836 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4837 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4838 chunk_len += parameter_len;
4840 /* add HMAC_ALGO parameter */
4841 if (stcb->asoc.local_hmacs != NULL) {
4842 struct sctp_auth_hmac_algo *hmacs;
4844 if (padding_len > 0) {
4845 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4846 chunk_len += padding_len;
4849 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4850 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
4851 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4852 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4853 hmacs->ph.param_length = htons(parameter_len);
4854 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
4855 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4856 chunk_len += parameter_len;
4858 /* add CHUNKS parameter */
4859 if (stcb->asoc.local_auth_chunks != NULL) {
4860 struct sctp_auth_chunk_list *chunks;
4862 if (padding_len > 0) {
4863 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4864 chunk_len += padding_len;
4867 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4868 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
4869 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
4870 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4871 chunks->ph.param_length = htons(parameter_len);
4872 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
4873 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4874 chunk_len += parameter_len;
4878 /* now any cookie time extensions */
4879 if (stcb->asoc.cookie_preserve_req) {
4880 struct sctp_cookie_perserve_param *cookie_preserve;
4882 if (padding_len > 0) {
4883 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4884 chunk_len += padding_len;
4887 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
4888 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4889 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4890 cookie_preserve->ph.param_length = htons(parameter_len);
4891 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4892 stcb->asoc.cookie_preserve_req = 0;
4893 chunk_len += parameter_len;
4896 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4899 if (padding_len > 0) {
4900 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4901 chunk_len += padding_len;
4904 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4905 if (stcb->asoc.scope.ipv4_addr_legal) {
4906 parameter_len += (uint16_t)sizeof(uint16_t);
4908 if (stcb->asoc.scope.ipv6_addr_legal) {
4909 parameter_len += (uint16_t)sizeof(uint16_t);
4911 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4912 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4913 sup_addr->ph.param_length = htons(parameter_len);
4915 if (stcb->asoc.scope.ipv4_addr_legal) {
4916 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4918 if (stcb->asoc.scope.ipv6_addr_legal) {
4919 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4921 padding_len = 4 - 2 * i;
4922 chunk_len += parameter_len;
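/*
 * Example for the supported-address-types parameter: with only IPv4
 * legal, i == 1, so parameter_len = 4 + 2 = 6 and padding_len = 2;
 * with both families legal, i == 2, parameter_len = 8 and no padding
 * is needed.
 */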
4925 SCTP_BUF_LEN(m) = chunk_len;
4926 /* now the addresses */
4928 * To optimize this we could put the scoping stuff into a structure
4929 * and remove the individual uint8's from the assoc structure. Then
4930 * we could just sifa in the address within the stcb. But for now
4931 * this is a quick hack to get the address stuff teased apart.
4933 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
4935 &padding_len, &chunk_len);
4937 init->ch.chunk_length = htons(chunk_len);
4938 if (padding_len > 0) {
4939 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
4944 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4945 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
4946 (struct sockaddr *)&net->ro._l_addr,
4947 m, 0, NULL, 0, 0, 0, 0,
4948 inp->sctp_lport, stcb->rport, htonl(0),
4952 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
4953 if (error == ENOBUFS) {
4954 stcb->asoc.ifp_had_enobuf = 1;
4955 SCTP_STAT_INCR(sctps_lowlevelerr);
4958 stcb->asoc.ifp_had_enobuf = 0;
4960 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4961 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4965 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4966 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4969 * Given a mbuf containing an INIT or INIT-ACK with the param_offset
4970 * being equal to the beginning of the params i.e. (iphlen +
4971 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4972 * end of the mbuf verifying that all parameters are known.
4974 * For unknown parameters build and return a mbuf with
4975 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4976 * processing this chunk stop, and set *abort_processing to 1.
4978 * By having param_offset be pre-set to where parameters begin it is
4979 * hoped that this routine may be reused in the future by new features.
4982 struct sctp_paramhdr *phdr, params;
4984 struct mbuf *mat, *op_err;
4985 int at, limit, pad_needed;
4986 uint16_t ptype, plen, padded_size;
4989 *abort_processing = 0;
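/*
 * Reminder of the parameter-type action bits this routine relies on
 * (RFC 4960, section 3.2.1): the two high-order bits of an unrecognized
 * parameter type tell the receiver what to do.  Bit 0x8000 set means
 * "skip the parameter and continue processing", clear means "stop
 * processing"; bit 0x4000 set means "report it in an Unrecognized
 * Parameter cause".  The ptype & 0x4000 / 0x8000 checks in the default
 * case below implement exactly this.
 */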
4992 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4995 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4996 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4997 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4998 ptype = ntohs(phdr->param_type);
4999 plen = ntohs(phdr->param_length);
5000 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
5001 /* invalid parameter length */
5002 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
5005 limit -= SCTP_SIZE32(plen);
5007 * All parameters for all chunks that we know/understand are
5008 * listed here. We process them other places and make
5009 * appropriate stop actions per the upper bits. However this
5010 * is the generic routine processor's can call to get back
5011 * an operr.. to either incorporate (init-ack) or send.
5013 padded_size = SCTP_SIZE32(plen);
5015 /* Param's with variable size */
5016 case SCTP_HEARTBEAT_INFO:
5017 case SCTP_STATE_COOKIE:
5018 case SCTP_UNRECOG_PARAM:
5019 case SCTP_ERROR_CAUSE_IND:
5023 /* Param's with variable size within a range */
5024 case SCTP_CHUNK_LIST:
5025 case SCTP_SUPPORTED_CHUNK_EXT:
5026 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5027 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5032 case SCTP_SUPPORTED_ADDRTYPE:
5033 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5034 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5040 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5041 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5046 case SCTP_SET_PRIM_ADDR:
5047 case SCTP_DEL_IP_ADDRESS:
5048 case SCTP_ADD_IP_ADDRESS:
5049 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5050 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5051 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5056 /* Param's with a fixed size */
5057 case SCTP_IPV4_ADDRESS:
5058 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5059 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5064 case SCTP_IPV6_ADDRESS:
5065 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5066 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5071 case SCTP_COOKIE_PRESERVE:
5072 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5073 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5078 case SCTP_HAS_NAT_SUPPORT:
5081 case SCTP_PRSCTP_SUPPORTED:
5082 if (padded_size != sizeof(struct sctp_paramhdr)) {
5083 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5088 case SCTP_ECN_CAPABLE:
5089 if (padded_size != sizeof(struct sctp_paramhdr)) {
5090 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5095 case SCTP_ULP_ADAPTATION:
5096 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5097 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5102 case SCTP_SUCCESS_REPORT:
5103 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5104 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5109 case SCTP_HOSTNAME_ADDRESS:
5111 /* We can NOT handle HOST NAME addresses!! */
5114 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5115 *abort_processing = 1;
5116 if (op_err == NULL) {
5117 /* Ok need to try to get a mbuf */
5119 l_len = SCTP_MIN_OVERHEAD;
5121 l_len = SCTP_MIN_V4_OVERHEAD;
5123 l_len += sizeof(struct sctp_chunkhdr);
5124 l_len += sizeof(struct sctp_gen_error_cause);
5125 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5127 SCTP_BUF_LEN(op_err) = 0;
5129 * Pre-reserve space for IP,
5130 * SCTP, and chunk header.
5133 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5135 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5137 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5138 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5142 /* If we have space */
5143 struct sctp_gen_error_cause cause;
5146 uint32_t cpthis = 0;
5148 pad_needed = 4 - (err_at % 4);
5149 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5150 err_at += pad_needed;
5152 cause.code = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5153 cause.length = htons((uint16_t)(sizeof(struct sctp_gen_error_cause) + plen));
5154 m_copyback(op_err, err_at, sizeof(struct sctp_gen_error_cause), (caddr_t)&cause);
5155 err_at += sizeof(struct sctp_gen_error_cause);
5156 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5157 if (SCTP_BUF_NEXT(op_err) == NULL) {
5158 sctp_m_freem(op_err);
5167 * we do not recognize the parameter; figure out what to do.
5170 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5171 if ((ptype & 0x4000) == 0x4000) {
5172 /* Report bit is set?? */
5173 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5174 if (op_err == NULL) {
5177 /* Ok need to try to get an mbuf */
5179 l_len = SCTP_MIN_OVERHEAD;
5181 l_len = SCTP_MIN_V4_OVERHEAD;
5183 l_len += sizeof(struct sctp_chunkhdr);
5184 l_len += sizeof(struct sctp_paramhdr);
5185 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5187 SCTP_BUF_LEN(op_err) = 0;
5189 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5191 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5193 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5194 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5198 /* If we have space */
5199 struct sctp_paramhdr s;
5202 uint32_t cpthis = 0;
5204 pad_needed = 4 - (err_at % 4);
5205 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5206 err_at += pad_needed;
5208 s.param_type = htons(SCTP_UNRECOG_PARAM);
5209 s.param_length = htons((uint16_t)sizeof(struct sctp_paramhdr) + plen);
5210 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)&s);
5211 err_at += sizeof(struct sctp_paramhdr);
5212 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(mat, at, plen, M_NOWAIT);
5213 if (SCTP_BUF_NEXT(op_err) == NULL) {
5214 sctp_m_freem(op_err);
5216 * we are out of memory but
5217 * we still need to have a
5218 * look at what to do (the
5219 * system is in trouble though).
5223 goto more_processing;
5229 if ((ptype & 0x8000) == 0x0000) {
5230 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5233 /* skip this chunk and continue processing */
5234 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5235 at += SCTP_SIZE32(plen);
5240 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5244 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5245 *abort_processing = 1;
5246 if ((op_err == NULL) && phdr) {
5249 l_len = SCTP_MIN_OVERHEAD;
5251 l_len = SCTP_MIN_V4_OVERHEAD;
5253 l_len += sizeof(struct sctp_chunkhdr);
5254 l_len += (2 * sizeof(struct sctp_paramhdr));
5255 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5257 SCTP_BUF_LEN(op_err) = 0;
5259 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5261 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5263 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5264 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5267 if ((op_err) && phdr) {
5268 struct sctp_paramhdr s;
5271 uint32_t cpthis = 0;
5273 pad_needed = 4 - (err_at % 4);
5274 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5275 err_at += pad_needed;
5277 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5278 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5279 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5280 err_at += sizeof(s);
5281 /* Only copy back the p-hdr that caused the issue */
5282 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5288 sctp_are_there_new_addresses(struct sctp_association *asoc,
5289 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5292 * Given an INIT packet, look through the packet to verify that there
5293 * are NO new addresses. As we go through the parameters, add reports
5294 * of any un-understood parameters that require an error. Also we
5295 * must return (1) to drop the packet if we see an un-understood
5296 * parameter that tells us to drop the chunk.
5298 struct sockaddr *sa_touse;
5299 struct sockaddr *sa;
5300 struct sctp_paramhdr *phdr, params;
5301 uint16_t ptype, plen;
5303 struct sctp_nets *net;
5306 struct sockaddr_in sin4, *sa4;
5309 struct sockaddr_in6 sin6, *sa6;
5313 memset(&sin4, 0, sizeof(sin4));
5314 sin4.sin_family = AF_INET;
5315 sin4.sin_len = sizeof(sin4);
5318 memset(&sin6, 0, sizeof(sin6));
5319 sin6.sin6_family = AF_INET6;
5320 sin6.sin6_len = sizeof(sin6);
5322 /* First what about the src address of the pkt ? */
5324 switch (src->sa_family) {
5327 if (asoc->scope.ipv4_addr_legal) {
5334 if (asoc->scope.ipv6_addr_legal) {
5345 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5346 sa = (struct sockaddr *)&net->ro._l_addr;
5347 if (sa->sa_family == src->sa_family) {
5349 if (sa->sa_family == AF_INET) {
5350 struct sockaddr_in *src4;
5352 sa4 = (struct sockaddr_in *)sa;
5353 src4 = (struct sockaddr_in *)src;
5354 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5361 if (sa->sa_family == AF_INET6) {
5362 struct sockaddr_in6 *src6;
5364 sa6 = (struct sockaddr_in6 *)sa;
5365 src6 = (struct sockaddr_in6 *)src;
5366 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5375 /* New address added! no need to look further. */
5379 /* Ok so far lets munge through the rest of the packet */
5380 offset += sizeof(struct sctp_init_chunk);
5381 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5384 ptype = ntohs(phdr->param_type);
5385 plen = ntohs(phdr->param_length);
5388 case SCTP_IPV4_ADDRESS:
5390 struct sctp_ipv4addr_param *p4, p4_buf;
5392 if (plen != sizeof(struct sctp_ipv4addr_param)) {
5395 phdr = sctp_get_next_param(in_initpkt, offset,
5396 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5400 if (asoc->scope.ipv4_addr_legal) {
5401 p4 = (struct sctp_ipv4addr_param *)phdr;
5402 sin4.sin_addr.s_addr = p4->addr;
5403 sa_touse = (struct sockaddr *)&sin4;
5409 case SCTP_IPV6_ADDRESS:
5411 struct sctp_ipv6addr_param *p6, p6_buf;
5413 if (plen != sizeof(struct sctp_ipv6addr_param)) {
5416 phdr = sctp_get_next_param(in_initpkt, offset,
5417 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5421 if (asoc->scope.ipv6_addr_legal) {
5422 p6 = (struct sctp_ipv6addr_param *)phdr;
5423 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5425 sa_touse = (struct sockaddr *)&sin6;
5435 /* ok, sa_touse points to one to check */
5437 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5438 sa = (struct sockaddr *)&net->ro._l_addr;
5439 if (sa->sa_family != sa_touse->sa_family) {
5443 if (sa->sa_family == AF_INET) {
5444 sa4 = (struct sockaddr_in *)sa;
5445 if (sa4->sin_addr.s_addr ==
5446 sin4.sin_addr.s_addr) {
5453 if (sa->sa_family == AF_INET6) {
5454 sa6 = (struct sockaddr_in6 *)sa;
5455 if (SCTP6_ARE_ADDR_EQUAL(
5464 /* New addr added! no need to look further */
5468 offset += SCTP_SIZE32(plen);
5469 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5475 * Given an MBUF chain that was sent to us containing an INIT, build an
5476 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
5477 * done a pullup to include the IPv6/IPv4 header, SCTP header and initial part
5478 * of the INIT message (i.e. the struct sctp_init_msg).
5481 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5482 struct sctp_nets *src_net, struct mbuf *init_pkt,
5483 int iphlen, int offset,
5484 struct sockaddr *src, struct sockaddr *dst,
5485 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5486 uint8_t mflowtype, uint32_t mflowid,
5487 uint32_t vrf_id, uint16_t port)
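/*
 * High-level flow of this function (a sketch; details and error paths
 * follow): validate the INIT (no new addresses in non-COOKIE-WAIT
 * states, no fatal unrecognized parameters), build the state cookie
 * "stc" describing scopes, addresses and tags, construct the INIT-ACK
 * chunk and its parameters (including that cookie), and send the
 * result back toward the source of the INIT.
 */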
5489 struct sctp_association *asoc;
5490 struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5491 struct sctp_init_ack_chunk *initack;
5492 struct sctp_adaptation_layer_indication *ali;
5493 struct sctp_supported_chunk_types_param *pr_supported;
5494 struct sctp_paramhdr *ph;
5495 union sctp_sockstore *over_addr;
5496 struct sctp_scoping scp;
5499 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5500 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5501 struct sockaddr_in *sin;
5504 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5505 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5506 struct sockaddr_in6 *sin6;
5508 struct sockaddr *to;
5509 struct sctp_state_cookie stc;
5510 struct sctp_nets *net = NULL;
5511 uint8_t *signature = NULL;
5512 int cnt_inits_to = 0;
5513 uint16_t his_limit, i_want;
5515 int nat_friendly = 0;
5518 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5525 if ((asoc != NULL) &&
5526 (SCTP_GET_STATE(stcb) != SCTP_STATE_COOKIE_WAIT)) {
5527 if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
5529 * new addresses, out of here in non-cookie-wait states.
5532 * Send an ABORT, without the new address error
5533 * cause. This looks no different than if no
5534 * listener was present.
5536 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5538 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5539 mflowtype, mflowid, inp->fibnum,
5543 if (src_net != NULL && (src_net->port != port)) {
5545 * change of remote encapsulation port, out of here
5546 * in non-cookie-wait states
5548 * Send an ABORT, without a specific error cause.
5549 * This looks no different than if no listener was
5552 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5553 "Remote encapsulation port changed");
5554 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5555 mflowtype, mflowid, inp->fibnum,
5561 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5562 (offset + sizeof(struct sctp_init_chunk)),
5563 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5566 if (op_err == NULL) {
5567 char msg[SCTP_DIAG_INFO_LEN];
5569 snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
5570 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5573 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5574 init_chk->init.initiate_tag, op_err,
5575 mflowtype, mflowid, inp->fibnum,
5579 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5581 /* No memory, INIT timer will re-attempt. */
5583 sctp_m_freem(op_err);
5586 chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
5590 * We might not overwrite the identification[] completely and on
5591 * some platforms time_entered will contain some padding. Therefore
5592 * zero out the cookie to avoid putting uninitialized memory on the wire.
5595 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5597 /* the time I built cookie */
5598 (void)SCTP_GETTIME_TIMEVAL(&now);
5599 stc.time_entered.tv_sec = now.tv_sec;
5600 stc.time_entered.tv_usec = now.tv_usec;
5602 /* populate any tie tags */
5604 /* unlock before tag selections */
5605 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5606 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5607 stc.cookie_life = asoc->cookie_life;
5608 net = asoc->primary_destination;
5610 stc.tie_tag_my_vtag = 0;
5611 stc.tie_tag_peer_vtag = 0;
5612 /* life I will award this cookie */
5613 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5616 /* copy in the ports for later check */
5617 stc.myport = sh->dest_port;
5618 stc.peerport = sh->src_port;
5621 * If we wanted to honor cookie life extensions, we would add to
5622 * stc.cookie_life. For now we should NOT honor any extension
5624 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5625 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5626 stc.ipv6_addr_legal = 1;
5627 if (SCTP_IPV6_V6ONLY(inp)) {
5628 stc.ipv4_addr_legal = 0;
5630 stc.ipv4_addr_legal = 1;
5633 stc.ipv6_addr_legal = 0;
5634 stc.ipv4_addr_legal = 1;
5639 switch (dst->sa_family) {
5643 /* lookup address */
5644 stc.address[0] = src4->sin_addr.s_addr;
5648 stc.addr_type = SCTP_IPV4_ADDRESS;
5649 /* local from address */
5650 stc.laddress[0] = dst4->sin_addr.s_addr;
5651 stc.laddress[1] = 0;
5652 stc.laddress[2] = 0;
5653 stc.laddress[3] = 0;
5654 stc.laddr_type = SCTP_IPV4_ADDRESS;
5655 /* scope_id is only for v6 */
5657 if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
5658 (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))) {
5661 /* Must use the address in this case */
5662 if (sctp_is_address_on_local_host(src, vrf_id)) {
5663 stc.loopback_scope = 1;
5666 stc.local_scope = 0;
5674 stc.addr_type = SCTP_IPV6_ADDRESS;
5675 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5676 stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
5677 if (sctp_is_address_on_local_host(src, vrf_id)) {
5678 stc.loopback_scope = 1;
5679 stc.local_scope = 0;
5682 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
5683 IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
5685 * If the new destination or source
5686 * is LINK_LOCAL, we must have both
5687 * site and local scope in common.
5688 * Don't set local scope though,
5689 * since we must depend on the
5690 * source to be added implicitly. We
5691 * cannot assume that just because we
5692 * share one link all links are common.
5695 stc.local_scope = 0;
5699 * we start counting for the private
5700 * address stuff at 1. since the
5701 * link local we source from won't
5702 * show up in our scoped count.
5706 * pull out the scope_id from
5709 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
5710 IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
5712 * If the new destination or source
5713 * is SITE_LOCAL then we must have
5714 * site scope in common.
5718 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5719 stc.laddr_type = SCTP_IPV6_ADDRESS;
5729 /* set the scope per the existing tcb */
5732 struct sctp_nets *lnet;
5735 stc.loopback_scope = asoc->scope.loopback_scope;
5736 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5737 stc.site_scope = asoc->scope.site_scope;
5738 stc.local_scope = asoc->scope.local_scope;
5740 /* Why do we not consider IPv4 LL addresses? */
5741 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5742 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5743 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5745 * if we have a LL address, start counting at 1.
5753 /* use the net pointer */
5754 to = (struct sockaddr *)&net->ro._l_addr;
5755 switch (to->sa_family) {
5758 sin = (struct sockaddr_in *)to;
5759 stc.address[0] = sin->sin_addr.s_addr;
5763 stc.addr_type = SCTP_IPV4_ADDRESS;
5764 if (net->src_addr_selected == 0) {
5766 * strange case here, the INIT should have
5767 * done the selection.
5769 net->ro._s_addr = sctp_source_address_selection(inp,
5770 stcb, (sctp_route_t *)&net->ro,
5772 if (net->ro._s_addr == NULL)
5775 net->src_addr_selected = 1;
5778 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5779 stc.laddress[1] = 0;
5780 stc.laddress[2] = 0;
5781 stc.laddress[3] = 0;
5782 stc.laddr_type = SCTP_IPV4_ADDRESS;
5783 /* scope_id is only for v6 */
5789 sin6 = (struct sockaddr_in6 *)to;
5790 memcpy(&stc.address, &sin6->sin6_addr,
5791 sizeof(struct in6_addr));
5792 stc.addr_type = SCTP_IPV6_ADDRESS;
5793 stc.scope_id = sin6->sin6_scope_id;
5794 if (net->src_addr_selected == 0) {
5796 * strange case here, the INIT should have
5797 * done the selection.
5799 net->ro._s_addr = sctp_source_address_selection(inp,
5800 stcb, (sctp_route_t *)&net->ro,
5802 if (net->ro._s_addr == NULL)
5805 net->src_addr_selected = 1;
5807 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5808 sizeof(struct in6_addr));
5809 stc.laddr_type = SCTP_IPV6_ADDRESS;
5814 /* Now lets put the SCTP header in place */
5815 initack = mtod(m, struct sctp_init_ack_chunk *);
5816 /* Save it off for quick ref */
5817 stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
5819 memcpy(stc.identification, SCTP_VERSION_STRING,
5820 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5821 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5822 /* now the chunk header */
5823 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5824 initack->ch.chunk_flags = 0;
5825 /* fill in later from mbuf we build */
5826 initack->ch.chunk_length = 0;
5827 /* place in my tag */
5828 if ((asoc != NULL) &&
5829 ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
5830 (SCTP_GET_STATE(stcb) == SCTP_STATE_INUSE) ||
5831 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED))) {
5832 /* re-use the v-tags and init-seq here */
5833 initack->init.initiate_tag = htonl(asoc->my_vtag);
5834 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5836 uint32_t vtag, itsn;
5839 atomic_add_int(&asoc->refcnt, 1);
5840 SCTP_TCB_UNLOCK(stcb);
5842 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5843 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5845 * Got a duplicate vtag from a peer behind a
5846 * NAT; make sure we don't use it.
5850 initack->init.initiate_tag = htonl(vtag);
5851 /* get a TSN to use too */
5852 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5853 initack->init.initial_tsn = htonl(itsn);
5854 SCTP_TCB_LOCK(stcb);
5855 atomic_add_int(&asoc->refcnt, -1);
5857 SCTP_INP_INCR_REF(inp);
5858 SCTP_INP_RUNLOCK(inp);
5859 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5860 initack->init.initiate_tag = htonl(vtag);
5861 /* get a TSN to use too */
5862 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5863 SCTP_INP_RLOCK(inp);
5864 SCTP_INP_DECR_REF(inp);
5867 /* save away my tag in the state cookie */
5868 stc.my_vtag = initack->init.initiate_tag;
5870 /* set up some of the credits. */
5871 so = inp->sctp_socket;
5873 /* memory problem */
5877 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5879 /* set what I want */
5880 his_limit = ntohs(init_chk->init.num_inbound_streams);
5881 /* choose what I want */
5883 if (asoc->streamoutcnt > asoc->pre_open_streams) {
5884 i_want = asoc->streamoutcnt;
5886 i_want = asoc->pre_open_streams;
5889 i_want = inp->sctp_ep.pre_open_stream_count;
5891 if (his_limit < i_want) {
5892 /* I Want more :< */
5893 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5895 /* I can have what I want :> */
5896 initack->init.num_outbound_streams = htons(i_want);
5898 /* tell him his limit. */
5899 initack->init.num_inbound_streams =
5900 htons(inp->sctp_ep.max_open_streams_intome);
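/*
 * Stream negotiation example: if the peer's INIT advertised 5 inbound
 * streams (his_limit) and we would like 10 (i_want), we can only open
 * 5 outbound streams, so we echo the peer's limit; if his_limit >=
 * i_want we simply request i_want.  Our own inbound limit is
 * advertised unchanged from max_open_streams_intome.
 */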
5902 /* adaptation layer indication parameter */
5903 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5904 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5905 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
5906 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5907 ali->ph.param_length = htons(parameter_len);
5908 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5909 chunk_len += parameter_len;
5913 if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
5914 ((asoc == NULL) && (inp->ecn_supported == 1))) {
5915 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5916 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5917 ph->param_type = htons(SCTP_ECN_CAPABLE);
5918 ph->param_length = htons(parameter_len);
5919 chunk_len += parameter_len;
5922 /* PR-SCTP supported parameter */
5923 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5924 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5925 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5926 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5927 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5928 ph->param_length = htons(parameter_len);
5929 chunk_len += parameter_len;
5932 /* Add NAT friendly parameter */
5934 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5935 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5936 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5937 ph->param_length = htons(parameter_len);
5938 chunk_len += parameter_len;
5941 /* And now tell the peer which extensions we support */
5943 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
5944 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5945 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5946 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5947 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5948 ((asoc == NULL) && (inp->idata_supported == 1))) {
5949 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5952 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5953 ((asoc == NULL) && (inp->auth_supported == 1))) {
5954 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5956 if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
5957 ((asoc == NULL) && (inp->asconf_supported == 1))) {
5958 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5959 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5961 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
5962 ((asoc == NULL) && (inp->reconfig_supported == 1))) {
5963 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5965 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5966 ((asoc == NULL) && (inp->idata_supported == 1))) {
5967 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5969 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
5970 ((asoc == NULL) && (inp->nrsack_supported == 1))) {
5971 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5973 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
5974 ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
5975 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5978 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5979 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5980 pr_supported->ph.param_length = htons(parameter_len);
5981 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5982 chunk_len += parameter_len;
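/*
 * Note the pattern used for each optional parameter above and below:
 * the parameter is written at mtod(m, caddr_t) + chunk_len, and the
 * padding needed to reach the next 4-byte boundary is remembered in
 * padding_len and only zero-filled when the next parameter is added.
 */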
5985 /* add authentication parameters */
5986 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5987 ((asoc == NULL) && (inp->auth_supported == 1))) {
5988 struct sctp_auth_random *randp;
5989 struct sctp_auth_hmac_algo *hmacs;
5990 struct sctp_auth_chunk_list *chunks;
5992 if (padding_len > 0) {
5993 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
5994 chunk_len += padding_len;
5997 /* generate and add RANDOM parameter */
5998 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
5999 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
6000 SCTP_AUTH_RANDOM_SIZE_DEFAULT;
6001 randp->ph.param_type = htons(SCTP_RANDOM);
6002 randp->ph.param_length = htons(parameter_len);
6003 SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
6004 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6005 chunk_len += parameter_len;
6007 if (padding_len > 0) {
6008 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6009 chunk_len += padding_len;
6012 /* add HMAC_ALGO parameter */
6013 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
6014 parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6015 sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6016 (uint8_t *)hmacs->hmac_ids);
6017 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6018 hmacs->ph.param_length = htons(parameter_len);
6019 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6020 chunk_len += parameter_len;
6022 if (padding_len > 0) {
6023 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6024 chunk_len += padding_len;
6027 /* add CHUNKS parameter */
6028 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
6029 parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6030 sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6031 chunks->chunk_types);
6032 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6033 chunks->ph.param_length = htons(parameter_len);
6034 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6035 chunk_len += parameter_len;
6037 SCTP_BUF_LEN(m) = chunk_len;
6039 /* now the addresses */
6041 * To optimize this we could put the scoping stuff into a structure
6042 * and remove the individual uint8's from the stc structure. Then we
6043 * could just stuff the address within the stc, but for now this
6044 * is a quick hack to get the address stuff teased apart.
6046 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6047 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6048 scp.loopback_scope = stc.loopback_scope;
6049 scp.ipv4_local_scope = stc.ipv4_scope;
6050 scp.local_scope = stc.local_scope;
6051 scp.site_scope = stc.site_scope;
6052 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6054 &padding_len, &chunk_len);
6055 /* padding_len can only be positive if no addresses have been added */
6056 if (padding_len > 0) {
6057 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6058 chunk_len += padding_len;
6059 SCTP_BUF_LEN(m) += padding_len;
6063 /* tack on the operational error if present */
6066 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6067 parameter_len += SCTP_BUF_LEN(m_tmp);
6069 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6070 SCTP_BUF_NEXT(m_last) = op_err;
6071 while (SCTP_BUF_NEXT(m_last) != NULL) {
6072 m_last = SCTP_BUF_NEXT(m_last);
6074 chunk_len += parameter_len;
6076 if (padding_len > 0) {
6077 m_last = sctp_add_pad_tombuf(m_last, padding_len);
6078 if (m_last == NULL) {
6079 /* Houston we have a problem, no space */
6083 chunk_len += padding_len;
6086 /* Now we must build a cookie */
6087 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6088 if (m_cookie == NULL) {
6089 /* memory problem */
6093 /* Now append the cookie to the end and update the space/size */
6094 SCTP_BUF_NEXT(m_last) = m_cookie;
6096 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6097 parameter_len += SCTP_BUF_LEN(m_tmp);
6098 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6102 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6103 chunk_len += parameter_len;
6106 * Place in the size, but we don't include the last pad (if any) in the chunk length.
6109 initack->ch.chunk_length = htons(chunk_len);
6112 * Time to sign the cookie. We don't sign over the cookie signature
6113 * itself, thus we set the trailer.
6115 (void)sctp_hmac_m(SCTP_HMAC,
6116 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6117 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6118 (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
6120 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6121 * here since the timer will drive a retransmission.
6123 if (padding_len > 0) {
6124 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6129 if (stc.loopback_scope) {
6130 over_addr = (union sctp_sockstore *)dst;
6135 if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6137 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6140 SCTP_SO_NOT_LOCKED))) {
6141 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6142 if (error == ENOBUFS) {
6144 asoc->ifp_had_enobuf = 1;
6146 SCTP_STAT_INCR(sctps_lowlevelerr);
6150 asoc->ifp_had_enobuf = 0;
6153 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
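/*
 * Walk the sent and send queues looking for PR-SCTP chunks under the
 * buffer-space policy whose priority (kept in timetodrop.tv_sec) is
 * not higher than the message being sent, and release them until
 * enough space (dataout) has been freed.
 */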
6158 sctp_prune_prsctp(struct sctp_tcb *stcb,
6159 struct sctp_association *asoc,
6160 struct sctp_sndrcvinfo *srcv,
6164 struct sctp_tmit_chunk *chk, *nchk;
6166 SCTP_TCB_LOCK_ASSERT(stcb);
6167 if ((asoc->prsctp_supported) &&
6168 (asoc->sent_queue_cnt_removeable > 0)) {
6169 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6171 * Look for chunks marked with the PR_SCTP flag AND
6172 * the buffer space flag. If the one being sent is of
6173 * equal or greater priority, then purge the old one
6174 * and free some space.
6176 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6178 * This one is PR-SCTP AND buffer space limited.
6181 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6183 * Lower numbers equate to higher
6184 * priority, so if the one we are
6185 * looking at has a larger or equal
6186 * priority we want to drop the data
6187 * and NOT retransmit it.
6191 * We release the book_size
6192 * if the mbuf is here
6197 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6201 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6204 freed_spc += ret_spc;
6205 if (freed_spc >= dataout) {
6208 } /* if chunk was present */
6209 } /* if of sufficient priority */
6210 } /* if chunk has enabled */
6211 } /* tailqforeach */
6213 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6214 /* Here we must move to the sent queue and mark */
6215 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6216 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6219 * We release the book_size
6220 * if the mbuf is here
6224 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6227 freed_spc += ret_spc;
6228 if (freed_spc >= dataout) {
6231 } /* end if chk->data */
6232 } /* end if right class */
6233 } /* end if chk pr-sctp */
6234 } /* tailqforeachsafe (chk) */
6235 } /* if enabled in asoc */
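/*
 * Return the largest DATA payload that fits in a single chunk for
 * this association: the smaller of the configured frag point and the
 * smallest path MTU, minus IP/SCTP/DATA chunk overhead and, when DATA
 * must be authenticated, the AUTH chunk length.
 */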
6239 sctp_get_frag_point(struct sctp_tcb *stcb,
6240 struct sctp_association *asoc)
6245 * For endpoints that have both v6 and v4 addresses we must reserve
6246 * room for the IPv6 header; for those that are only dealing with V4
6247 * we use a larger frag point.
6249 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6250 ovh = SCTP_MIN_OVERHEAD;
6252 ovh = SCTP_MIN_V4_OVERHEAD;
6254 ovh += SCTP_DATA_CHUNK_OVERHEAD(stcb);
6255 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6256 siz = asoc->smallest_mtu - ovh;
6258 siz = (stcb->asoc.sctp_frag_point - ovh);
6260 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6262 /* A data chunk MUST fit in a cluster */
6263 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6266 /* adjust for an AUTH chunk if DATA requires auth */
6267 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6268 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6271 /* make it an even word boundary please */
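/*
 * Turn the user's sinfo_flags/timetolive into PR-SCTP chunk flags and
 * fill sp->ts accordingly: an absolute expiry time for the TTL
 * policy, a priority for the buffer-drop policy, and a retransmission
 * limit for the RTX policy.
 */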
6278 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6281 * We assume that the user wants PR_SCTP_TTL if the user provides a
6282 * positive lifetime but does not specify any PR_SCTP policy.
6284 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6285 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6286 } else if (sp->timetolive > 0) {
6287 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6288 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6292 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6293 case CHUNK_FLAGS_PR_SCTP_BUF:
6295 * Time to live is a priority stored in tv_sec when doing
6296 * the buffer drop thing.
6298 sp->ts.tv_sec = sp->timetolive;
6301 case CHUNK_FLAGS_PR_SCTP_TTL:
6305 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6306 tv.tv_sec = sp->timetolive / 1000;
6307 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
6309 * TODO sctp_constants.h needs alternative time
6310 * macros when _KERNEL is undefined.
6312 timevaladd(&sp->ts, &tv);
6315 case CHUNK_FLAGS_PR_SCTP_RTX:
6317 * Time to live is the number of retransmissions, stored in tv_sec.
6320 sp->ts.tv_sec = sp->timetolive;
6324 SCTPDBG(SCTP_DEBUG_USRREQ1,
6325 "Unknown PR_SCTP policy %u.\n",
6326 PR_SCTP_POLICY(sp->sinfo_flags));
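/*
 * Append a complete user message (the mbuf chain m) to the selected
 * stream's outqueue as a single, already-complete send, charge it to
 * the send buffer and hand it to the stream scheduler.
 */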
6332 sctp_msg_append(struct sctp_tcb *stcb,
6333 struct sctp_nets *net,
6335 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6339 struct sctp_stream_queue_pending *sp = NULL;
6340 struct sctp_stream_out *strm;
6343 * Given an mbuf chain, put it into the association send queue and
6344 * place it on the wheel
6346 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6347 /* Invalid stream number */
6348 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6352 if ((stcb->asoc.stream_locked) &&
6353 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6354 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6358 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6359 /* Now can we send this? */
6360 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
6361 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6362 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6363 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6364 /* got data while shutting down */
6365 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6369 sctp_alloc_a_strmoq(stcb, sp);
6371 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6375 sp->sinfo_flags = srcv->sinfo_flags;
6376 sp->timetolive = srcv->sinfo_timetolive;
6377 sp->ppid = srcv->sinfo_ppid;
6378 sp->context = srcv->sinfo_context;
6380 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6382 atomic_add_int(&sp->net->ref_count, 1);
6386 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6387 sp->sid = srcv->sinfo_stream;
6388 sp->msg_is_complete = 1;
6389 sp->sender_all_done = 1;
6392 sp->tail_mbuf = NULL;
6393 sctp_set_prsctp_policy(sp);
6395 * We could in theory (for sendall) pass the length in, but we would
6396 * still have to hunt through the chain since we need to set up the tail_mbuf.
6400 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6401 if (SCTP_BUF_NEXT(at) == NULL)
6403 sp->length += SCTP_BUF_LEN(at);
6405 if (srcv->sinfo_keynumber_valid) {
6406 sp->auth_keyid = srcv->sinfo_keynumber;
6408 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6410 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6411 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6412 sp->holds_key_ref = 1;
6414 if (hold_stcb_lock == 0) {
6415 SCTP_TCB_SEND_LOCK(stcb);
6417 sctp_snd_sb_alloc(stcb, sp->length);
6418 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6419 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6420 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6422 if (hold_stcb_lock == 0) {
6423 SCTP_TCB_SEND_UNLOCK(stcb);
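/*
 * Append clonechain onto outchain. Depending on can_take_mbuf and the
 * size involved this either takes the chain over, copies the bytes
 * into the trailing space of a cluster, or does an mbuf copy (mostly
 * by reference); *endofchain tracks the last mbuf for cheap appends.
 */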
6433 static struct mbuf *
6434 sctp_copy_mbufchain(struct mbuf *clonechain,
6435 struct mbuf *outchain,
6436 struct mbuf **endofchain,
6439 uint8_t copy_by_ref)
6442 struct mbuf *appendchain;
6446 if (endofchain == NULL) {
6450 sctp_m_freem(outchain);
6453 if (can_take_mbuf) {
6454 appendchain = clonechain;
6457 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6459 /* It's not in a cluster */
6460 if (*endofchain == NULL) {
6461 /* let's get an mbuf cluster */
6462 if (outchain == NULL) {
6463 /* This is the general case */
6465 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6466 if (outchain == NULL) {
6469 SCTP_BUF_LEN(outchain) = 0;
6470 *endofchain = outchain;
6471 /* get the prepend space */
6472 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6475 * We really should not get a NULL in endofchain.
6481 if (SCTP_BUF_NEXT(m) == NULL) {
6485 m = SCTP_BUF_NEXT(m);
6488 if (*endofchain == NULL) {
6490 * huh, TSNH XXX maybe we should panic
6493 sctp_m_freem(outchain);
6497 /* get the new end of length */
6498 len = (int)M_TRAILINGSPACE(*endofchain);
6500 /* how much is left at the end? */
6501 len = (int)M_TRAILINGSPACE(*endofchain);
6503 /* Find the end of the data, for appending */
6504 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6506 /* Now let's copy it out */
6507 if (len >= sizeofcpy) {
6508 /* It all fits, copy it in */
6509 m_copydata(clonechain, 0, sizeofcpy, cp);
6510 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6512 /* fill up the end of the chain */
6514 m_copydata(clonechain, 0, len, cp);
6515 SCTP_BUF_LEN((*endofchain)) += len;
6516 /* now we need another one */
6519 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6524 SCTP_BUF_NEXT((*endofchain)) = m;
6526 cp = mtod((*endofchain), caddr_t);
6527 m_copydata(clonechain, len, sizeofcpy, cp);
6528 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6532 /* copy the old-fashioned way */
6533 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6534 #ifdef SCTP_MBUF_LOGGING
6535 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6536 sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
6541 if (appendchain == NULL) {
6544 sctp_m_freem(outchain);
6548 /* tack on to the end */
6549 if (*endofchain != NULL) {
6550 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6554 if (SCTP_BUF_NEXT(m) == NULL) {
6555 SCTP_BUF_NEXT(m) = appendchain;
6558 m = SCTP_BUF_NEXT(m);
6562 * save off the end and update the end-chain position
6566 if (SCTP_BUF_NEXT(m) == NULL) {
6570 m = SCTP_BUF_NEXT(m);
6574 /* save off the end and update the end-chain position */
6577 if (SCTP_BUF_NEXT(m) == NULL) {
6581 m = SCTP_BUF_NEXT(m);
6583 return (appendchain);
6588 sctp_med_chunk_output(struct sctp_inpcb *inp,
6589 struct sctp_tcb *stcb,
6590 struct sctp_association *asoc,
6593 int control_only, int from_where,
6594 struct timeval *now, int *now_filled, int frag_point, int so_locked
6595 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6601 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6602 uint32_t val SCTP_UNUSED)
6604 struct sctp_copy_all *ca;
6607 int added_control = 0;
6608 int un_sent, do_chunk_output = 1;
6609 struct sctp_association *asoc;
6610 struct sctp_nets *net;
6612 ca = (struct sctp_copy_all *)ptr;
6613 if (ca->m == NULL) {
6616 if (ca->inp != inp) {
6620 if (ca->sndlen > 0) {
6621 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6623 /* can't copy so we are done */
6627 #ifdef SCTP_MBUF_LOGGING
6628 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6629 sctp_log_mbc(m, SCTP_MBUF_ICOPY);
6635 SCTP_TCB_LOCK_ASSERT(stcb);
6636 if (stcb->asoc.alternate) {
6637 net = stcb->asoc.alternate;
6639 net = stcb->asoc.primary_destination;
6641 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6642 /* Abort this assoc with m as the user defined reason */
6644 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6646 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6647 0, M_NOWAIT, 1, MT_DATA);
6648 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6651 struct sctp_paramhdr *ph;
6653 ph = mtod(m, struct sctp_paramhdr *);
6654 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6655 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
6658 * We add one here to keep the assoc from disappearing on us.
6661 atomic_add_int(&stcb->asoc.refcnt, 1);
6662 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6664 * sctp_abort_an_association calls sctp_free_asoc(), but
6665 * free_asoc will NOT free the association since we
6666 * incremented the refcnt. We do this to prevent it being
6667 * freed and things getting tricky, since we could end up
6668 * (from free_asoc) calling inpcb_free, which would get a
6669 * recursive lock call to the iterator lock. But as a
6670 * consequence of that, the stcb will return to us un-locked.
6671 * Since free_asoc returns with either no TCB or the TCB
6672 * unlocked, we must relock to unlock in the iterator timer :-0
6674 SCTP_TCB_LOCK(stcb);
6675 atomic_add_int(&stcb->asoc.refcnt, -1);
6676 goto no_chunk_output;
6679 ret = sctp_msg_append(stcb, net, m,
6683 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6684 /* shutdown this assoc */
6685 if (TAILQ_EMPTY(&asoc->send_queue) &&
6686 TAILQ_EMPTY(&asoc->sent_queue) &&
6687 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
6688 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6692 * there is nothing queued to send, so I'm done.
6695 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
6696 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6697 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6699 * only send SHUTDOWN the first time through.
6702 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
6703 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6705 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
6706 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
6707 sctp_stop_timers_for_shutdown(stcb);
6708 sctp_send_shutdown(stcb, net);
6709 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6711 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6712 asoc->primary_destination);
6714 do_chunk_output = 0;
6718 * we still have (or just got) data to send,
6719 * so set SHUTDOWN_PENDING
6722 * XXX sockets draft says that SCTP_EOF
6723 * should be sent with no data. Currently,
6724 * we will allow user data to be sent first
6725 * and move to SHUTDOWN-PENDING.
6727 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
6728 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6729 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6730 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6731 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
6733 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
6734 if (TAILQ_EMPTY(&asoc->send_queue) &&
6735 TAILQ_EMPTY(&asoc->sent_queue) &&
6736 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6737 struct mbuf *op_err;
6738 char msg[SCTP_DIAG_INFO_LEN];
6741 snprintf(msg, sizeof(msg),
6742 "%s:%d at %s", __FILE__, __LINE__, __func__);
6743 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6745 atomic_add_int(&stcb->asoc.refcnt, 1);
6746 sctp_abort_an_association(stcb->sctp_ep, stcb,
6747 op_err, SCTP_SO_NOT_LOCKED);
6748 atomic_add_int(&stcb->asoc.refcnt, -1);
6749 goto no_chunk_output;
6751 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6752 asoc->primary_destination);
6758 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6759 (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));
6761 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6762 (stcb->asoc.total_flight > 0) &&
6763 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6764 do_chunk_output = 0;
6766 if (do_chunk_output)
6767 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6768 else if (added_control) {
6769 int num_out, reason, now_filled = 0;
6773 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6774 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6775 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6786 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6788 struct sctp_copy_all *ca;
6790 ca = (struct sctp_copy_all *)ptr;
6792 * Do a notify here? Kacheong suggests that the notify be done at
6793 * the send time... so you would push up a notification if any send
6794 * failed. Don't know if this is feasible since the only failures we
6795 * have are "memory" related, and if you cannot get an mbuf to send
6796 * the data you surely can't get an mbuf to send up to notify the
6797 * user you can't send the data :->
6800 /* now free everything */
6802 /* Let's clear the flag to allow others to run. */
6803 ca->inp->sctp_flags &= ~SCTP_PCB_FLAGS_SND_ITERATOR_UP;
6805 sctp_m_freem(ca->m);
6806 SCTP_FREE(ca, SCTP_M_COPYAL);
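/*
 * Copy len bytes from the user's uio into a newly allocated mbuf
 * chain, leaving room in the first mbuf for the data chunk header.
 */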
6809 static struct mbuf *
6810 sctp_copy_out_all(struct uio *uio, ssize_t len)
6812 struct mbuf *ret, *at;
6813 ssize_t left, willcpy, cancpy, error;
6815 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
6821 SCTP_BUF_LEN(ret) = 0;
6822 /* save space for the data chunk header */
6823 cancpy = (int)M_TRAILINGSPACE(ret);
6824 willcpy = min(cancpy, left);
6827 /* Align data to the end */
6828 error = uiomove(mtod(at, caddr_t), (int)willcpy, uio);
6834 SCTP_BUF_LEN(at) = (int)willcpy;
6835 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6838 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg((unsigned int)left, 0, M_WAITOK, 1, MT_DATA);
6839 if (SCTP_BUF_NEXT(at) == NULL) {
6842 at = SCTP_BUF_NEXT(at);
6843 SCTP_BUF_LEN(at) = 0;
6844 cancpy = (int)M_TRAILINGSPACE(at);
6845 willcpy = min(cancpy, left);
6852 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6853 struct sctp_sndrcvinfo *srcv)
6856 struct sctp_copy_all *ca;
6858 if (inp->sctp_flags & SCTP_PCB_FLAGS_SND_ITERATOR_UP) {
6859 /* There is another. */
6862 if (uio->uio_resid > SCTP_MAX_SENDALL_LIMIT) {
6863 /* You must be less than the max! */
6866 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6870 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6873 memset(ca, 0, sizeof(struct sctp_copy_all));
6877 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6880 * take off the sendall flag; it would be bad if we failed to do this.
6883 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6884 /* get length and mbuf chain */
6886 ca->sndlen = uio->uio_resid;
6887 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6888 if (ca->m == NULL) {
6889 SCTP_FREE(ca, SCTP_M_COPYAL);
6890 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6894 /* Gather the length of the send */
6898 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6899 ca->sndlen += SCTP_BUF_LEN(mat);
6902 inp->sctp_flags |= SCTP_PCB_FLAGS_SND_ITERATOR_UP;
6903 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6904 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6905 SCTP_ASOC_ANY_STATE,
6907 sctp_sendall_completes, inp, 1);
6909 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6910 SCTP_FREE(ca, SCTP_M_COPYAL);
6911 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
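/*
 * Remove any COOKIE-ECHO chunks still sitting on the control send
 * queue, typically because a fresh cookie is about to be queued.
 */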
6919 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6921 struct sctp_tmit_chunk *chk, *nchk;
6923 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6924 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6925 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6926 asoc->ctrl_queue_cnt--;
6928 sctp_m_freem(chk->data);
6931 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6937 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6939 struct sctp_association *asoc;
6940 struct sctp_tmit_chunk *chk, *nchk;
6941 struct sctp_asconf_chunk *acp;
6944 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6945 /* find SCTP_ASCONF chunk in queue */
6946 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6948 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6949 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6954 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6955 asoc->ctrl_queue_cnt--;
6957 sctp_m_freem(chk->data);
6960 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
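/*
 * Move the chunks just handed to the lower layer from the send queue
 * onto the sent queue in TSN order, bind them to the net they were
 * sent on, and update flight size, peer rwnd and window-probe
 * accounting for each of them.
 */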
6967 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6968 struct sctp_association *asoc,
6969 struct sctp_tmit_chunk **data_list,
6971 struct sctp_nets *net)
6974 struct sctp_tmit_chunk *tp1;
6976 for (i = 0; i < bundle_at; i++) {
6977 /* off of the send queue */
6978 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6979 asoc->send_queue_cnt--;
6982 * Any chunk NOT 0 you zap the time; chunk 0 gets
6983 * zapped or set based on whether an RTO measurement is needed.
6986 data_list[i]->do_rtt = 0;
6989 data_list[i]->sent_rcv_time = net->last_sent_time;
6990 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6991 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
6992 if (data_list[i]->whoTo == NULL) {
6993 data_list[i]->whoTo = net;
6994 atomic_add_int(&net->ref_count, 1);
6996 /* on to the sent queue */
6997 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6998 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
6999 struct sctp_tmit_chunk *tpp;
7001 /* need to move back */
7003 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
7005 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
7009 if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
7012 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
7014 TAILQ_INSERT_TAIL(&asoc->sent_queue,
7019 /* This does not lower until the cum-ack passes it */
7020 asoc->sent_queue_cnt++;
7021 if ((asoc->peers_rwnd <= 0) &&
7022 (asoc->total_flight == 0) &&
7024 /* Mark the chunk as being a window probe */
7025 SCTP_STAT_INCR(sctps_windowprobed);
7027 #ifdef SCTP_AUDITING_ENABLED
7028 sctp_audit_log(0xC2, 3);
7030 data_list[i]->sent = SCTP_DATAGRAM_SENT;
7031 data_list[i]->snd_count = 1;
7032 data_list[i]->rec.data.chunk_was_revoked = 0;
7033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7034 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7035 data_list[i]->whoTo->flight_size,
7036 data_list[i]->book_size,
7037 (uint32_t)(uintptr_t)data_list[i]->whoTo,
7038 data_list[i]->rec.data.tsn);
7040 sctp_flight_size_increase(data_list[i]);
7041 sctp_total_flight_increase(stcb, data_list[i]);
7042 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7043 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7044 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7046 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7047 (uint32_t)(data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7048 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7049 /* SWS sender side engages */
7050 asoc->peers_rwnd = 0;
7053 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7054 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
7059 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7060 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7065 struct sctp_tmit_chunk *chk, *nchk;
7067 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7068 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7069 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7070 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7071 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7072 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7073 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7074 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7075 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7076 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7077 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7078 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7079 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7080 /* Stray chunks must be cleaned up */
7082 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7083 asoc->ctrl_queue_cnt--;
7085 sctp_m_freem(chk->data);
7088 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7089 asoc->fwd_tsn_cnt--;
7091 sctp_free_a_chunk(stcb, chk, so_locked);
7092 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7093 /* special handling, we must look into the param */
7094 if (chk != asoc->str_reset) {
7095 goto clean_up_anyway;
7102 sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
7103 uint32_t space_left, uint32_t frag_point, int eeor_on)
7106 * Make a decision on if I should split a msg into multiple parts.
7107 * This is only asked of incomplete messages.
7111 * If we are doing EEOR we need to always send it if it's the
7112 * entire thing, since it might be all the guy is putting in the hopper.
7115 if (space_left >= length) {
7117 * If we have data outstanding,
7118 * we get another chance when the sack
7119 * arrives to transmit - wait for more data
7121 if (stcb->asoc.total_flight == 0) {
7123 * If nothing is in flight, we zero the packet counter.
7131 /* You can fill the rest */
7132 return (space_left);
7136 * For those strange folk that make the send buffer
7137 * smaller than our fragmentation point, we can't
7138 * get a full msg in so we have to allow splitting.
7140 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7143 if ((length <= space_left) ||
7144 ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7145 /* Sub-optimal residual; don't split in non-eeor mode. */
7149 * If we reach here, length is larger than the space_left. Do we wish
7150 * to split it for the sake of putting a packet together?
7152 if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7153 /* It's ok to split it */
7154 return (min(space_left, frag_point));
7156 /* Nope, can't split */
7161 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7162 struct sctp_stream_out *strq,
7163 uint32_t space_left,
7164 uint32_t frag_point,
7169 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7174 /* Move from the stream to the send_queue keeping track of the total */
7175 struct sctp_association *asoc;
7176 struct sctp_stream_queue_pending *sp;
7177 struct sctp_tmit_chunk *chk;
7178 struct sctp_data_chunk *dchkh = NULL;
7179 struct sctp_idata_chunk *ndchkh = NULL;
7180 uint32_t to_move, length;
7182 uint8_t rcv_flags = 0;
7184 uint8_t send_lock_up = 0;
7186 SCTP_TCB_LOCK_ASSERT(stcb);
7189 /* sa_ignore FREED_MEMORY */
7190 sp = TAILQ_FIRST(&strq->outqueue);
7192 if (send_lock_up == 0) {
7193 SCTP_TCB_SEND_LOCK(stcb);
7196 sp = TAILQ_FIRST(&strq->outqueue);
7200 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7201 (stcb->asoc.idata_supported == 0) &&
7202 (strq->last_msg_incomplete)) {
7203 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7205 strq->last_msg_incomplete);
7206 strq->last_msg_incomplete = 0;
7210 SCTP_TCB_SEND_UNLOCK(stcb);
7215 if ((sp->msg_is_complete) && (sp->length == 0)) {
7216 if (sp->sender_all_done) {
7218 * We are doing deferred cleanup. Last time through
7219 * when we took all the data the sender_all_done was not set.
7222 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7223 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7224 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7225 sp->sender_all_done,
7227 sp->msg_is_complete,
7231 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7232 SCTP_TCB_SEND_LOCK(stcb);
7235 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7236 TAILQ_REMOVE(&strq->outqueue, sp, next);
7237 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7238 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7239 (strq->chunks_on_queues == 0) &&
7240 TAILQ_EMPTY(&strq->outqueue)) {
7241 stcb->asoc.trigger_reset = 1;
7244 sctp_free_remote_addr(sp->net);
7248 sctp_m_freem(sp->data);
7251 sctp_free_a_strmoq(stcb, sp, so_locked);
7252 /* we can't be locked to it */
7254 SCTP_TCB_SEND_UNLOCK(stcb);
7257 /* back to get the next msg */
7261 * sender just finished this but still holds a reference.
7269 /* is there some to get */
7270 if (sp->length == 0) {
7275 } else if (sp->discard_rest) {
7276 if (send_lock_up == 0) {
7277 SCTP_TCB_SEND_LOCK(stcb);
7280 /* Whack down the size */
7281 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7282 if ((stcb->sctp_socket != NULL) &&
7283 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7284 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7285 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7288 sctp_m_freem(sp->data);
7290 sp->tail_mbuf = NULL;
7299 some_taken = sp->some_taken;
7301 length = sp->length;
7302 if (sp->msg_is_complete) {
7303 /* The message is complete */
7304 to_move = min(length, frag_point);
7305 if (to_move == length) {
7306 /* All of it fits in the MTU */
7307 if (sp->some_taken) {
7308 rcv_flags |= SCTP_DATA_LAST_FRAG;
7310 rcv_flags |= SCTP_DATA_NOT_FRAG;
7312 sp->put_last_out = 1;
7313 if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
7314 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7317 /* Not all of it fits, we fragment */
7318 if (sp->some_taken == 0) {
7319 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7324 to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
7327 * We use a snapshot of length in case it
7328 * is expanding during the compare.
7333 if (to_move >= llen) {
7335 if (send_lock_up == 0) {
7337 * We are taking all of an incomplete msg
7338 * thus we need a send lock.
7340 SCTP_TCB_SEND_LOCK(stcb);
7342 if (sp->msg_is_complete) {
7344 * the sender finished the msg.
7351 if (sp->some_taken == 0) {
7352 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7356 /* Nothing to take. */
7363 /* If we reach here, we can copy out a chunk */
7364 sctp_alloc_a_chunk(stcb, chk);
7366 /* No chunk memory */
7372 * Set up for unordered if needed by looking at the user-sent info
7375 if (sp->sinfo_flags & SCTP_UNORDERED) {
7376 rcv_flags |= SCTP_DATA_UNORDERED;
7378 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7379 (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7380 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7382 /* clear out the chunk before setting up */
7383 memset(chk, 0, sizeof(*chk));
7384 chk->rec.data.rcv_flags = rcv_flags;
7386 if (to_move >= length) {
7387 /* we think we can steal the whole thing */
7388 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7389 SCTP_TCB_SEND_LOCK(stcb);
7392 if (to_move < sp->length) {
7393 /* bail, it changed */
7396 chk->data = sp->data;
7397 chk->last_mbuf = sp->tail_mbuf;
7398 /* register the stealing */
7399 sp->data = sp->tail_mbuf = NULL;
7404 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7405 chk->last_mbuf = NULL;
7406 if (chk->data == NULL) {
7407 sp->some_taken = some_taken;
7408 sctp_free_a_chunk(stcb, chk, so_locked);
7413 #ifdef SCTP_MBUF_LOGGING
7414 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7415 sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7418 /* Pull off the data */
7419 m_adj(sp->data, to_move);
7420 /* Now let's work our way down and compact it */
7422 while (m && (SCTP_BUF_LEN(m) == 0)) {
7423 sp->data = SCTP_BUF_NEXT(m);
7424 SCTP_BUF_NEXT(m) = NULL;
7425 if (sp->tail_mbuf == m) {
7427 * Freeing tail? TSNH since
7428 * we supposedly were taking less
7429 * than the sp->length.
7432 panic("Huh, freing tail? - TSNH");
7434 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7435 sp->tail_mbuf = sp->data = NULL;
7444 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7445 chk->copy_by_ref = 1;
7447 chk->copy_by_ref = 0;
7450 * get last_mbuf and counts of mb usage. This is ugly but hopefully
7451 * it's only one mbuf.
7453 if (chk->last_mbuf == NULL) {
7454 chk->last_mbuf = chk->data;
7455 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7456 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7460 if (to_move > length) {
7461 /*- This should not happen either
7462 * since we always lower to_move to the size
7463 * of sp->length if it's larger.
7466 panic("Huh, how can to_move be larger?");
7468 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7472 atomic_subtract_int(&sp->length, to_move);
7474 leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
7475 if (M_LEADINGSPACE(chk->data) < leading) {
7476 /* Not enough room for a chunk header, get some */
7479 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 1, MT_DATA);
7482 * we're in trouble here. _PREPEND below will free
7483 * all the data if there is no leading space, so we
7484 * must put the data back and restore.
7486 if (send_lock_up == 0) {
7487 SCTP_TCB_SEND_LOCK(stcb);
7490 if (sp->data == NULL) {
7491 /* unsteal the data */
7492 sp->data = chk->data;
7493 sp->tail_mbuf = chk->last_mbuf;
7497 /* reassemble the data */
7499 sp->data = chk->data;
7500 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7502 sp->some_taken = some_taken;
7503 atomic_add_int(&sp->length, to_move);
7506 sctp_free_a_chunk(stcb, chk, so_locked);
7510 SCTP_BUF_LEN(m) = 0;
7511 SCTP_BUF_NEXT(m) = chk->data;
7513 M_ALIGN(chk->data, 4);
7516 SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
7517 if (chk->data == NULL) {
7518 /* HELP, TSNH since we assured it would not above? */
7520 panic("prepend failes HELP?");
7522 SCTP_PRINTF("prepend fails HELP?\n");
7523 sctp_free_a_chunk(stcb, chk, so_locked);
7529 sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb));
7530 chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
7531 chk->book_size_scale = 0;
7532 chk->sent = SCTP_DATAGRAM_UNSENT;
7535 chk->asoc = &stcb->asoc;
7536 chk->pad_inplace = 0;
7537 chk->no_fr_allowed = 0;
7538 if (stcb->asoc.idata_supported == 0) {
7539 if (rcv_flags & SCTP_DATA_UNORDERED) {
7540 /* Just use 0. The receiver ignores the values. */
7541 chk->rec.data.mid = 0;
7543 chk->rec.data.mid = strq->next_mid_ordered;
7544 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7545 strq->next_mid_ordered++;
7549 if (rcv_flags & SCTP_DATA_UNORDERED) {
7550 chk->rec.data.mid = strq->next_mid_unordered;
7551 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7552 strq->next_mid_unordered++;
7555 chk->rec.data.mid = strq->next_mid_ordered;
7556 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7557 strq->next_mid_ordered++;
7561 chk->rec.data.sid = sp->sid;
7562 chk->rec.data.ppid = sp->ppid;
7563 chk->rec.data.context = sp->context;
7564 chk->rec.data.doing_fast_retransmit = 0;
7566 chk->rec.data.timetodrop = sp->ts;
7567 chk->flags = sp->act_flags;
7570 chk->whoTo = sp->net;
7571 atomic_add_int(&chk->whoTo->ref_count, 1);
7575 if (sp->holds_key_ref) {
7576 chk->auth_keyid = sp->auth_keyid;
7577 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7578 chk->holds_key_ref = 1;
7580 chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
7581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7582 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7583 (uint32_t)(uintptr_t)stcb, sp->length,
7584 (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
7587 if (stcb->asoc.idata_supported == 0) {
7588 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7590 ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
7593 * Put the rest of the things in place now. Size was done earlier in
7594 * previous loop prior to padding.
7597 #ifdef SCTP_ASOCLOG_OF_TSNS
7598 SCTP_TCB_LOCK_ASSERT(stcb);
7599 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7600 asoc->tsn_out_at = 0;
7601 asoc->tsn_out_wrapped = 1;
7603 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
7604 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
7605 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
7606 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7607 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7608 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7609 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7610 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7613 if (stcb->asoc.idata_supported == 0) {
7614 dchkh->ch.chunk_type = SCTP_DATA;
7615 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7616 dchkh->dp.tsn = htonl(chk->rec.data.tsn);
7617 dchkh->dp.sid = htons(strq->sid);
7618 dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
7619 dchkh->dp.ppid = chk->rec.data.ppid;
7620 dchkh->ch.chunk_length = htons(chk->send_size);
7622 ndchkh->ch.chunk_type = SCTP_IDATA;
7623 ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7624 ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
7625 ndchkh->dp.sid = htons(strq->sid);
7626 ndchkh->dp.reserved = htons(0);
7627 ndchkh->dp.mid = htonl(chk->rec.data.mid);
7629 ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
7631 ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
7633 ndchkh->ch.chunk_length = htons(chk->send_size);
7635 /* Now advance the chk->send_size by the actual pad needed. */
7636 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7641 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7642 lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
7644 chk->last_mbuf = lm;
7645 chk->pad_inplace = 1;
7647 chk->send_size += pads;
7649 if (PR_SCTP_ENABLED(chk->flags)) {
7650 asoc->pr_sctp_cnt++;
7652 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7653 /* All done pull and kill the message */
7654 if (sp->put_last_out == 0) {
7655 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7656 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7657 sp->sender_all_done,
7659 sp->msg_is_complete,
7663 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7664 SCTP_TCB_SEND_LOCK(stcb);
7667 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7668 TAILQ_REMOVE(&strq->outqueue, sp, next);
7669 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7670 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7671 (strq->chunks_on_queues == 0) &&
7672 TAILQ_EMPTY(&strq->outqueue)) {
7673 stcb->asoc.trigger_reset = 1;
7676 sctp_free_remote_addr(sp->net);
7680 sctp_m_freem(sp->data);
7683 sctp_free_a_strmoq(stcb, sp, so_locked);
7685 asoc->chunks_on_out_queue++;
7686 strq->chunks_on_queues++;
7687 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7688 asoc->send_queue_cnt++;
7691 SCTP_TCB_SEND_UNLOCK(stcb);
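/*
 * Fill the send queue for this net: starting from the net's MTU less
 * IP/SCTP/DATA overhead, keep asking the stream scheduler for the
 * next stream and moving data from it until the space is consumed,
 * the scheduler runs dry, or a move asks us to give up or bail.
 */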
7698 sctp_fill_outqueue(struct sctp_tcb *stcb,
7699 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7700 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7705 struct sctp_association *asoc;
7706 struct sctp_stream_out *strq;
7707 uint32_t space_left, moved, total_moved;
7710 SCTP_TCB_LOCK_ASSERT(stcb);
7713 switch (net->ro._l_addr.sa.sa_family) {
7716 space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
7721 space_left = net->mtu - SCTP_MIN_OVERHEAD;
7726 space_left = net->mtu;
7729 /* Need an allowance for the data chunk header too */
7730 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
7732 /* must make even word boundary */
7733 space_left &= 0xfffffffc;
7734 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7737 while ((space_left > 0) && (strq != NULL)) {
7738 moved = sctp_move_to_outqueue(stcb, strq, space_left, frag_point,
7739 &giveup, eeor_mode, &bail, so_locked);
7740 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
7741 if ((giveup != 0) || (bail != 0)) {
7744 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7745 total_moved += moved;
7746 space_left -= moved;
7747 if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
7748 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
7752 space_left &= 0xfffffffc;
7757 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7759 if (total_moved == 0) {
7760 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7761 (net == stcb->asoc.primary_destination)) {
7762 /* ran dry for the primary network */
7763 SCTP_STAT_INCR(sctps_primary_randry);
7764 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7765 /* ran dry with CMT on */
7766 SCTP_STAT_INCR(sctps_cmt_randry);
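/*
 * Mark every ECN-ECHO chunk on the control send queue as unsent so it
 * will be picked up again on the next output pass.
 */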
7772 sctp_fix_ecn_echo(struct sctp_association *asoc)
7774 struct sctp_tmit_chunk *chk;
7776 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7777 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7778 chk->sent = SCTP_DATAGRAM_UNSENT;
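/*
 * Drop the references that queued stream data and send-queue chunks
 * hold on the given net, so that those chunks can be re-assigned to a
 * different destination the next time they are scheduled.
 */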
7784 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7786 struct sctp_association *asoc;
7787 struct sctp_tmit_chunk *chk;
7788 struct sctp_stream_queue_pending *sp;
7795 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7796 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7797 if (sp->net == net) {
7798 sctp_free_remote_addr(sp->net);
7803 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7804 if (chk->whoTo == net) {
7805 sctp_free_remote_addr(chk->whoTo);
7812 sctp_med_chunk_output(struct sctp_inpcb *inp,
7813 struct sctp_tcb *stcb,
7814 struct sctp_association *asoc,
7817 int control_only, int from_where,
7818 struct timeval *now, int *now_filled, int frag_point, int so_locked
7819 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7825 * Ok, this is the generic chunk service queue. We must do the
7826 * following:
7827 * - Service the stream queue that is next, moving any
7828 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7829 * LAST to the out queue in one pass) and assigning TSNs. This
7830 * only applies though if the peer does not support NDATA. For NDATA
7831 * chunks it's ok to not send the entire message ;-)
7832 * - Check to see if the cwnd/rwnd allows any output; if so we go ahead and
7833 * formulate and send the low level chunks, making sure to combine
7834 * any control in the control chunk queue also.
7836 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7837 struct mbuf *outchain, *endoutchain;
7838 struct sctp_tmit_chunk *chk, *nchk;
7840 /* temp arrays for unlinking */
7841 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7842 int no_fragmentflg, error;
7843 unsigned int max_rwnd_per_dest, max_send_per_dest;
7844 int one_chunk, hbflag, skip_data_for_this_net;
7845 int asconf, cookie, no_out_cnt;
7846 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7847 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7849 uint32_t auth_offset = 0;
7850 struct sctp_auth_chunk *auth = NULL;
7851 uint16_t auth_keyid;
7852 int override_ok = 1;
7853 int skip_fill_up = 0;
7854 int data_auth_reqd = 0;
7857 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the destination.
7864 auth_keyid = stcb->asoc.authinfo.active_keyid;
7865 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7866 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7867 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7872 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7874 * First let's prime the pump. For each destination, if there is room
7875 * in the flight size, attempt to pull an MTU's worth out of the
7876 * stream queues into the general send_queue
7878 #ifdef SCTP_AUDITING_ENABLED
7879 sctp_audit_log(0xC2, 2);
7881 SCTP_TCB_LOCK_ASSERT(stcb);
7888 /* Nothing possible to send? */
7889 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7890 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7891 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7892 TAILQ_EMPTY(&asoc->send_queue) &&
7893 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
7898 if (asoc->peers_rwnd == 0) {
7899 /* No room in peer's rwnd */
7901 if (asoc->total_flight > 0) {
7902 /* we are allowed one chunk in flight */
7906 if (stcb->asoc.ecn_echo_cnt_onq) {
7907 /* Record where a sack goes, if any */
7908 if (no_data_chunks &&
7909 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7910 /* Nothing but ECNe to send - we don't do that */
7911 goto nothing_to_send;
7913 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7914 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7915 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7916 sack_goes_to = chk->whoTo;
7921 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7922 if (stcb->sctp_socket)
7923 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7925 max_send_per_dest = 0;
7926 if (no_data_chunks == 0) {
7927 /* How many non-directed chunks are there? */
7928 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7929 if (chk->whoTo == NULL) {
7931 * We already have non-directed chunks on
7932 * the queue, no need to do a fill-up.
7940 if ((no_data_chunks == 0) &&
7941 (skip_fill_up == 0) &&
7942 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7943 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7945 * This for loop we are in takes in each net, if
7946 * it's got space in cwnd and has data sent to it
7947 * (when CMT is off) then it calls
7948 * sctp_fill_outqueue for the net. This gets data on
7949 * the send queue for that network.
7951 * In sctp_fill_outqueue TSN's are assigned and data
7952 * is copied out of the stream buffers. Note mostly
7953 * copy by reference (we hope).
7955 net->window_probe = 0;
7956 if ((net != stcb->asoc.alternate) &&
7957 ((net->dest_state & SCTP_ADDR_PF) ||
7958 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7959 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7960 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7961 sctp_log_cwnd(stcb, net, 1,
7962 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7966 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7967 (net->flight_size == 0)) {
7968 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7970 if (net->flight_size >= net->cwnd) {
7971 /* skip this network, no room - can't fill */
7972 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7973 sctp_log_cwnd(stcb, net, 3,
7974 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7978 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7979 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7981 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7983 /* memory alloc failure */
7989 /* now service each destination and send out what we can for it */
7990 /* Nothing to send? */
7991 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7992 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7993 TAILQ_EMPTY(&asoc->send_queue)) {
7998 if (asoc->sctp_cmt_on_off > 0) {
7999 /* get the last start point */
8000 start_at = asoc->last_net_cmt_send_started;
8001 if (start_at == NULL) {
8002 /* NULL, so start at the beginning */
8003 start_at = TAILQ_FIRST(&asoc->nets);
8005 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
8006 if (start_at == NULL) {
8007 start_at = TAILQ_FIRST(&asoc->nets);
8010 asoc->last_net_cmt_send_started = start_at;
8012 start_at = TAILQ_FIRST(&asoc->nets);
8014 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8015 if (chk->whoTo == NULL) {
8016 if (asoc->alternate) {
8017 chk->whoTo = asoc->alternate;
8019 chk->whoTo = asoc->primary_destination;
8021 atomic_add_int(&chk->whoTo->ref_count, 1);
8024 old_start_at = NULL;
8025 again_one_more_time:
8026 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
8027 /* how much can we send? */
8028 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8029 if (old_start_at && (old_start_at == net)) {
8030 /* through list completely. */
8034 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8035 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8036 (net->flight_size >= net->cwnd)) {
8038 * Nothing on control or asconf and flight is full,
8039 * we can skip even in the CMT case.
8044 endoutchain = outchain = NULL;
8047 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8048 skip_data_for_this_net = 1;
8050 skip_data_for_this_net = 0;
8052 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8055 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8060 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8070 if (mtu > asoc->peers_rwnd) {
8071 if (asoc->total_flight > 0) {
8072 /* We have a packet in flight somewhere */
8073 r_mtu = asoc->peers_rwnd;
8075 /* We are always allowed to send one MTU out */
8083 /************************/
8084 /* ASCONF transmission */
8085 /************************/
8086 /* Now first let's go through the asconf queue */
8087 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8088 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8091 if (chk->whoTo == NULL) {
8092 if (asoc->alternate == NULL) {
8093 if (asoc->primary_destination != net) {
8097 if (asoc->alternate != net) {
8102 if (chk->whoTo != net) {
8106 if (chk->data == NULL) {
8109 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8110 chk->sent != SCTP_DATAGRAM_RESEND) {
8114 * if no AUTH is yet included and this chunk
8115 * requires it, make sure to account for it. We
8116 * don't apply the size until the AUTH chunk is
8117 * actually added below in case there is no room for
8118 * this chunk. NOTE: we overload the use of "omtu" to hold the AUTH chunk length.
8121 if ((auth == NULL) &&
8122 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8123 stcb->asoc.peer_auth_chunks)) {
8124 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8127 /* Here we do NOT factor the r_mtu */
8128 if ((chk->send_size < (int)(mtu - omtu)) ||
8129 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8131 * We probably should glom the mbuf chain
8132 * from the chk->data for control but the
8133 * problem is it becomes yet one more level
8134 * of tracking to do if for some reason
8135 * output fails. Then I have got to
8136 * reconstruct the merged control chain.. el
8137 * yucko.. for now we take the easy way and do the copy.
8141 * Add an AUTH chunk, if chunk requires it
8142 * save the offset into the chain for AUTH
8144 if ((auth == NULL) &&
8145 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8146 stcb->asoc.peer_auth_chunks))) {
8147 outchain = sctp_add_auth_chunk(outchain,
8152 chk->rec.chunk_id.id);
8153 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8155 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8156 (int)chk->rec.chunk_id.can_take_data,
8157 chk->send_size, chk->copy_by_ref);
8158 if (outchain == NULL) {
8160 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8163 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8164 /* update our MTU size */
8165 if (mtu > (chk->send_size + omtu))
8166 mtu -= (chk->send_size + omtu);
8169 to_out += (chk->send_size + omtu);
8170 /* Do clear IP_DF ? */
8171 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8174 if (chk->rec.chunk_id.can_take_data)
8177 * set hb flag since we can use these for RTO.
8183 * should sysctl this: don't bundle data
8184 * with ASCONF since it requires AUTH
8187 chk->sent = SCTP_DATAGRAM_SENT;
8188 if (chk->whoTo == NULL) {
8190 atomic_add_int(&net->ref_count, 1);
8195 * Ok we are out of room but we can
8196 * output without affecting the
8197 * flight size since this little guy
8198 * is a control only packet.
8200 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8202 * do NOT clear the asconf flag as
8203 * it is used to do appropriate
8204 * source address selection.
8206 if (*now_filled == 0) {
8207 (void)SCTP_GETTIME_TIMEVAL(now);
8210 net->last_sent_time = *now;
8212 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8213 (struct sockaddr *)&net->ro._l_addr,
8214 outchain, auth_offset, auth,
8215 stcb->asoc.authinfo.active_keyid,
8216 no_fragmentflg, 0, asconf,
8217 inp->sctp_lport, stcb->rport,
8218 htonl(stcb->asoc.peer_vtag),
8223 * error, we could not output.
8226 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8227 if (from_where == 0) {
8228 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8230 if (error == ENOBUFS) {
8231 asoc->ifp_had_enobuf = 1;
8232 SCTP_STAT_INCR(sctps_lowlevelerr);
8234 /* error, could not output */
8235 if (error == EHOSTUNREACH) {
8241 sctp_move_chunks_from_net(stcb, net);
8246 asoc->ifp_had_enobuf = 0;
8249 * increase the number we sent, if a
8250 * cookie is sent we don't tell them any was sent out.
8253 outchain = endoutchain = NULL;
8257 *num_out += ctl_cnt;
8258 /* recalc a clean slate and setup */
8259 switch (net->ro._l_addr.sa.sa_family) {
8262 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8267 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8284 /************************/
8285 /* Control transmission */
8286 /************************/
8287 /* Now first lets go through the control queue */
8288 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8289 if ((sack_goes_to) &&
8290 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8291 (chk->whoTo != sack_goes_to)) {
8293 * if we have a sack in queue, and we are
8294 * looking at an ecn echo that is NOT queued
8295 * to where the sack is going..
8297 if (chk->whoTo == net) {
8299 * Don't transmit it to where it's
8300 * going (current net)
8303 } else if (sack_goes_to == net) {
8305 * But do transmit it to this
8308 goto skip_net_check;
8311 if (chk->whoTo == NULL) {
8312 if (asoc->alternate == NULL) {
8313 if (asoc->primary_destination != net) {
8317 if (asoc->alternate != net) {
8322 if (chk->whoTo != net) {
8327 if (chk->data == NULL) {
8330 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8332 * It must be unsent. Cookies and ASCONF's
8333 * hang around, but their timers will force sending
8334 * when marked for resend.
8339 * if no AUTH is yet included and this chunk
8340 * requires it, make sure to account for it. We
8341 * don't apply the size until the AUTH chunk is
8342 * actually added below in case there is no room for
8343 * this chunk. NOTE: we overload the use of "omtu"
8346 if ((auth == NULL) &&
8347 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8348 stcb->asoc.peer_auth_chunks)) {
8349 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8352 /* Here we do NOT factor the r_mtu */
8353 if ((chk->send_size <= (int)(mtu - omtu)) ||
8354 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8356 * We probably should glom the mbuf chain
8357 * from the chk->data for control but the
8358 * problem is it becomes yet one more level
8359 * of tracking to do if for some reason
8360 * output fails. Then I have got to
8361 * reconstruct the merged control chain.. el
8362 * yucko.. for now we take the easy way and
8366 * Add an AUTH chunk, if chunk requires it
8367 * save the offset into the chain for AUTH
8369 if ((auth == NULL) &&
8370 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8371 stcb->asoc.peer_auth_chunks))) {
8372 outchain = sctp_add_auth_chunk(outchain,
8377 chk->rec.chunk_id.id);
8378 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8380 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8381 (int)chk->rec.chunk_id.can_take_data,
8382 chk->send_size, chk->copy_by_ref);
8383 if (outchain == NULL) {
8385 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8388 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8389 /* update our MTU size */
8390 if (mtu > (chk->send_size + omtu))
8391 mtu -= (chk->send_size + omtu);
8394 to_out += (chk->send_size + omtu);
8395 /* Do clear IP_DF ? */
8396 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8399 if (chk->rec.chunk_id.can_take_data)
8401 /* Mark things to be removed, if needed */
8402 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8403 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8404 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8405 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8406 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8407 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8408 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8409 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8410 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8411 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8412 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8413 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8416 /* remove these chunks at the end */
8417 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8418 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8419 /* turn off the timer */
8420 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8421 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8423 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8429 * Other chunks, since they have
8430 * timers running (i.e. COOKIE), we
8431 * just "trust" that it gets sent or
8435 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8438 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8440 * Increment ecne send count
8441 * here; this means we may be
8442 * over-zealous in our
8443 * counting if the send
8444 * fails, but it's the best
8445 * place to do it (we used
8446 * to do it in the queue of
8447 * the chunk, but that did
8448 * not tell how many times
8451 SCTP_STAT_INCR(sctps_sendecne);
8453 chk->sent = SCTP_DATAGRAM_SENT;
8454 if (chk->whoTo == NULL) {
8456 atomic_add_int(&net->ref_count, 1);
8462 * Ok we are out of room but we can
8463 * output without affecting the
8464 * flight size since this little guy
8465 * is a control only packet.
8468 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8470 * do NOT clear the asconf
8471 * flag as it is used to do
8472 * appropriate source
8473 * address selection.
8477 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8480 /* Only HB or ASCONF advances time */
8482 if (*now_filled == 0) {
8483 (void)SCTP_GETTIME_TIMEVAL(now);
8486 net->last_sent_time = *now;
8489 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8490 (struct sockaddr *)&net->ro._l_addr,
8493 stcb->asoc.authinfo.active_keyid,
8494 no_fragmentflg, 0, asconf,
8495 inp->sctp_lport, stcb->rport,
8496 htonl(stcb->asoc.peer_vtag),
8501 * error, we could not
8504 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8505 if (from_where == 0) {
8506 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8508 if (error == ENOBUFS) {
8509 asoc->ifp_had_enobuf = 1;
8510 SCTP_STAT_INCR(sctps_lowlevelerr);
8512 if (error == EHOSTUNREACH) {
8518 sctp_move_chunks_from_net(stcb, net);
8523 asoc->ifp_had_enobuf = 0;
8526 * increase the number we sent, if a
8527 * cookie is sent we don't tell them
8530 outchain = endoutchain = NULL;
8534 *num_out += ctl_cnt;
8535 /* recalc a clean slate and setup */
8536 switch (net->ro._l_addr.sa.sa_family) {
8539 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8544 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8561 /* JRI: if dest is in PF state, do not send data to it */
8562 if ((asoc->sctp_cmt_on_off > 0) &&
8563 (net != stcb->asoc.alternate) &&
8564 (net->dest_state & SCTP_ADDR_PF)) {
8567 if (net->flight_size >= net->cwnd) {
8570 if ((asoc->sctp_cmt_on_off > 0) &&
8571 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8572 (net->flight_size > max_rwnd_per_dest)) {
8576 * We need a specific accounting for the usage of the send
8577 * buffer. We also need to check the number of messages per
8578 * net. For now, this is better than nothing and it is disabled
8581 if ((asoc->sctp_cmt_on_off > 0) &&
8582 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8583 (max_send_per_dest > 0) &&
8584 (net->flight_size > max_send_per_dest)) {
8587 /*********************/
8588 /* Data transmission */
8589 /*********************/
8591 * if AUTH for DATA is required and no AUTH has been added
8592 * yet, account for this in the mtu now... if no data can be
8593 * bundled, this adjustment won't matter anyways since the
8594 * packet will be going out...
8596 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8597 stcb->asoc.peer_auth_chunks);
8598 if (data_auth_reqd && (auth == NULL)) {
8599 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8601 /* now lets add any data within the MTU constraints */
8602 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8605 if (net->mtu > SCTP_MIN_V4_OVERHEAD)
8606 omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8613 if (net->mtu > SCTP_MIN_OVERHEAD)
8614 omtu = net->mtu - SCTP_MIN_OVERHEAD;
8624 if ((((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
8625 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
8626 (skip_data_for_this_net == 0)) ||
8628 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8629 if (no_data_chunks) {
8630 /* let only control go out */
8634 if (net->flight_size >= net->cwnd) {
8635 /* skip this net, no room for data */
8639 if ((chk->whoTo != NULL) &&
8640 (chk->whoTo != net)) {
8641 /* Don't send the chunk on this net */
8645 if (asoc->sctp_cmt_on_off == 0) {
8646 if ((asoc->alternate) &&
8647 (asoc->alternate != net) &&
8648 (chk->whoTo == NULL)) {
8650 } else if ((net != asoc->primary_destination) &&
8651 (asoc->alternate == NULL) &&
8652 (chk->whoTo == NULL)) {
8656 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8658 * strange, we have a chunk that is
8659 * too big for its destination and
8660 * yet no fragment ok flag.
8661 * Something went wrong when the
8662 * PMTU changed...we did not mark
8663 * this chunk for some reason?? I
8664 * will fix it here by letting IP
8665 * fragment it for now and printing
8666 * a warning. This really should not
8669 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8670 chk->send_size, mtu);
8671 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8673 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8674 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
8675 struct sctp_data_chunk *dchkh;
8677 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8678 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
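/*
 * SCTP_DATA_SACK_IMMEDIATELY sets the I-bit (RFC 7053) on the DATA
 * chunk, asking the peer to SACK right away instead of delaying, which
 * helps the pending shutdown complete sooner.
 */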
8680 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8681 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8682 /* ok we will add this one */
8685 * Add an AUTH chunk, if chunk
8686 * requires it, save the offset into
8687 * the chain for AUTH
8689 if (data_auth_reqd) {
8691 outchain = sctp_add_auth_chunk(outchain,
8697 auth_keyid = chk->auth_keyid;
8699 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8700 } else if (override_ok) {
8705 auth_keyid = chk->auth_keyid;
8707 } else if (auth_keyid != chk->auth_keyid) {
8715 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8716 chk->send_size, chk->copy_by_ref);
8717 if (outchain == NULL) {
8718 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8719 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8720 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8723 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8726 /* update our MTU size */
8727 /* Do clear IP_DF ? */
8728 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8731 /* unsigned subtraction of mtu */
8732 if (mtu > chk->send_size)
8733 mtu -= chk->send_size;
8736 /* unsigned subtraction of r_mtu */
8737 if (r_mtu > chk->send_size)
8738 r_mtu -= chk->send_size;
8742 to_out += chk->send_size;
8743 if ((to_out > mx_mtu) && no_fragmentflg) {
8745 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8747 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8751 chk->window_probe = 0;
8752 data_list[bundle_at++] = chk;
8753 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8756 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8757 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8758 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8760 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8762 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8763 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8773 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8775 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8776 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8777 data_list[0]->window_probe = 1;
8778 net->window_probe = 1;
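/*
 * With a zero peer rwnd and nothing in flight, this single chunk acts as
 * a window probe so the association keeps testing for an opened receive
 * window instead of stalling.
 */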
8784 * Must be sent in order of the
8785 * TSN's (on a network)
8789 } /* for (chunk gather loop for this net) */
8790 } /* if asoc.state OPEN */
8792 /* Is there something to send for this destination? */
8794 /* We may need to start a control timer or two */
8796 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8799 * do NOT clear the asconf flag as it is
8800 * used to do appropriate source address
8805 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8808 /* must start a send timer if data is being sent */
8809 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8811 * no timer running on this destination
8814 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8816 if (bundle_at || hbflag) {
8817 /* For data/asconf and hb set time */
8818 if (*now_filled == 0) {
8819 (void)SCTP_GETTIME_TIMEVAL(now);
8822 net->last_sent_time = *now;
8824 /* Now send it, if there is anything to send :> */
8825 if ((error = sctp_lowlevel_chunk_output(inp,
8828 (struct sockaddr *)&net->ro._l_addr,
8836 inp->sctp_lport, stcb->rport,
8837 htonl(stcb->asoc.peer_vtag),
8841 /* error, we could not output */
8842 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8843 if (from_where == 0) {
8844 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8846 if (error == ENOBUFS) {
8847 asoc->ifp_had_enobuf = 1;
8848 SCTP_STAT_INCR(sctps_lowlevelerr);
8850 if (error == EHOSTUNREACH) {
8852 * Destination went unreachable
8855 sctp_move_chunks_from_net(stcb, net);
8859 * I add this line to be paranoid. As far as
8860 * I can tell, the continue takes us back to
8861 * the top of the for loop, but just to make sure
8862 * I will reset these again here.
8864 ctl_cnt = bundle_at = 0;
8865 continue; /* This takes us back to the
8866 * for() for the nets. */
8868 asoc->ifp_had_enobuf = 0;
8874 *num_out += (ctl_cnt + bundle_at);
8877 /* setup for a RTO measurement */
8878 tsns_sent = data_list[0]->rec.data.tsn;
8879 /* fill time if not already filled */
8880 if (*now_filled == 0) {
8881 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8883 *now = asoc->time_last_sent;
8885 asoc->time_last_sent = *now;
8887 if (net->rto_needed) {
8888 data_list[0]->do_rtt = 1;
8889 net->rto_needed = 0;
8891 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8892 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8898 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8899 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8902 if (old_start_at == NULL) {
8903 old_start_at = start_at;
8904 start_at = TAILQ_FIRST(&asoc->nets);
8906 goto again_one_more_time;
8910 * At the end there should be no NON timed chunks hanging on this
8913 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8914 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8916 if ((*num_out == 0) && (*reason_code == 0)) {
8921 sctp_clean_up_ctl(stcb, asoc, so_locked);
8926 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8929 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
8930 * the control chunk queue.
8932 struct sctp_chunkhdr *hdr;
8933 struct sctp_tmit_chunk *chk;
8934 struct mbuf *mat, *last_mbuf;
8935 uint32_t chunk_length;
8936 uint16_t padding_length;
8938 SCTP_TCB_LOCK_ASSERT(stcb);
8939 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
8940 if (op_err == NULL) {
8945 for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
8946 chunk_length += SCTP_BUF_LEN(mat);
8947 if (SCTP_BUF_NEXT(mat) == NULL) {
8951 if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
8952 sctp_m_freem(op_err);
8955 padding_length = chunk_length % 4;
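/*
 * SCTP chunks are padded out to a multiple of 4 bytes on the wire
 * (RFC 4960, Section 3.2); a nonzero remainder means 4 - remainder
 * padding bytes must be appended below.
 */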
8956 if (padding_length != 0) {
8957 padding_length = 4 - padding_length;
8959 if (padding_length != 0) {
8960 if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
8961 sctp_m_freem(op_err);
8965 sctp_alloc_a_chunk(stcb, chk);
8968 sctp_m_freem(op_err);
8971 chk->copy_by_ref = 0;
8972 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8973 chk->rec.chunk_id.can_take_data = 0;
8975 chk->send_size = (uint16_t)chunk_length;
8976 chk->sent = SCTP_DATAGRAM_UNSENT;
8978 chk->asoc = &stcb->asoc;
8981 hdr = mtod(op_err, struct sctp_chunkhdr *);
8982 hdr->chunk_type = SCTP_OPERATION_ERROR;
8983 hdr->chunk_flags = 0;
8984 hdr->chunk_length = htons(chk->send_size);
8985 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8986 chk->asoc->ctrl_queue_cnt++;
8990 sctp_send_cookie_echo(struct mbuf *m,
8992 struct sctp_tcb *stcb,
8993 struct sctp_nets *net)
8996 * pull out the cookie and put it at the front of the control chunk
9000 struct mbuf *cookie;
9001 struct sctp_paramhdr param, *phdr;
9002 struct sctp_chunkhdr *hdr;
9003 struct sctp_tmit_chunk *chk;
9004 uint16_t ptype, plen;
9006 SCTP_TCB_LOCK_ASSERT(stcb);
9007 /* First find the cookie in the param area */
9009 at = offset + sizeof(struct sctp_init_chunk);
9011 phdr = sctp_get_next_param(m, at, ¶m, sizeof(param));
9015 ptype = ntohs(phdr->param_type);
9016 plen = ntohs(phdr->param_length);
9017 if (ptype == SCTP_STATE_COOKIE) {
9020 /* found the cookie */
9021 if ((pad = (plen % 4))) {
9024 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
9025 if (cookie == NULL) {
9029 #ifdef SCTP_MBUF_LOGGING
9030 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9031 sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9036 at += SCTP_SIZE32(plen);
9038 /* ok, we got the cookie lets change it into a cookie echo chunk */
9039 /* first the change from param to cookie */
9040 hdr = mtod(cookie, struct sctp_chunkhdr *);
9041 hdr->chunk_type = SCTP_COOKIE_ECHO;
9042 hdr->chunk_flags = 0;
9043 /* get the chunk stuff now and place it in the FRONT of the queue */
9044 sctp_alloc_a_chunk(stcb, chk);
9047 sctp_m_freem(cookie);
9050 chk->copy_by_ref = 0;
9051 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9052 chk->rec.chunk_id.can_take_data = 0;
9053 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9054 chk->send_size = plen;
9055 chk->sent = SCTP_DATAGRAM_UNSENT;
9057 chk->asoc = &stcb->asoc;
9060 atomic_add_int(&chk->whoTo->ref_count, 1);
9061 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9062 chk->asoc->ctrl_queue_cnt++;
9067 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9071 struct sctp_nets *net)
9074 * take a HB request and make it into a HB ack and send it.
9076 struct mbuf *outchain;
9077 struct sctp_chunkhdr *chdr;
9078 struct sctp_tmit_chunk *chk;
9082 /* must have a net pointer */
9085 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9086 if (outchain == NULL) {
9087 /* gak out of memory */
9090 #ifdef SCTP_MBUF_LOGGING
9091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9092 sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9095 chdr = mtod(outchain, struct sctp_chunkhdr *);
9096 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9097 chdr->chunk_flags = 0;
9098 if (chk_length % 4) {
9100 uint32_t cpthis = 0;
9103 padlen = 4 - (chk_length % 4);
9104 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
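/*
 * m_copyback() appends "padlen" bytes taken from the zeroed word
 * "cpthis", padding the copied heartbeat parameter out to the next
 * 4-byte boundary.
 */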
9106 sctp_alloc_a_chunk(stcb, chk);
9109 sctp_m_freem(outchain);
9112 chk->copy_by_ref = 0;
9113 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9114 chk->rec.chunk_id.can_take_data = 1;
9116 chk->send_size = chk_length;
9117 chk->sent = SCTP_DATAGRAM_UNSENT;
9119 chk->asoc = &stcb->asoc;
9120 chk->data = outchain;
9122 atomic_add_int(&chk->whoTo->ref_count, 1);
9123 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9124 chk->asoc->ctrl_queue_cnt++;
9128 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9130 /* formulate and queue a cookie-ack back to sender */
9131 struct mbuf *cookie_ack;
9132 struct sctp_chunkhdr *hdr;
9133 struct sctp_tmit_chunk *chk;
9135 SCTP_TCB_LOCK_ASSERT(stcb);
9137 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9138 if (cookie_ack == NULL) {
9142 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9143 sctp_alloc_a_chunk(stcb, chk);
9146 sctp_m_freem(cookie_ack);
9149 chk->copy_by_ref = 0;
9150 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9151 chk->rec.chunk_id.can_take_data = 1;
9153 chk->send_size = sizeof(struct sctp_chunkhdr);
9154 chk->sent = SCTP_DATAGRAM_UNSENT;
9156 chk->asoc = &stcb->asoc;
9157 chk->data = cookie_ack;
9158 if (chk->asoc->last_control_chunk_from != NULL) {
9159 chk->whoTo = chk->asoc->last_control_chunk_from;
9160 atomic_add_int(&chk->whoTo->ref_count, 1);
9164 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9165 hdr->chunk_type = SCTP_COOKIE_ACK;
9166 hdr->chunk_flags = 0;
9167 hdr->chunk_length = htons(chk->send_size);
9168 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9169 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9170 chk->asoc->ctrl_queue_cnt++;
9176 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9178 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9179 struct mbuf *m_shutdown_ack;
9180 struct sctp_shutdown_ack_chunk *ack_cp;
9181 struct sctp_tmit_chunk *chk;
9183 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9184 if (m_shutdown_ack == NULL) {
9188 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9189 sctp_alloc_a_chunk(stcb, chk);
9192 sctp_m_freem(m_shutdown_ack);
9195 chk->copy_by_ref = 0;
9196 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9197 chk->rec.chunk_id.can_take_data = 1;
9199 chk->send_size = sizeof(struct sctp_chunkhdr);
9200 chk->sent = SCTP_DATAGRAM_UNSENT;
9202 chk->asoc = &stcb->asoc;
9203 chk->data = m_shutdown_ack;
9206 atomic_add_int(&chk->whoTo->ref_count, 1);
9208 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9209 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9210 ack_cp->ch.chunk_flags = 0;
9211 ack_cp->ch.chunk_length = htons(chk->send_size);
9212 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9213 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9214 chk->asoc->ctrl_queue_cnt++;
9219 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9221 /* formulate and queue a SHUTDOWN to the sender */
9222 struct mbuf *m_shutdown;
9223 struct sctp_shutdown_chunk *shutdown_cp;
9224 struct sctp_tmit_chunk *chk;
9226 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9227 if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9228 /* We already have a SHUTDOWN queued. Reuse it. */
9230 sctp_free_remote_addr(chk->whoTo);
9237 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9238 if (m_shutdown == NULL) {
9242 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9243 sctp_alloc_a_chunk(stcb, chk);
9246 sctp_m_freem(m_shutdown);
9249 chk->copy_by_ref = 0;
9250 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9251 chk->rec.chunk_id.can_take_data = 1;
9253 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9254 chk->sent = SCTP_DATAGRAM_UNSENT;
9256 chk->asoc = &stcb->asoc;
9257 chk->data = m_shutdown;
9260 atomic_add_int(&chk->whoTo->ref_count, 1);
9262 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9263 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9264 shutdown_cp->ch.chunk_flags = 0;
9265 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9266 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
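/*
 * The SHUTDOWN chunk carries our current cumulative TSN ack (RFC 4960,
 * Section 3.3.8) so the peer knows what has been received before it
 * finishes taking the association down.
 */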
9267 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9268 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9269 chk->asoc->ctrl_queue_cnt++;
9271 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9274 atomic_add_int(&chk->whoTo->ref_count, 1);
9276 shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9277 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9278 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9284 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9287 * formulate and queue an ASCONF to the peer. ASCONF parameters
9288 * should be queued on the assoc queue.
9290 struct sctp_tmit_chunk *chk;
9291 struct mbuf *m_asconf;
9294 SCTP_TCB_LOCK_ASSERT(stcb);
9296 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9297 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9298 /* can't send a new one if there is one in flight already */
9302 /* compose an ASCONF chunk, maximum length is PMTU */
9303 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9304 if (m_asconf == NULL) {
9308 sctp_alloc_a_chunk(stcb, chk);
9311 sctp_m_freem(m_asconf);
9315 chk->copy_by_ref = 0;
9316 chk->rec.chunk_id.id = SCTP_ASCONF;
9317 chk->rec.chunk_id.can_take_data = 0;
9318 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9319 chk->data = m_asconf;
9320 chk->send_size = len;
9321 chk->sent = SCTP_DATAGRAM_UNSENT;
9323 chk->asoc = &stcb->asoc;
9326 atomic_add_int(&chk->whoTo->ref_count, 1);
9328 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9329 chk->asoc->ctrl_queue_cnt++;
9334 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9337 * formulate and queue a asconf-ack back to sender. the asconf-ack
9338 * must be stored in the tcb.
9340 struct sctp_tmit_chunk *chk;
9341 struct sctp_asconf_ack *ack, *latest_ack;
9343 struct sctp_nets *net = NULL;
9345 SCTP_TCB_LOCK_ASSERT(stcb);
9346 /* Get the latest ASCONF-ACK */
9347 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9348 if (latest_ack == NULL) {
9351 if (latest_ack->last_sent_to != NULL &&
9352 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9353 /* we're doing a retransmission */
9354 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9357 if (stcb->asoc.last_control_chunk_from == NULL) {
9358 if (stcb->asoc.alternate) {
9359 net = stcb->asoc.alternate;
9361 net = stcb->asoc.primary_destination;
9364 net = stcb->asoc.last_control_chunk_from;
9369 if (stcb->asoc.last_control_chunk_from == NULL) {
9370 if (stcb->asoc.alternate) {
9371 net = stcb->asoc.alternate;
9373 net = stcb->asoc.primary_destination;
9376 net = stcb->asoc.last_control_chunk_from;
9379 latest_ack->last_sent_to = net;
9381 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9382 if (ack->data == NULL) {
9386 /* copy the asconf_ack */
9387 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9388 if (m_ack == NULL) {
9389 /* couldn't copy it */
9392 #ifdef SCTP_MBUF_LOGGING
9393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9394 sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9398 sctp_alloc_a_chunk(stcb, chk);
9402 sctp_m_freem(m_ack);
9405 chk->copy_by_ref = 0;
9406 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9407 chk->rec.chunk_id.can_take_data = 1;
9408 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9411 atomic_add_int(&chk->whoTo->ref_count, 1);
9414 chk->send_size = ack->len;
9415 chk->sent = SCTP_DATAGRAM_UNSENT;
9417 chk->asoc = &stcb->asoc;
9419 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9420 chk->asoc->ctrl_queue_cnt++;
9427 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9428 struct sctp_tcb *stcb,
9429 struct sctp_association *asoc,
9430 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9431 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9437 * send out one MTU of retransmission. If fast_retransmit is
9438 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9439 * rwnd. For a Cookie or Asconf in the control chunk queue we
9440 * retransmit them by themselves.
9442 * For data chunks we will pick out the lowest TSN's in the sent_queue
9443 * marked for resend and bundle them all together (up to a MTU of
9444 * destination). The address to send to should have been
9445 * selected/changed where the retransmission was marked (i.e. in FR
9446 * or t3-timeout routines).
9448 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9449 struct sctp_tmit_chunk *chk, *fwd;
9450 struct mbuf *m, *endofchain;
9451 struct sctp_nets *net = NULL;
9452 uint32_t tsns_sent = 0;
9453 int no_fragmentflg, bundle_at, cnt_thru;
9455 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9456 struct sctp_auth_chunk *auth = NULL;
9457 uint32_t auth_offset = 0;
9458 uint16_t auth_keyid;
9459 int override_ok = 1;
9460 int data_auth_reqd = 0;
9463 SCTP_TCB_LOCK_ASSERT(stcb);
9464 tmr_started = ctl_cnt = bundle_at = error = 0;
9469 endofchain = m = NULL;
9470 auth_keyid = stcb->asoc.authinfo.active_keyid;
9471 #ifdef SCTP_AUDITING_ENABLED
9472 sctp_audit_log(0xC3, 1);
9474 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9475 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9476 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9477 asoc->sent_queue_retran_cnt);
9478 asoc->sent_queue_cnt = 0;
9479 asoc->sent_queue_cnt_removeable = 0;
9480 /* send back 0/0 so we enter normal transmission */
9484 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9485 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9486 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9487 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9488 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9491 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9492 if (chk != asoc->str_reset) {
9494 * not eligible for retran if its
9501 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9505 * Add an AUTH chunk, if chunk requires it save the
9506 * offset into the chain for AUTH
9508 if ((auth == NULL) &&
9509 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9510 stcb->asoc.peer_auth_chunks))) {
9511 m = sctp_add_auth_chunk(m, &endofchain,
9512 &auth, &auth_offset,
9514 chk->rec.chunk_id.id);
9515 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9517 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9523 /* do we have control chunks to retransmit? */
9525 /* Start a timer no matter if we succeed or fail */
9526 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9527 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9528 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9529 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9530 chk->snd_count++; /* update our count */
9531 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9532 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9533 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9534 no_fragmentflg, 0, 0,
9535 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9536 chk->whoTo->port, NULL,
9539 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9540 if (error == ENOBUFS) {
9541 asoc->ifp_had_enobuf = 1;
9542 SCTP_STAT_INCR(sctps_lowlevelerr);
9546 asoc->ifp_had_enobuf = 0;
9552 * We don't want to mark the net->sent time here, since
9553 * we use this for HB and retransmissions cannot measure RTT
9555 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9557 chk->sent = SCTP_DATAGRAM_SENT;
9558 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9562 /* Clean up the fwd-tsn list */
9563 sctp_clean_up_ctl(stcb, asoc, so_locked);
9568 * Ok, it is just data retransmission we need to do or that and a
9569 * fwd-tsn with it all.
9571 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9572 return (SCTP_RETRAN_DONE);
9574 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED) ||
9575 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT)) {
9576 /* not yet open, resend the cookie and that is it */
9579 #ifdef SCTP_AUDITING_ENABLED
9580 sctp_auditing(20, inp, stcb, NULL);
9582 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9583 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9584 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9585 /* No, not sent to this net or not ready for rtx */
9588 if (chk->data == NULL) {
9589 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9590 chk->rec.data.tsn, chk->snd_count, chk->sent);
9593 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9594 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9595 struct mbuf *op_err;
9596 char msg[SCTP_DIAG_INFO_LEN];
9598 snprintf(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
9599 chk->rec.data.tsn, chk->snd_count);
9600 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
9602 atomic_add_int(&stcb->asoc.refcnt, 1);
9603 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
9605 SCTP_TCB_LOCK(stcb);
9606 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9607 return (SCTP_RETRAN_EXIT);
9609 /* pick up the net */
9611 switch (net->ro._l_addr.sa.sa_family) {
9614 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9619 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9628 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9629 /* No room in peers rwnd */
9632 tsn = asoc->last_acked_seq + 1;
9633 if (tsn == chk->rec.data.tsn) {
9635 * we make a special exception for this
9636 * case. The peer has no rwnd but is missing
9637 * the lowest chunk.. which is probably what
9638 * is holding up the rwnd.
9640 goto one_chunk_around;
9645 if (asoc->peers_rwnd < mtu) {
9647 if ((asoc->peers_rwnd == 0) &&
9648 (asoc->total_flight == 0)) {
9649 chk->window_probe = 1;
9650 chk->whoTo->window_probe = 1;
9653 #ifdef SCTP_AUDITING_ENABLED
9654 sctp_audit_log(0xC3, 2);
9658 net->fast_retran_ip = 0;
9659 if (chk->rec.data.doing_fast_retransmit == 0) {
9661 * if no FR in progress skip destination that have
9662 * flight_size > cwnd.
9664 if (net->flight_size >= net->cwnd) {
9669 * Mark the destination net to have FR recovery
9673 net->fast_retran_ip = 1;
9677 * if no AUTH is yet included and this chunk requires it,
9678 * make sure to account for it. We don't apply the size
9679 * until the AUTH chunk is actually added below in case
9680 * there is no room for this chunk.
9682 if (data_auth_reqd && (auth == NULL)) {
9683 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9687 if ((chk->send_size <= (mtu - dmtu)) ||
9688 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9689 /* ok we will add this one */
9690 if (data_auth_reqd) {
9692 m = sctp_add_auth_chunk(m,
9698 auth_keyid = chk->auth_keyid;
9700 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9701 } else if (override_ok) {
9702 auth_keyid = chk->auth_keyid;
9704 } else if (chk->auth_keyid != auth_keyid) {
9705 /* different keyid, so done bundling */
9709 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9711 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9714 /* Do clear IP_DF ? */
9715 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9718 /* update our MTU size */
9719 if (mtu > (chk->send_size + dmtu))
9720 mtu -= (chk->send_size + dmtu);
9723 data_list[bundle_at++] = chk;
9724 if (one_chunk && (asoc->total_flight <= 0)) {
9725 SCTP_STAT_INCR(sctps_windowprobed);
9728 if (one_chunk == 0) {
9730 * now, are there any more forward from chk to pick
9733 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9734 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9735 /* Nope, not for retran */
9738 if (fwd->whoTo != net) {
9739 /* Nope, not the net in question */
9742 if (data_auth_reqd && (auth == NULL)) {
9743 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9746 if (fwd->send_size <= (mtu - dmtu)) {
9747 if (data_auth_reqd) {
9749 m = sctp_add_auth_chunk(m,
9755 auth_keyid = fwd->auth_keyid;
9757 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9758 } else if (override_ok) {
9759 auth_keyid = fwd->auth_keyid;
9761 } else if (fwd->auth_keyid != auth_keyid) {
9769 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9771 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9774 /* Do clear IP_DF ? */
9775 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9778 /* update our MTU size */
9779 if (mtu > (fwd->send_size + dmtu))
9780 mtu -= (fwd->send_size + dmtu);
9783 data_list[bundle_at++] = fwd;
9784 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9788 /* can't fit so we are done */
9793 /* Is there something to send for this destination? */
9796 * No matter if we fail or succeed we should start a
9797 * timer. A failure is like a lost IP packet :-)
9799 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9801 * no timer running on this destination
9804 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9807 /* Now lets send it, if there is anything to send :> */
9808 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9809 (struct sockaddr *)&net->ro._l_addr, m,
9810 auth_offset, auth, auth_keyid,
9811 no_fragmentflg, 0, 0,
9812 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9816 /* error, we could not output */
9817 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9818 if (error == ENOBUFS) {
9819 asoc->ifp_had_enobuf = 1;
9820 SCTP_STAT_INCR(sctps_lowlevelerr);
9824 asoc->ifp_had_enobuf = 0;
9831 * We don't want to mark the net->sent time here
9832 * since we use this for HB and retransmissions cannot
9835 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9837 /* For auto-close */
9839 if (*now_filled == 0) {
9840 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9841 *now = asoc->time_last_sent;
9844 asoc->time_last_sent = *now;
9846 *cnt_out += bundle_at;
9847 #ifdef SCTP_AUDITING_ENABLED
9848 sctp_audit_log(0xC4, bundle_at);
9851 tsns_sent = data_list[0]->rec.data.tsn;
9853 for (i = 0; i < bundle_at; i++) {
9854 SCTP_STAT_INCR(sctps_sendretransdata);
9855 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9857 * When we have a revoked data, and we
9858 * retransmit it, then we clear the revoked
9859 * flag since this flag dictates if we
9860 * subtracted from the fs
9862 if (data_list[i]->rec.data.chunk_was_revoked) {
9863 /* Deflate the cwnd */
9864 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9865 data_list[i]->rec.data.chunk_was_revoked = 0;
9867 data_list[i]->snd_count++;
9868 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9869 /* record the time */
9870 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9871 if (data_list[i]->book_size_scale) {
9873 * need to double the book size on
9876 data_list[i]->book_size_scale = 0;
9878 * Since we double the booksize, we
9879 * must also double the output queue
9880 * size, since this gets shrunk when
9881 * we free by this amount.
9883 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9884 data_list[i]->book_size *= 2;
9888 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9889 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9890 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9892 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9893 (uint32_t)(data_list[i]->send_size +
9894 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9896 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9897 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9898 data_list[i]->whoTo->flight_size,
9899 data_list[i]->book_size,
9900 (uint32_t)(uintptr_t)data_list[i]->whoTo,
9901 data_list[i]->rec.data.tsn);
9903 sctp_flight_size_increase(data_list[i]);
9904 sctp_total_flight_increase(stcb, data_list[i]);
9905 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9906 /* SWS sender side engages */
9907 asoc->peers_rwnd = 0;
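/*
 * Sender-side silly window syndrome (SWS) avoidance: once the peer's
 * advertised window falls below our sws_sender threshold we treat it as
 * zero rather than dribbling out tiny packets.
 */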
9910 (data_list[i]->rec.data.doing_fast_retransmit)) {
9911 SCTP_STAT_INCR(sctps_sendfastretrans);
9912 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9913 (tmr_started == 0)) {
9915 * ok we just fast-retrans'd
9916 * the lowest TSN, i.e the
9917 * first on the list. In
9918 * this case we want to give
9919 * some more time to get a
9920 * SACK back without a
9923 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9924 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
9925 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9929 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9930 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9932 #ifdef SCTP_AUDITING_ENABLED
9933 sctp_auditing(21, inp, stcb, NULL);
9939 if (asoc->sent_queue_retran_cnt <= 0) {
9940 /* all done we have no more to retran */
9941 asoc->sent_queue_retran_cnt = 0;
9945 /* No more room in rwnd */
9948 /* stop the for loop here. we sent out a packet */
9955 sctp_timer_validation(struct sctp_inpcb *inp,
9956 struct sctp_tcb *stcb,
9957 struct sctp_association *asoc)
9959 struct sctp_nets *net;
9961 /* Validate that a timer is running somewhere */
9962 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9963 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9964 /* Here is a timer */
9968 SCTP_TCB_LOCK_ASSERT(stcb);
9969 /* Gak, we did not have a timer somewhere */
9970 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9971 if (asoc->alternate) {
9972 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9974 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9980 sctp_chunk_output(struct sctp_inpcb *inp,
9981 struct sctp_tcb *stcb,
9984 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9990 * Ok this is the generic chunk service queue. We must do the
9992 * - See if there are retransmits pending, if so we must
9994 * - Service the stream queue that is next, moving any
9995 * message (note I must get a complete message i.e.
9996 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9998 * - Check to see if the cwnd/rwnd allows any output, if so we
9999 * go ahead and formulate and send the low level chunks. Making sure
10000 * to combine any control in the control chunk queue also.
10002 struct sctp_association *asoc;
10003 struct sctp_nets *net;
10004 int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
10005 unsigned int burst_cnt = 0;
10006 struct timeval now;
10007 int now_filled = 0;
10009 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
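/*
 * frag_point is the largest amount of user data we will place in a single
 * DATA chunk, derived from the smallest path MTU for this association.
 */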
10012 unsigned int tot_frs = 0;
10014 asoc = &stcb->asoc;
10016 /* The Nagle algorithm is only applied when handling a send call. */
10017 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
10018 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
10026 SCTP_TCB_LOCK_ASSERT(stcb);
10028 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
10030 if ((un_sent <= 0) &&
10031 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
10032 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
10033 (asoc->sent_queue_retran_cnt == 0) &&
10034 (asoc->trigger_reset == 0)) {
10035 /* Nothing to do unless there is something to be sent left */
10039 * Do we have something to send, data or control AND a sack timer
10040 * running, if so piggy-back the sack.
10042 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10043 sctp_send_sack(stcb, so_locked);
10044 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
10046 while (asoc->sent_queue_retran_cnt) {
10048 * Ok, it is retransmission time only, we send out only ONE
10049 * packet with a single call off to the retran code.
10051 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10053 * Special hook for handling cookies discarded
10054 * by the peer that carried data. Send the cookie-ack only,
10055 * and then the next call will get the retransmissions.
10057 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10059 &now, &now_filled, frag_point, so_locked);
10061 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10062 /* if its not from a HB then do it */
10064 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10070 * its from any other place, we don't allow retran
10071 * output (only control)
10076 /* Can't send anymore */
10078 * now lets push out control by calling med-level
10079 * output once. this assures that we WILL send HB's
10082 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10084 &now, &now_filled, frag_point, so_locked);
10085 #ifdef SCTP_AUDITING_ENABLED
10086 sctp_auditing(8, inp, stcb, NULL);
10088 sctp_timer_validation(inp, stcb, asoc);
10093 * The count was off.. retran is not happening so do
10094 * the normal retransmission.
10096 #ifdef SCTP_AUDITING_ENABLED
10097 sctp_auditing(9, inp, stcb, NULL);
10099 if (ret == SCTP_RETRAN_EXIT) {
10104 if (from_where == SCTP_OUTPUT_FROM_T3) {
10105 /* Only one transmission allowed out of a timeout */
10106 #ifdef SCTP_AUDITING_ENABLED
10107 sctp_auditing(10, inp, stcb, NULL);
10109 /* Push out any control */
10110 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10111 &now, &now_filled, frag_point, so_locked);
10114 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10115 /* Hit FR burst limit */
10118 if ((num_out == 0) && (ret == 0)) {
10119 /* No more retrans to send */
10123 #ifdef SCTP_AUDITING_ENABLED
10124 sctp_auditing(12, inp, stcb, NULL);
10126 /* Check for bad destinations, if they exist move chunks around. */
10127 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10128 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10130 * if possible move things off of this address; we
10131 * still may send below due to the dormant state, but
10132 * we try to find an alternate address to send to,
10133 * and if we have one we move all queued data on the
10134 * out wheel to this alternate address.
10136 if (net->ref_count > 1)
10137 sctp_move_chunks_from_net(stcb, net);
10140 * if ((asoc->sat_network) || (net->addr_is_local))
10141 * { burst_limit = asoc->max_burst *
10142 * SCTP_SAT_NETWORK_BURST_INCR; }
10144 if (asoc->max_burst > 0) {
10145 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10146 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10148 * JRS - Use the congestion
10149 * control given in the
10150 * congestion control module
10152 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
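/*
 * With cwnd-based max burst, the congestion-control module is asked to
 * limit the burst (typically by clamping cwnd to roughly flight_size +
 * max_burst * MTU) instead of counting packets per send call.
 */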
10153 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10154 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10156 SCTP_STAT_INCR(sctps_maxburstqueued);
10158 net->fast_retran_ip = 0;
10160 if (net->flight_size == 0) {
10162 * Should be decaying the
10174 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10175 &reason_code, 0, from_where,
10176 &now, &now_filled, frag_point, so_locked);
10178 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10179 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10180 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10182 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10183 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10184 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10188 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10190 tot_out += num_out;
10192 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10193 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10194 if (num_out == 0) {
10195 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10200 * When the Nagle algorithm is used, look at how
10201 * much is unsent, then if it's smaller than an MTU
10202 * and we have data in flight we stop, except if we
10203 * are handling a fragmented user message.
10205 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
10206 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10207 (stcb->asoc.total_flight > 0)) {
10208 /* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10212 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10213 TAILQ_EMPTY(&asoc->send_queue) &&
10214 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10215 /* Nothing left to send */
10218 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10219 /* Nothing left to send */
10222 } while (num_out &&
10223 ((asoc->max_burst == 0) ||
10224 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10225 (burst_cnt < asoc->max_burst)));
10227 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10228 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10229 SCTP_STAT_INCR(sctps_maxburstqueued);
10230 asoc->burst_limit_applied = 1;
10231 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10232 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10235 asoc->burst_limit_applied = 0;
10238 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10239 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10241 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10245 * Now we need to clean up the control chunk chain if a ECNE is on
10246 * it. It must be marked as UNSENT again so next call will continue
10247 * to send it until such time that we get a CWR, to remove it.
10249 if (stcb->asoc.ecn_echo_cnt_onq)
10250 sctp_fix_ecn_echo(asoc);
10252 if (stcb->asoc.trigger_reset) {
10253 if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) {
10263 struct sctp_inpcb *inp,
10265 struct sockaddr *addr,
10266 struct mbuf *control,
10271 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10275 if (inp->sctp_socket == NULL) {
10276 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10279 return (sctp_sosend(inp->sctp_socket,
10281 (struct uio *)NULL,
10289 send_forward_tsn(struct sctp_tcb *stcb,
10290 struct sctp_association *asoc)
10292 struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10293 struct sctp_forward_tsn_chunk *fwdtsn;
10294 struct sctp_strseq *strseq;
10295 struct sctp_strseq_mid *strseq_m;
10296 uint32_t advance_peer_ack_point;
10297 unsigned int cnt_of_space, i, ovh;
10298 unsigned int space_needed;
10299 unsigned int cnt_of_skipped = 0;
10301 SCTP_TCB_LOCK_ASSERT(stcb);
10302 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10303 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10304 /* mark it to unsent */
10305 chk->sent = SCTP_DATAGRAM_UNSENT;
10306 chk->snd_count = 0;
10307 /* Do we correct its output location? */
10309 sctp_free_remote_addr(chk->whoTo);
10312 goto sctp_fill_in_rest;
10315 /* Ok if we reach here we must build one */
10316 sctp_alloc_a_chunk(stcb, chk);
10320 asoc->fwd_tsn_cnt++;
10321 chk->copy_by_ref = 0;
10323 * We don't do the old thing here, since this is not used for the on-wire
10324 * type but to tell whether the stack is sending a fwd-tsn during
10325 * output. And if it's an IFORWARD or a FORWARD, it is a fwd-tsn.
10327 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10328 chk->rec.chunk_id.can_take_data = 0;
10332 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10333 if (chk->data == NULL) {
10334 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10337 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10338 chk->sent = SCTP_DATAGRAM_UNSENT;
10339 chk->snd_count = 0;
10340 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10341 asoc->ctrl_queue_cnt++;
10344 * Here we go through and fill out the part that deals with
10345 * stream/seq of the ones we skip.
10347 SCTP_BUF_LEN(chk->data) = 0;
10348 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10349 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10350 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10351 /* no more to look at */
10354 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10355 /* We don't report these */
10360 if (asoc->idata_supported) {
10361 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10362 (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10364 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10365 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10367 cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
10369 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10370 ovh = SCTP_MIN_OVERHEAD;
10372 ovh = SCTP_MIN_V4_OVERHEAD;
10374 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10375 /* trim to a mtu size */
10376 cnt_of_space = asoc->smallest_mtu - ovh;
10378 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10379 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10380 0xff, 0, cnt_of_skipped,
10381 asoc->advanced_peer_ack_point);
10383 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10384 if (cnt_of_space < space_needed) {
10386 * ok we must trim down the chunk by lowering the
10387 * advance peer ack point.
10389 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10390 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10391 0xff, 0xff, cnt_of_space,
10394 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10395 if (asoc->idata_supported) {
10396 cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10398 cnt_of_skipped /= sizeof(struct sctp_strseq);
10401 * Go through and find the TSN that will be the one
10404 at = TAILQ_FIRST(&asoc->sent_queue);
10406 for (i = 0; i < cnt_of_skipped; i++) {
10407 tp1 = TAILQ_NEXT(at, sctp_next);
10414 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10415 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10416 0xff, cnt_of_skipped, at->rec.data.tsn,
10417 asoc->advanced_peer_ack_point);
10421 * last now points to last one I can report, update
10425 advance_peer_ack_point = last->rec.data.tsn;
10427 if (asoc->idata_supported) {
10428 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10429 cnt_of_skipped * sizeof(struct sctp_strseq_mid);
10431 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10432 cnt_of_skipped * sizeof(struct sctp_strseq);
10435 chk->send_size = space_needed;
10436 /* Setup the chunk */
10437 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10438 fwdtsn->ch.chunk_length = htons(chk->send_size);
10439 fwdtsn->ch.chunk_flags = 0;
10440 if (asoc->idata_supported) {
10441 fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
10443 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10445 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10446 SCTP_BUF_LEN(chk->data) = chk->send_size;
10449 * Move pointer to after the fwdtsn and transfer to the
10452 if (asoc->idata_supported) {
10453 strseq_m = (struct sctp_strseq_mid *)fwdtsn;
10456 strseq = (struct sctp_strseq *)fwdtsn;
10460 * Now populate the strseq list. This is done blindly
10461 * without pulling out duplicate stream info. This is
10462 * inefficient but won't harm the process since the peer will
10463 * look at these in sequence and will thus release anything.
10464 * It could mean we exceed the PMTU and chop off some that
10465 * we could have included.. but this is unlikely (aka 1432/4
10466 * would mean 300+ stream seq's would have to be reported in
10467 * one FWD-TSN). With a bit of work we can later FIX this to
10468 * optimize and pull out duplicates.. but it does add more
10469 * overhead. So for now... not!
10472 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10473 if (i >= cnt_of_skipped) {
10476 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10477 /* We don't report these */
10480 if (at->rec.data.tsn == advance_peer_ack_point) {
10481 at->rec.data.fwd_tsn_cnt = 0;
10483 if (asoc->idata_supported) {
10484 strseq_m->sid = htons(at->rec.data.sid);
10485 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10486 strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
10488 strseq_m->flags = 0;
10490 strseq_m->mid = htonl(at->rec.data.mid);
10493 strseq->sid = htons(at->rec.data.sid);
10494 strseq->ssn = htons((uint16_t)at->rec.data.mid);
10503 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10504 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10510 * Queue up a SACK or NR-SACK in the control queue.
10511 * We must first check to see if a SACK or NR-SACK is
10512 * somehow on the control queue.
10513 * If so, we will take it and remove the old one.
10515 struct sctp_association *asoc;
10516 struct sctp_tmit_chunk *chk, *a_chk;
10517 struct sctp_sack_chunk *sack;
10518 struct sctp_nr_sack_chunk *nr_sack;
10519 struct sctp_gap_ack_block *gap_descriptor;
10520 const struct sack_track *selector;
10525 int limit_reached = 0;
10526 unsigned int i, siz, j;
10527 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10530 uint32_t highest_tsn;
10535 if (stcb->asoc.nrsack_supported == 1) {
10536 type = SCTP_NR_SELECTIVE_ACK;
10538 type = SCTP_SELECTIVE_ACK;
10541 asoc = &stcb->asoc;
10542 SCTP_TCB_LOCK_ASSERT(stcb);
10543 if (asoc->last_data_chunk_from == NULL) {
10544 /* Hmm we never received anything */
10547 sctp_slide_mapping_arrays(stcb);
10548 sctp_set_rwnd(stcb, asoc);
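/*
 * The mapping arrays were just slid and the advertised rwnd recomputed,
 * so the SACK built below reflects the receiver's current state.
 */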
10549 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10550 if (chk->rec.chunk_id.id == type) {
10551 /* Hmm, found a sack already on queue, remove it */
10552 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10553 asoc->ctrl_queue_cnt--;
10556 sctp_m_freem(a_chk->data);
10557 a_chk->data = NULL;
10559 if (a_chk->whoTo) {
10560 sctp_free_remote_addr(a_chk->whoTo);
10561 a_chk->whoTo = NULL;
10566 if (a_chk == NULL) {
10567 sctp_alloc_a_chunk(stcb, a_chk);
10568 if (a_chk == NULL) {
10569 /* No memory so we drop the idea, and set a timer */
10570 if (stcb->asoc.delayed_ack) {
10571 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10572 stcb->sctp_ep, stcb, NULL,
10573 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
10574 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10575 stcb->sctp_ep, stcb, NULL);
10577 stcb->asoc.send_sack = 1;
10581 a_chk->copy_by_ref = 0;
10582 a_chk->rec.chunk_id.id = type;
10583 a_chk->rec.chunk_id.can_take_data = 1;
10585 /* Clear our pkt counts */
10586 asoc->data_pkts_seen = 0;
10589 a_chk->asoc = asoc;
10590 a_chk->snd_count = 0;
10591 a_chk->send_size = 0; /* fill in later */
10592 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10593 a_chk->whoTo = NULL;
10595 if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
10597 * Ok, the destination for the SACK is unreachable; let's see if
10598 * we can select an alternate to asoc->last_data_chunk_from
10600 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10601 if (a_chk->whoTo == NULL) {
10602 /* Nope, no alternate */
10603 a_chk->whoTo = asoc->last_data_chunk_from;
10606 a_chk->whoTo = asoc->last_data_chunk_from;
10608 if (a_chk->whoTo) {
10609 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10611 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10612 highest_tsn = asoc->highest_tsn_inside_map;
10614 highest_tsn = asoc->highest_tsn_inside_nr_map;
10616 if (highest_tsn == asoc->cumulative_tsn) {
10618 if (type == SCTP_SELECTIVE_ACK) {
10619 space_req = sizeof(struct sctp_sack_chunk);
10621 space_req = sizeof(struct sctp_nr_sack_chunk);
10624 /* gaps get a cluster */
10625 space_req = MCLBYTES;
10627 /* Ok, now let's formulate an mbuf with our SACK */
10628 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
10629 if ((a_chk->data == NULL) ||
10630 (a_chk->whoTo == NULL)) {
10631 /* rats, no mbuf memory */
10633 /* was a problem with the destination */
10634 sctp_m_freem(a_chk->data);
10635 a_chk->data = NULL;
10637 sctp_free_a_chunk(stcb, a_chk, so_locked);
10638 /* sa_ignore NO_NULL_CHK */
10639 if (stcb->asoc.delayed_ack) {
10640 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10641 stcb->sctp_ep, stcb, NULL,
10642 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
10643 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10644 stcb->sctp_ep, stcb, NULL);
10646 stcb->asoc.send_sack = 1;
10650 /* ok, lets go through and fill it in */
10651 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10652 space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
10653 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10654 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10656 limit = mtod(a_chk->data, caddr_t);
10661 if ((asoc->sctp_cmt_on_off > 0) &&
10662 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10664 * CMT DAC algorithm: if 2 (i.e., binary 10) packets have been
10665 * received, then set high bit to 1, else 0. Reset
10668 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10669 asoc->cmt_dac_pkts_rcvd = 0;
10671 #ifdef SCTP_ASOCLOG_OF_TSNS
10672 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10673 stcb->asoc.cumack_log_atsnt++;
10674 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10675 stcb->asoc.cumack_log_atsnt = 0;
10678 /* reset the readers interpretation */
10679 stcb->freed_by_sorcv_sincelast = 0;
10681 if (type == SCTP_SELECTIVE_ACK) {
10682 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10684 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10685 if (highest_tsn > asoc->mapping_array_base_tsn) {
10686 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10688 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10692 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10693 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10694 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10695 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10697 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10701 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10704 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
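/*
 * Walk the mapping array one byte at a time.  Each byte indexes
 * sack_array[], which yields up to four precomputed gap-ack blocks for
 * that bit pattern; the right/left edge flags allow runs that cross a
 * byte boundary to be merged into a single block.  "offset" converts
 * bit positions into TSN offsets relative to the cumulative TSN ack.
 */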
10706 if (((type == SCTP_SELECTIVE_ACK) &&
10707 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10708 ((type == SCTP_NR_SELECTIVE_ACK) &&
10709 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10710 /* we have a gap .. maybe */
10711 for (i = 0; i < siz; i++) {
10712 tsn_map = asoc->mapping_array[i];
10713 if (type == SCTP_SELECTIVE_ACK) {
10714 tsn_map |= asoc->nr_mapping_array[i];
10718 * Clear all bits corresponding to TSNs
10719 * smaller than or equal to the cumulative TSN.
10721 tsn_map &= (~0U << (1 - offset));
10723 selector = &sack_array[tsn_map];
10724 if (mergeable && selector->right_edge) {
10726 * Backup, left and right edges were ok to
10732 if (selector->num_entries == 0)
10735 for (j = 0; j < selector->num_entries; j++) {
10736 if (mergeable && selector->right_edge) {
10738 * do a merge by NOT setting
10744 * no merge, set the left
10748 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10750 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10753 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10759 if (selector->left_edge) {
10763 if (limit_reached) {
10764 /* Reached the limit, stop */
10770 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10771 (limit_reached == 0)) {
10775 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10776 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10778 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10781 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10784 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10786 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10787 /* we have a gap .. maybe */
10788 for (i = 0; i < siz; i++) {
10789 tsn_map = asoc->nr_mapping_array[i];
10792 * Clear all bits corresponding to
10793 * TSNs smaller than or equal to the
10796 tsn_map &= (~0U << (1 - offset));
10798 selector = &sack_array[tsn_map];
10799 if (mergeable && selector->right_edge) {
10801 * Backup, left and right edges were
10804 num_nr_gap_blocks--;
10807 if (selector->num_entries == 0)
10810 for (j = 0; j < selector->num_entries; j++) {
10811 if (mergeable && selector->right_edge) {
10813 * do a merge by NOT
10820 * no merge, set the
10824 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10826 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10827 num_nr_gap_blocks++;
10829 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10835 if (selector->left_edge) {
10839 if (limit_reached) {
10840 /* Reached the limit, stop */
10847 /* now we must add any dups we are going to report. */
10848 if ((limit_reached == 0) && (asoc->numduptsns)) {
10849 dup = (uint32_t *)gap_descriptor;
10850 for (i = 0; i < asoc->numduptsns; i++) {
10851 *dup = htonl(asoc->dup_tsns[i]);
10854 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10859 asoc->numduptsns = 0;
10862 * now that the chunk is prepared queue it to the control chunk
10865 if (type == SCTP_SELECTIVE_ACK) {
10866 a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
10867 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10868 num_dups * sizeof(int32_t));
10869 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10870 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10871 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10872 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10873 sack->sack.num_dup_tsns = htons(num_dups);
10874 sack->ch.chunk_type = type;
10875 sack->ch.chunk_flags = flags;
10876 sack->ch.chunk_length = htons(a_chk->send_size);
10878 a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
10879 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10880 num_dups * sizeof(int32_t));
10881 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10882 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10883 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10884 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10885 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10886 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10887 nr_sack->nr_sack.reserved = 0;
10888 nr_sack->ch.chunk_type = type;
10889 nr_sack->ch.chunk_flags = flags;
10890 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10892 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10893 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10894 asoc->ctrl_queue_cnt++;
10895 asoc->send_sack = 0;
10896 SCTP_STAT_INCR(sctps_sendsacks);
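/*
 * sctp_send_abort_tcb() builds an ABORT chunk for an existing association:
 * it prepends an AUTH chunk when ABORT must be authenticated, chains any
 * supplied error causes, pads the chunk to a 4-byte boundary, and hands the
 * result to sctp_lowlevel_chunk_output() using the peer's verification tag
 * (or our own tag, flagged with SCTP_HAD_NO_TCB, while still in COOKIE-WAIT).
 */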
10901 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10902 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10907 struct mbuf *m_abort, *m, *m_last;
10908 struct mbuf *m_out, *m_end = NULL;
10909 struct sctp_abort_chunk *abort;
10910 struct sctp_auth_chunk *auth = NULL;
10911 struct sctp_nets *net;
10913 uint32_t auth_offset = 0;
10915 uint16_t cause_len, chunk_len, padding_len;
10917 SCTP_TCB_LOCK_ASSERT(stcb);
10919 * Add an AUTH chunk if the chunk requires it, and save the offset into
10920 * the chain for AUTH
10922 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10923 stcb->asoc.peer_auth_chunks)) {
10924 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10925 stcb, SCTP_ABORT_ASSOCIATION);
10926 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10930 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
10931 if (m_abort == NULL) {
10933 sctp_m_freem(m_out);
10936 sctp_m_freem(operr);
10940 /* link in any error */
10941 SCTP_BUF_NEXT(m_abort) = operr;
10944 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10945 cause_len += (uint16_t)SCTP_BUF_LEN(m);
10946 if (SCTP_BUF_NEXT(m) == NULL) {
10950 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10951 chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
10952 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
10953 if (m_out == NULL) {
10954 /* NO Auth chunk prepended, so reserve space in front */
10955 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10958 /* Put AUTH chunk at the front of the chain */
10959 SCTP_BUF_NEXT(m_end) = m_abort;
10961 if (stcb->asoc.alternate) {
10962 net = stcb->asoc.alternate;
10964 net = stcb->asoc.primary_destination;
10966 /* Fill in the ABORT chunk header. */
10967 abort = mtod(m_abort, struct sctp_abort_chunk *);
10968 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10969 if (stcb->asoc.peer_vtag == 0) {
10970 /* This happens iff the assoc is in COOKIE-WAIT state. */
10971 vtag = stcb->asoc.my_vtag;
10972 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
10974 vtag = stcb->asoc.peer_vtag;
10975 abort->ch.chunk_flags = 0;
10977 abort->ch.chunk_length = htons(chunk_len);
10978 /* Add padding, if necessary. */
10979 if (padding_len > 0) {
10980 if ((m_last == NULL) ||
10981 (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
10982 sctp_m_freem(m_out);
10986 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10987 (struct sockaddr *)&net->ro._l_addr,
10988 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10989 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
10990 stcb->asoc.primary_destination->port, NULL,
10993 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10994 if (error == ENOBUFS) {
10995 stcb->asoc.ifp_had_enobuf = 1;
10996 SCTP_STAT_INCR(sctps_lowlevelerr);
10999 stcb->asoc.ifp_had_enobuf = 0;
11001 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11005 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
11006 struct sctp_nets *net,
11009 /* formulate and SEND a SHUTDOWN-COMPLETE */
11010 struct mbuf *m_shutdown_comp;
11011 struct sctp_shutdown_complete_chunk *shutdown_complete;
11016 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
11017 if (m_shutdown_comp == NULL) {
11021 if (reflect_vtag) {
11022 flags = SCTP_HAD_NO_TCB;
11023 vtag = stcb->asoc.my_vtag;
11026 vtag = stcb->asoc.peer_vtag;
11028 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
11029 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
11030 shutdown_complete->ch.chunk_flags = flags;
11031 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
11032 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
11033 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
11034 (struct sockaddr *)&net->ro._l_addr,
11035 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11036 stcb->sctp_ep->sctp_lport, stcb->rport,
11040 SCTP_SO_NOT_LOCKED))) {
11041 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11042 if (error == ENOBUFS) {
11043 stcb->asoc.ifp_had_enobuf = 1;
11044 SCTP_STAT_INCR(sctps_lowlevelerr);
11047 stcb->asoc.ifp_had_enobuf = 0;
11049 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
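/*
 * sctp_send_resp_msg() builds and transmits a stand-alone response packet
 * (ABORT, OPERATION-ERROR, or SHUTDOWN-COMPLETE) without an association:
 * it constructs the IPv4 or IPv6 header itself, optionally inserts a UDP
 * encapsulation header when a tunneling port is in use, appends the SCTP
 * common header plus a single chunk and any cause chain, computes the
 * CRC32c in software or offloads it, and sends the packet via
 * SCTP_IP_OUTPUT/SCTP_IP6_OUTPUT.
 */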
11054 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11055 struct sctphdr *sh, uint32_t vtag,
11056 uint8_t type, struct mbuf *cause,
11057 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11058 uint32_t vrf_id, uint16_t port)
11060 struct mbuf *o_pak;
11062 struct sctphdr *shout;
11063 struct sctp_chunkhdr *ch;
11064 #if defined(INET) || defined(INET6)
11065 struct udphdr *udp;
11067 int ret, len, cause_len, padding_len;
11069 struct sockaddr_in *src_sin, *dst_sin;
11073 struct sockaddr_in6 *src_sin6, *dst_sin6;
11074 struct ip6_hdr *ip6;
11077 /* Compute the length of the cause and add final padding. */
11079 if (cause != NULL) {
11080 struct mbuf *m_at, *m_last = NULL;
11082 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11083 if (SCTP_BUF_NEXT(m_at) == NULL)
11085 cause_len += SCTP_BUF_LEN(m_at);
11087 padding_len = cause_len % 4;
11088 if (padding_len != 0) {
11089 padding_len = 4 - padding_len;
11091 if (padding_len != 0) {
11092 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11093 sctp_m_freem(cause);
11100 /* Get an mbuf for the header. */
11101 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11102 switch (dst->sa_family) {
11105 len += sizeof(struct ip);
11110 len += sizeof(struct ip6_hdr);
11116 #if defined(INET) || defined(INET6)
11118 len += sizeof(struct udphdr);
11121 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11122 if (mout == NULL) {
11124 sctp_m_freem(cause);
11128 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11129 SCTP_BUF_LEN(mout) = len;
11130 SCTP_BUF_NEXT(mout) = cause;
11131 M_SETFIB(mout, fibnum);
11132 mout->m_pkthdr.flowid = mflowid;
11133 M_HASHTYPE_SET(mout, mflowtype);
11140 switch (dst->sa_family) {
11143 src_sin = (struct sockaddr_in *)src;
11144 dst_sin = (struct sockaddr_in *)dst;
11145 ip = mtod(mout, struct ip *);
11146 ip->ip_v = IPVERSION;
11147 ip->ip_hl = (sizeof(struct ip) >> 2);
11149 ip->ip_off = htons(IP_DF);
11151 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11153 ip->ip_p = IPPROTO_UDP;
11155 ip->ip_p = IPPROTO_SCTP;
11157 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11158 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11160 len = sizeof(struct ip);
11161 shout = (struct sctphdr *)((caddr_t)ip + len);
11166 src_sin6 = (struct sockaddr_in6 *)src;
11167 dst_sin6 = (struct sockaddr_in6 *)dst;
11168 ip6 = mtod(mout, struct ip6_hdr *);
11169 ip6->ip6_flow = htonl(0x60000000);
11170 if (V_ip6_auto_flowlabel) {
11171 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11173 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11175 ip6->ip6_nxt = IPPROTO_UDP;
11177 ip6->ip6_nxt = IPPROTO_SCTP;
11179 ip6->ip6_src = dst_sin6->sin6_addr;
11180 ip6->ip6_dst = src_sin6->sin6_addr;
11181 len = sizeof(struct ip6_hdr);
11182 shout = (struct sctphdr *)((caddr_t)ip6 + len);
11187 shout = mtod(mout, struct sctphdr *);
11190 #if defined(INET) || defined(INET6)
11192 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11193 sctp_m_freem(mout);
11196 udp = (struct udphdr *)shout;
11197 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11198 udp->uh_dport = port;
11200 udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
11201 sizeof(struct sctphdr) +
11202 sizeof(struct sctp_chunkhdr) +
11203 cause_len + padding_len));
11204 len += sizeof(struct udphdr);
11205 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11210 shout->src_port = sh->dest_port;
11211 shout->dest_port = sh->src_port;
11212 shout->checksum = 0;
11214 shout->v_tag = htonl(vtag);
11216 shout->v_tag = sh->v_tag;
11218 len += sizeof(struct sctphdr);
11219 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11220 ch->chunk_type = type;
11222 ch->chunk_flags = 0;
11224 ch->chunk_flags = SCTP_HAD_NO_TCB;
11226 ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
11227 len += sizeof(struct sctp_chunkhdr);
11228 len += cause_len + padding_len;
11230 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11231 sctp_m_freem(mout);
11234 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11235 switch (dst->sa_family) {
11240 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11245 ip->ip_len = htons(len);
11247 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11248 SCTP_STAT_INCR(sctps_sendswcrc);
11250 SCTP_ENABLE_UDP_CSUM(o_pak);
11253 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11254 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11255 SCTP_STAT_INCR(sctps_sendhwcrc);
11257 #ifdef SCTP_PACKET_LOGGING
11258 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11259 sctp_packet_log(o_pak);
11262 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11267 ip6->ip6_plen = htons((uint16_t)(len - sizeof(struct ip6_hdr)));
11269 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11270 SCTP_STAT_INCR(sctps_sendswcrc);
11271 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11272 udp->uh_sum = 0xffff;
11275 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11276 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11277 SCTP_STAT_INCR(sctps_sendhwcrc);
11279 #ifdef SCTP_PACKET_LOGGING
11280 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11281 sctp_packet_log(o_pak);
11284 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11288 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11290 sctp_m_freem(mout);
11291 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11294 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
11296 UDPSTAT_INC(udps_opackets);
11298 SCTP_STAT_INCR(sctps_sendpackets);
11299 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11300 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11302 SCTP_STAT_INCR(sctps_senderrors);
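/*
 * sctp_send_shutdown_complete2() is a thin wrapper around
 * sctp_send_resp_msg() that emits a SHUTDOWN-COMPLETE when no TCB is
 * available, typically in response to a SHUTDOWN-ACK for which no
 * association state exists.
 */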
11308 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11309 struct sctphdr *sh,
11310 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11311 uint32_t vrf_id, uint16_t port)
11313 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11314 mflowtype, mflowid, fibnum,
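/*
 * sctp_send_hb() builds a HEARTBEAT chunk for the given destination: the
 * heartbeat info parameter carries a timestamp, the destination address,
 * and, for unconfirmed addresses, two random values drawn from the
 * initial-TSN generator.  The chunk is queued on the control send queue.
 */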
11319 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11320 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11325 struct sctp_tmit_chunk *chk;
11326 struct sctp_heartbeat_chunk *hb;
11327 struct timeval now;
11329 SCTP_TCB_LOCK_ASSERT(stcb);
11333 (void)SCTP_GETTIME_TIMEVAL(&now);
11334 switch (net->ro._l_addr.sa.sa_family) {
11346 sctp_alloc_a_chunk(stcb, chk);
11348 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11352 chk->copy_by_ref = 0;
11353 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11354 chk->rec.chunk_id.can_take_data = 1;
11356 chk->asoc = &stcb->asoc;
11357 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11359 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11360 if (chk->data == NULL) {
11361 sctp_free_a_chunk(stcb, chk, so_locked);
11364 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11365 SCTP_BUF_LEN(chk->data) = chk->send_size;
11366 chk->sent = SCTP_DATAGRAM_UNSENT;
11367 chk->snd_count = 0;
11369 atomic_add_int(&chk->whoTo->ref_count, 1);
11370 /* Now we have a mbuf that we can fill in with the details */
11371 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11372 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11373 /* fill out chunk header */
11374 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11375 hb->ch.chunk_flags = 0;
11376 hb->ch.chunk_length = htons(chk->send_size);
11377 /* Fill out hb parameter */
11378 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11379 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11380 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11381 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
11382 /* Did our user request this one, put it in */
11383 hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
11384 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11385 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11387 * we only take from the entropy pool if the address is not
11390 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11391 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11393 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11394 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11396 switch (net->ro._l_addr.sa.sa_family) {
11399 memcpy(hb->heartbeat.hb_info.address,
11400 &net->ro._l_addr.sin.sin_addr,
11401 sizeof(net->ro._l_addr.sin.sin_addr));
11406 memcpy(hb->heartbeat.hb_info.address,
11407 &net->ro._l_addr.sin6.sin6_addr,
11408 sizeof(net->ro._l_addr.sin6.sin6_addr));
11413 sctp_m_freem(chk->data);
11416 sctp_free_a_chunk(stcb, chk, so_locked);
11420 net->hb_responded = 0;
11421 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11422 stcb->asoc.ctrl_queue_cnt++;
11423 SCTP_STAT_INCR(sctps_sendheartbeat);
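/*
 * sctp_send_ecn_echo() queues an ECN-ECHO for the given destination.  If
 * one is already on the control queue it is updated in place (TSN raised
 * and packet counter bumped); otherwise a new ECNE chunk is allocated and
 * inserted at the head of the control send queue.
 */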
11428 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11431 struct sctp_association *asoc;
11432 struct sctp_ecne_chunk *ecne;
11433 struct sctp_tmit_chunk *chk;
11438 asoc = &stcb->asoc;
11439 SCTP_TCB_LOCK_ASSERT(stcb);
11440 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11441 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11442 /* found a previous ECN_ECHO update it if needed */
11443 uint32_t cnt, ctsn;
11445 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11446 ctsn = ntohl(ecne->tsn);
11447 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11448 ecne->tsn = htonl(high_tsn);
11449 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11451 cnt = ntohl(ecne->num_pkts_since_cwr);
11453 ecne->num_pkts_since_cwr = htonl(cnt);
11457 /* nope could not find one to update so we must build one */
11458 sctp_alloc_a_chunk(stcb, chk);
11462 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11463 chk->copy_by_ref = 0;
11464 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11465 chk->rec.chunk_id.can_take_data = 0;
11467 chk->asoc = &stcb->asoc;
11468 chk->send_size = sizeof(struct sctp_ecne_chunk);
11469 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11470 if (chk->data == NULL) {
11471 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11474 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11475 SCTP_BUF_LEN(chk->data) = chk->send_size;
11476 chk->sent = SCTP_DATAGRAM_UNSENT;
11477 chk->snd_count = 0;
11479 atomic_add_int(&chk->whoTo->ref_count, 1);
11481 stcb->asoc.ecn_echo_cnt_onq++;
11482 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11483 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11484 ecne->ch.chunk_flags = 0;
11485 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11486 ecne->tsn = htonl(high_tsn);
11487 ecne->num_pkts_since_cwr = htonl(1);
11488 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11489 asoc->ctrl_queue_cnt++;
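/*
 * sctp_send_packet_dropped() builds a PKT-DROP chunk that echoes (part of)
 * a dropped packet back to the peer.  It is only sent if the peer declared
 * PKT-DROP support, never in response to an ABORT, PKT-DROP, or INIT-ACK,
 * and the copied data is truncated to roughly one MTU.  The chunk also
 * reports our receive-buffer limit and the bytes currently queued.
 */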
11493 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11494 struct mbuf *m, int len, int iphlen, int bad_crc)
11496 struct sctp_association *asoc;
11497 struct sctp_pktdrop_chunk *drp;
11498 struct sctp_tmit_chunk *chk;
11504 struct sctp_chunkhdr *ch, chunk_buf;
11505 unsigned int chk_length;
11510 asoc = &stcb->asoc;
11511 SCTP_TCB_LOCK_ASSERT(stcb);
11512 if (asoc->pktdrop_supported == 0) {
11514 * peer must declare support before I send one.
11518 if (stcb->sctp_socket == NULL) {
11521 sctp_alloc_a_chunk(stcb, chk);
11525 chk->copy_by_ref = 0;
11526 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11527 chk->rec.chunk_id.can_take_data = 1;
11530 chk->send_size = len;
11531 /* Validate that we do not have an ABORT in here. */
11532 offset = iphlen + sizeof(struct sctphdr);
11533 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11534 sizeof(*ch), (uint8_t *)&chunk_buf);
11535 while (ch != NULL) {
11536 chk_length = ntohs(ch->chunk_length);
11537 if (chk_length < sizeof(*ch)) {
11538 /* break to abort land */
11541 switch (ch->chunk_type) {
11542 case SCTP_PACKET_DROPPED:
11543 case SCTP_ABORT_ASSOCIATION:
11544 case SCTP_INITIATION_ACK:
11546 * We don't respond with a PKT-DROP to an ABORT
11547 * or PKT-DROP. We also do not respond to an
11548 * INIT-ACK, because we can't know if the initiation
11549 * tag is correct or not.
11551 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11556 offset += SCTP_SIZE32(chk_length);
11557 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11558 sizeof(*ch), (uint8_t *)&chunk_buf);
11561 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11562 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11564 * only send one MTU's worth; trim off the excess at the end.
11567 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11570 chk->asoc = &stcb->asoc;
11571 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11572 if (chk->data == NULL) {
11574 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11577 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11578 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11580 sctp_m_freem(chk->data);
11584 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11585 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11586 chk->book_size_scale = 0;
11588 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11589 drp->trunc_len = htons(fullsz);
11591 * Len is already adjusted to size minus overhead above; take
11592 * out the pkt_drop chunk itself from it.
11594 chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
11595 len = chk->send_size;
11597 /* no truncation needed */
11598 drp->ch.chunk_flags = 0;
11599 drp->trunc_len = htons(0);
11602 drp->ch.chunk_flags |= SCTP_BADCRC;
11604 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11605 SCTP_BUF_LEN(chk->data) = chk->send_size;
11606 chk->sent = SCTP_DATAGRAM_UNSENT;
11607 chk->snd_count = 0;
11609 /* we should hit here */
11611 atomic_add_int(&chk->whoTo->ref_count, 1);
11615 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11616 drp->ch.chunk_length = htons(chk->send_size);
11617 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11621 drp->bottle_bw = htonl(spc);
11622 if (asoc->my_rwnd) {
11623 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11624 asoc->size_on_all_streams +
11625 asoc->my_rwnd_control_len +
11626 stcb->sctp_socket->so_rcv.sb_cc);
11629 * If my rwnd is 0, possibly from mbuf depletion as well as
11630 * space used, tell the peer there is NO space aka onq == bw
11632 drp->current_onq = htonl(spc);
11636 m_copydata(m, iphlen, len, (caddr_t)datap);
11637 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11638 asoc->ctrl_queue_cnt++;
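/*
 * sctp_send_cwr() queues a CWR chunk for the given destination, or updates
 * an already-queued one (raising its TSN and carrying any override flag).
 */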
11642 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11644 struct sctp_association *asoc;
11645 struct sctp_cwr_chunk *cwr;
11646 struct sctp_tmit_chunk *chk;
11648 SCTP_TCB_LOCK_ASSERT(stcb);
11652 asoc = &stcb->asoc;
11653 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11654 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11656 * found a previous CWR queued to same destination
11657 * update it if needed
11661 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11662 ctsn = ntohl(cwr->tsn);
11663 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11664 cwr->tsn = htonl(high_tsn);
11666 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11667 /* Make sure override is carried */
11668 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11673 sctp_alloc_a_chunk(stcb, chk);
11677 chk->copy_by_ref = 0;
11678 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11679 chk->rec.chunk_id.can_take_data = 1;
11681 chk->asoc = &stcb->asoc;
11682 chk->send_size = sizeof(struct sctp_cwr_chunk);
11683 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11684 if (chk->data == NULL) {
11685 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11688 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11689 SCTP_BUF_LEN(chk->data) = chk->send_size;
11690 chk->sent = SCTP_DATAGRAM_UNSENT;
11691 chk->snd_count = 0;
11693 atomic_add_int(&chk->whoTo->ref_count, 1);
11694 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11695 cwr->ch.chunk_type = SCTP_ECN_CWR;
11696 cwr->ch.chunk_flags = override;
11697 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11698 cwr->tsn = htonl(high_tsn);
11699 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11700 asoc->ctrl_queue_cnt++;
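/*
 * sctp_add_stream_reset_out() appends an outgoing-stream reset request
 * parameter to the stream-reset chunk under construction.  It lists the
 * streams that are pending reset (an empty list means "all streams"),
 * marks them IN_FLIGHT, pads the parameter to a 4-byte boundary, and
 * updates the chunk length and book size.  It returns non-zero when a
 * request parameter was actually added.
 */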
11704 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
11705 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11707 uint16_t len, old_len, i;
11708 struct sctp_stream_reset_out_request *req_out;
11709 struct sctp_chunkhdr *ch;
11711 int number_entries = 0;
11713 ch = mtod(chk->data, struct sctp_chunkhdr *);
11714 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11715 /* get to new offset for the param. */
11716 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11717 /* now how long will this param be? */
11718 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11719 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11720 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11721 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11725 if (number_entries == 0) {
11728 if (number_entries == stcb->asoc.streamoutcnt) {
11729 number_entries = 0;
11731 if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
11732 number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
11734 len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11735 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11736 req_out->ph.param_length = htons(len);
11737 req_out->request_seq = htonl(seq);
11738 req_out->response_seq = htonl(resp_seq);
11739 req_out->send_reset_at_tsn = htonl(last_sent);
11741 if (number_entries) {
11742 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11743 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11744 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11745 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11746 req_out->list_of_streams[at] = htons(i);
11748 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11749 if (at >= number_entries) {
11755 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11756 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11759 if (SCTP_SIZE32(len) > len) {
11761 * Need to worry about the pad we may end up adding to the
11762 * end. This is easy since the struct is either aligned to 4
11763 * bytes or 2 bytes off.
11765 req_out->list_of_streams[number_entries] = 0;
11767 /* now fix the chunk length */
11768 ch->chunk_length = htons(len + old_len);
11769 chk->book_size = len + old_len;
11770 chk->book_size_scale = 0;
11771 chk->send_size = SCTP_SIZE32(chk->book_size);
11772 SCTP_BUF_LEN(chk->data) = chk->send_size;
11777 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11778 int number_entries, uint16_t *list,
11781 uint16_t len, old_len, i;
11782 struct sctp_stream_reset_in_request *req_in;
11783 struct sctp_chunkhdr *ch;
11785 ch = mtod(chk->data, struct sctp_chunkhdr *);
11786 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11788 /* get to new offset for the param. */
11789 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11790 /* now how long will this param be? */
11791 len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11792 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11793 req_in->ph.param_length = htons(len);
11794 req_in->request_seq = htonl(seq);
11795 if (number_entries) {
11796 for (i = 0; i < number_entries; i++) {
11797 req_in->list_of_streams[i] = htons(list[i]);
11800 if (SCTP_SIZE32(len) > len) {
11802 * Need to worry about the pad we may end up adding to the
11803 * end. This is easy since the struct is either aligned to 4
11804 * bytes or 2 bytes off.
11806 req_in->list_of_streams[number_entries] = 0;
11808 /* now fix the chunk length */
11809 ch->chunk_length = htons(len + old_len);
11810 chk->book_size = len + old_len;
11811 chk->book_size_scale = 0;
11812 chk->send_size = SCTP_SIZE32(chk->book_size);
11813 SCTP_BUF_LEN(chk->data) = chk->send_size;
11818 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11821 uint16_t len, old_len;
11822 struct sctp_stream_reset_tsn_request *req_tsn;
11823 struct sctp_chunkhdr *ch;
11825 ch = mtod(chk->data, struct sctp_chunkhdr *);
11826 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11828 /* get to new offset for the param. */
11829 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11830 /* now how long will this param be? */
11831 len = sizeof(struct sctp_stream_reset_tsn_request);
11832 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11833 req_tsn->ph.param_length = htons(len);
11834 req_tsn->request_seq = htonl(seq);
11836 /* now fix the chunk length */
11837 ch->chunk_length = htons(len + old_len);
11838 chk->send_size = len + old_len;
11839 chk->book_size = SCTP_SIZE32(chk->send_size);
11840 chk->book_size_scale = 0;
11841 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11846 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11847 uint32_t resp_seq, uint32_t result)
11849 uint16_t len, old_len;
11850 struct sctp_stream_reset_response *resp;
11851 struct sctp_chunkhdr *ch;
11853 ch = mtod(chk->data, struct sctp_chunkhdr *);
11854 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11856 /* get to new offset for the param. */
11857 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11858 /* now how long will this param be? */
11859 len = sizeof(struct sctp_stream_reset_response);
11860 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11861 resp->ph.param_length = htons(len);
11862 resp->response_seq = htonl(resp_seq);
11863 resp->result = htonl(result);
11865 /* now fix the chunk length */
11866 ch->chunk_length = htons(len + old_len);
11867 chk->book_size = len + old_len;
11868 chk->book_size_scale = 0;
11869 chk->send_size = SCTP_SIZE32(chk->book_size);
11870 SCTP_BUF_LEN(chk->data) = chk->send_size;
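/*
 * sctp_send_deferred_reset_response() queues a stand-alone STREAM_RESET
 * chunk that carries only a result parameter for the given request, so a
 * retransmitted request from the peer is answered with the recorded
 * (possibly updated) response.
 */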
11875 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
11876 struct sctp_stream_reset_list *ent,
11879 struct sctp_association *asoc;
11880 struct sctp_tmit_chunk *chk;
11881 struct sctp_chunkhdr *ch;
11883 asoc = &stcb->asoc;
11886 * Record the new response as our last reset action (probably
11887 * PERFORMED). This assures that if we fail to send, a
11888 * retransmission from the peer will get the new response.
11890 asoc->last_reset_action[0] = response;
11891 if (asoc->stream_reset_outstanding) {
11894 sctp_alloc_a_chunk(stcb, chk);
11896 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11899 chk->copy_by_ref = 0;
11900 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11901 chk->rec.chunk_id.can_take_data = 0;
11903 chk->asoc = &stcb->asoc;
11904 chk->book_size = sizeof(struct sctp_chunkhdr);
11905 chk->send_size = SCTP_SIZE32(chk->book_size);
11906 chk->book_size_scale = 0;
11907 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11908 if (chk->data == NULL) {
11909 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11910 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11913 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11914 /* setup chunk parameters */
11915 chk->sent = SCTP_DATAGRAM_UNSENT;
11916 chk->snd_count = 0;
11917 if (stcb->asoc.alternate) {
11918 chk->whoTo = stcb->asoc.alternate;
11920 chk->whoTo = stcb->asoc.primary_destination;
11922 ch = mtod(chk->data, struct sctp_chunkhdr *);
11923 ch->chunk_type = SCTP_STREAM_RESET;
11924 ch->chunk_flags = 0;
11925 ch->chunk_length = htons(chk->book_size);
11926 atomic_add_int(&chk->whoTo->ref_count, 1);
11927 SCTP_BUF_LEN(chk->data) = chk->send_size;
11928 sctp_add_stream_reset_result(chk, ent->seq, response);
11929 /* insert the chunk for sending */
11930 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11933 asoc->ctrl_queue_cnt++;
11937 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11938 uint32_t resp_seq, uint32_t result,
11939 uint32_t send_una, uint32_t recv_next)
11941 uint16_t len, old_len;
11942 struct sctp_stream_reset_response_tsn *resp;
11943 struct sctp_chunkhdr *ch;
11945 ch = mtod(chk->data, struct sctp_chunkhdr *);
11946 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11948 /* get to new offset for the param. */
11949 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11950 /* now how long will this param be? */
11951 len = sizeof(struct sctp_stream_reset_response_tsn);
11952 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11953 resp->ph.param_length = htons(len);
11954 resp->response_seq = htonl(resp_seq);
11955 resp->result = htonl(result);
11956 resp->senders_next_tsn = htonl(send_una);
11957 resp->receivers_next_tsn = htonl(recv_next);
11959 /* now fix the chunk length */
11960 ch->chunk_length = htons(len + old_len);
11961 chk->book_size = len + old_len;
11962 chk->send_size = SCTP_SIZE32(chk->book_size);
11963 chk->book_size_scale = 0;
11964 SCTP_BUF_LEN(chk->data) = chk->send_size;
11969 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11973 uint16_t len, old_len;
11974 struct sctp_chunkhdr *ch;
11975 struct sctp_stream_reset_add_strm *addstr;
11977 ch = mtod(chk->data, struct sctp_chunkhdr *);
11978 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11980 /* get to new offset for the param. */
11981 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11982 /* now how long will this param be? */
11983 len = sizeof(struct sctp_stream_reset_add_strm);
11986 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11987 addstr->ph.param_length = htons(len);
11988 addstr->request_seq = htonl(seq);
11989 addstr->number_of_streams = htons(adding);
11990 addstr->reserved = 0;
11992 /* now fix the chunk length */
11993 ch->chunk_length = htons(len + old_len);
11994 chk->send_size = len + old_len;
11995 chk->book_size = SCTP_SIZE32(chk->send_size);
11996 chk->book_size_scale = 0;
11997 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
12002 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
12006 uint16_t len, old_len;
12007 struct sctp_chunkhdr *ch;
12008 struct sctp_stream_reset_add_strm *addstr;
12010 ch = mtod(chk->data, struct sctp_chunkhdr *);
12011 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
12013 /* get to new offset for the param. */
12014 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
12015 /* now how long will this param be? */
12016 len = sizeof(struct sctp_stream_reset_add_strm);
12018 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
12019 addstr->ph.param_length = htons(len);
12020 addstr->request_seq = htonl(seq);
12021 addstr->number_of_streams = htons(adding);
12022 addstr->reserved = 0;
12024 /* now fix the chunk length */
12025 ch->chunk_length = htons(len + old_len);
12026 chk->send_size = len + old_len;
12027 chk->book_size = SCTP_SIZE32(chk->send_size);
12028 chk->book_size_scale = 0;
12029 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
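/*
 * sctp_send_stream_reset_out_if_possible() clears the pending trigger and,
 * if no stream reset is already outstanding, builds a STREAM_RESET chunk
 * containing an out-request for the streams pending reset, queues it,
 * piggybacks a SACK if one is owed, and starts the stream-reset timer.
 */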
12034 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
12036 struct sctp_association *asoc;
12037 struct sctp_tmit_chunk *chk;
12038 struct sctp_chunkhdr *ch;
12041 asoc = &stcb->asoc;
12042 asoc->trigger_reset = 0;
12043 if (asoc->stream_reset_outstanding) {
12046 sctp_alloc_a_chunk(stcb, chk);
12048 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12051 chk->copy_by_ref = 0;
12052 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12053 chk->rec.chunk_id.can_take_data = 0;
12055 chk->asoc = &stcb->asoc;
12056 chk->book_size = sizeof(struct sctp_chunkhdr);
12057 chk->send_size = SCTP_SIZE32(chk->book_size);
12058 chk->book_size_scale = 0;
12059 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12060 if (chk->data == NULL) {
12061 sctp_free_a_chunk(stcb, chk, so_locked);
12062 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12065 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12067 /* setup chunk parameters */
12068 chk->sent = SCTP_DATAGRAM_UNSENT;
12069 chk->snd_count = 0;
12070 if (stcb->asoc.alternate) {
12071 chk->whoTo = stcb->asoc.alternate;
12073 chk->whoTo = stcb->asoc.primary_destination;
12075 ch = mtod(chk->data, struct sctp_chunkhdr *);
12076 ch->chunk_type = SCTP_STREAM_RESET;
12077 ch->chunk_flags = 0;
12078 ch->chunk_length = htons(chk->book_size);
12079 atomic_add_int(&chk->whoTo->ref_count, 1);
12080 SCTP_BUF_LEN(chk->data) = chk->send_size;
12081 seq = stcb->asoc.str_reset_seq_out;
12082 if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12084 asoc->stream_reset_outstanding++;
12086 m_freem(chk->data);
12088 sctp_free_a_chunk(stcb, chk, so_locked);
12091 asoc->str_reset = chk;
12092 /* insert the chunk for sending */
12093 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12096 asoc->ctrl_queue_cnt++;
12098 if (stcb->asoc.send_sack) {
12099 sctp_send_sack(stcb, so_locked);
12101 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
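/*
 * sctp_send_str_reset_req() is the general entry point for building a
 * stream-reset request chunk.  It rejects a new request while one is still
 * outstanding (EBUSY), and can combine outgoing/incoming reset requests, a
 * TSN reset, and add-outgoing/add-incoming stream requests in one chunk.
 * When adding outgoing streams it grows the stream-out array first.  The
 * finished chunk is queued and the stream-reset timer is started.
 */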
12106 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12107 uint16_t number_entries, uint16_t *list,
12108 uint8_t send_in_req,
12109 uint8_t send_tsn_req,
12110 uint8_t add_stream,
12112 uint16_t adding_i, uint8_t peer_asked)
12114 struct sctp_association *asoc;
12115 struct sctp_tmit_chunk *chk;
12116 struct sctp_chunkhdr *ch;
12117 int can_send_out_req = 0;
12120 asoc = &stcb->asoc;
12121 if (asoc->stream_reset_outstanding) {
12123 * Already one pending, must get ACK back to clear the flag.
12125 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12128 if ((send_in_req == 0) && (send_tsn_req == 0) &&
12129 (add_stream == 0)) {
12130 /* nothing to do */
12131 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12134 if (send_tsn_req && send_in_req) {
12135 /* error, can't do that */
12136 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12138 } else if (send_in_req) {
12139 can_send_out_req = 1;
12141 if (number_entries > (MCLBYTES -
12142 SCTP_MIN_OVERHEAD -
12143 sizeof(struct sctp_chunkhdr) -
12144 sizeof(struct sctp_stream_reset_out_request)) /
12145 sizeof(uint16_t)) {
12146 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12149 sctp_alloc_a_chunk(stcb, chk);
12151 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12154 chk->copy_by_ref = 0;
12155 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12156 chk->rec.chunk_id.can_take_data = 0;
12158 chk->asoc = &stcb->asoc;
12159 chk->book_size = sizeof(struct sctp_chunkhdr);
12160 chk->send_size = SCTP_SIZE32(chk->book_size);
12161 chk->book_size_scale = 0;
12162 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12163 if (chk->data == NULL) {
12164 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12165 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12168 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12170 /* setup chunk parameters */
12171 chk->sent = SCTP_DATAGRAM_UNSENT;
12172 chk->snd_count = 0;
12173 if (stcb->asoc.alternate) {
12174 chk->whoTo = stcb->asoc.alternate;
12176 chk->whoTo = stcb->asoc.primary_destination;
12178 atomic_add_int(&chk->whoTo->ref_count, 1);
12179 ch = mtod(chk->data, struct sctp_chunkhdr *);
12180 ch->chunk_type = SCTP_STREAM_RESET;
12181 ch->chunk_flags = 0;
12182 ch->chunk_length = htons(chk->book_size);
12183 SCTP_BUF_LEN(chk->data) = chk->send_size;
12185 seq = stcb->asoc.str_reset_seq_out;
12186 if (can_send_out_req) {
12189 ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12192 asoc->stream_reset_outstanding++;
12195 if ((add_stream & 1) &&
12196 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12197 /* Need to allocate more */
12198 struct sctp_stream_out *oldstream;
12199 struct sctp_stream_queue_pending *sp, *nsp;
12201 #if defined(SCTP_DETAILED_STR_STATS)
12205 oldstream = stcb->asoc.strmout;
12206 /* get some more */
12207 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12208 (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12210 if (stcb->asoc.strmout == NULL) {
12213 stcb->asoc.strmout = oldstream;
12214 /* Turn off the bit */
12215 x = add_stream & 0xfe;
12220 * Ok now we proceed with copying the old out stuff and
12221 * initializing the new stuff.
12223 SCTP_TCB_SEND_LOCK(stcb);
12224 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12225 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12226 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12227 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12228 stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
12229 stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
12230 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12231 stcb->asoc.strmout[i].sid = i;
12232 stcb->asoc.strmout[i].state = oldstream[i].state;
12233 /* FIX ME FIX ME */
12235 * This should be a SS_COPY operation FIX ME STREAM
12238 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
12239 /* now anything on those queues? */
12240 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12241 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12242 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12246 /* now the new streams */
12247 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
12248 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12249 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12250 stcb->asoc.strmout[i].chunks_on_queues = 0;
12251 #if defined(SCTP_DETAILED_STR_STATS)
12252 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12253 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12254 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12257 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12258 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12260 stcb->asoc.strmout[i].next_mid_ordered = 0;
12261 stcb->asoc.strmout[i].next_mid_unordered = 0;
12262 stcb->asoc.strmout[i].sid = i;
12263 stcb->asoc.strmout[i].last_msg_incomplete = 0;
12264 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
12265 stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
12267 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
12268 SCTP_FREE(oldstream, SCTP_M_STRMO);
12269 SCTP_TCB_SEND_UNLOCK(stcb);
12272 if ((add_stream & 1) && (adding_o > 0)) {
12273 asoc->strm_pending_add_size = adding_o;
12274 asoc->peer_req_out = peer_asked;
12275 sctp_add_an_out_stream(chk, seq, adding_o);
12277 asoc->stream_reset_outstanding++;
12279 if ((add_stream & 2) && (adding_i > 0)) {
12280 sctp_add_an_in_stream(chk, seq, adding_i);
12282 asoc->stream_reset_outstanding++;
12285 sctp_add_stream_reset_in(chk, number_entries, list, seq);
12287 asoc->stream_reset_outstanding++;
12289 if (send_tsn_req) {
12290 sctp_add_stream_reset_tsn(chk, seq);
12291 asoc->stream_reset_outstanding++;
12293 asoc->str_reset = chk;
12294 /* insert the chunk for sending */
12295 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12298 asoc->ctrl_queue_cnt++;
12299 if (stcb->asoc.send_sack) {
12300 sctp_send_sack(stcb, SCTP_SO_LOCKED);
12302 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
12307 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
12308 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12309 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12310 uint32_t vrf_id, uint16_t port)
12312 /* Don't respond to an ABORT with an ABORT. */
12313 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
12315 sctp_m_freem(cause);
12318 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
12319 mflowtype, mflowid, fibnum,
12325 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
12326 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12327 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12328 uint32_t vrf_id, uint16_t port)
12330 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
12331 mflowtype, mflowid, fibnum,
12336 static struct mbuf *
12337 sctp_copy_resume(struct uio *uio,
12339 int user_marks_eor,
12342 struct mbuf **new_tail)
12346 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12347 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12349 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12352 *sndout = m_length(m, NULL);
12353 *new_tail = m_last(m);
12359 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12363 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12365 if (sp->data == NULL) {
12366 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12370 sp->tail_mbuf = m_last(sp->data);
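/*
 * sctp_copy_it_in() allocates a stream-queue-pending entry and copies up
 * to max_send_len bytes of user data from the uio into it, tagging the
 * entry with the sndrcvinfo (stream, ppid, context, ttl) and marking the
 * message complete when the remaining user data fits in this pass.
 */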
12376 static struct sctp_stream_queue_pending *
12377 sctp_copy_it_in(struct sctp_tcb *stcb,
12378 struct sctp_association *asoc,
12379 struct sctp_sndrcvinfo *srcv,
12381 struct sctp_nets *net,
12382 ssize_t max_send_len,
12383 int user_marks_eor,
12388 * This routine must be very careful in its work. Protocol
12389 * processing is up and running so care must be taken to spl...()
12390 * when you need to do something that may effect the stcb/asoc. The
12391 * sb is locked however. When data is copied the protocol processing
12392 * should be enabled since this is a slower operation...
12394 struct sctp_stream_queue_pending *sp = NULL;
12398 /* Now can we send this? */
12399 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
12400 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12401 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12402 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12403 /* got data while shutting down */
12404 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12405 *error = ECONNRESET;
12408 sctp_alloc_a_strmoq(stcb, sp);
12410 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12415 sp->sender_all_done = 0;
12416 sp->sinfo_flags = srcv->sinfo_flags;
12417 sp->timetolive = srcv->sinfo_timetolive;
12418 sp->ppid = srcv->sinfo_ppid;
12419 sp->context = srcv->sinfo_context;
12421 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12423 sp->sid = srcv->sinfo_stream;
12424 sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
12425 if ((sp->length == (uint32_t)uio->uio_resid) &&
12426 ((user_marks_eor == 0) ||
12427 (srcv->sinfo_flags & SCTP_EOF) ||
12428 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12429 sp->msg_is_complete = 1;
12431 sp->msg_is_complete = 0;
12433 sp->sender_all_done = 0;
12434 sp->some_taken = 0;
12435 sp->put_last_out = 0;
12436 resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
12437 sp->data = sp->tail_mbuf = NULL;
12438 if (sp->length == 0) {
12441 if (srcv->sinfo_keynumber_valid) {
12442 sp->auth_keyid = srcv->sinfo_keynumber;
12444 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12446 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12447 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12448 sp->holds_key_ref = 1;
12450 *error = sctp_copy_one(sp, uio, resv_in_first);
12453 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12456 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12458 atomic_add_int(&sp->net->ref_count, 1);
12462 sctp_set_prsctp_policy(sp);
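/*
 * sctp_sosend() is the socket-layer send entry point: it extracts any
 * SCTP_SNDRCV ancillary data from the control mbuf, converts a v4-mapped
 * IPv6 destination to plain IPv4 when both families are compiled in, and
 * then hands everything to sctp_lower_sosend().
 */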
12470 sctp_sosend(struct socket *so,
12471 struct sockaddr *addr,
12474 struct mbuf *control,
12479 int error, use_sndinfo = 0;
12480 struct sctp_sndrcvinfo sndrcvninfo;
12481 struct sockaddr *addr_to_use;
12482 #if defined(INET) && defined(INET6)
12483 struct sockaddr_in sin;
12487 /* process cmsg snd/rcv info (maybe an assoc-id) */
12488 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12489 sizeof(sndrcvninfo))) {
12494 addr_to_use = addr;
12495 #if defined(INET) && defined(INET6)
12496 if ((addr) && (addr->sa_family == AF_INET6)) {
12497 struct sockaddr_in6 *sin6;
12499 sin6 = (struct sockaddr_in6 *)addr;
12500 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12501 in6_sin6_2_sin(&sin, sin6);
12502 addr_to_use = (struct sockaddr *)&sin;
12506 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12509 use_sndinfo ? &sndrcvninfo : NULL
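/*
 * sctp_lower_sosend() does the real send work: it validates the socket,
 * destination address, and sinfo flags, handles SCTP_SENDALL, then locates
 * an existing association or implicitly creates one (queueing the INIT)
 * before choosing the destination net for the data that follows.
 */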
12517 sctp_lower_sosend(struct socket *so,
12518 struct sockaddr *addr,
12520 struct mbuf *i_pak,
12521 struct mbuf *control,
12523 struct sctp_sndrcvinfo *srcv
12528 ssize_t sndlen = 0, max_len;
12530 struct mbuf *top = NULL;
12531 int queue_only = 0, queue_only_for_init = 0;
12532 int free_cnt_applied = 0;
12534 int now_filled = 0;
12535 unsigned int inqueue_bytes = 0;
12536 struct sctp_block_entry be;
12537 struct sctp_inpcb *inp;
12538 struct sctp_tcb *stcb = NULL;
12539 struct timeval now;
12540 struct sctp_nets *net;
12541 struct sctp_association *asoc;
12542 struct sctp_inpcb *t_inp;
12543 int user_marks_eor;
12544 int create_lock_applied = 0;
12545 int nagle_applies = 0;
12546 int some_on_control = 0;
12547 int got_all_of_the_send = 0;
12548 int hold_tcblock = 0;
12549 int non_blocking = 0;
12550 uint32_t local_add_more;
12551 ssize_t local_soresv = 0;
12553 uint16_t sinfo_flags;
12554 sctp_assoc_t sinfo_assoc_id;
12561 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12563 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12566 SCTP_RELEASE_PKT(i_pak);
12570 if ((uio == NULL) && (i_pak == NULL)) {
12571 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12574 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12575 atomic_add_int(&inp->total_sends, 1);
12577 if (uio->uio_resid < 0) {
12578 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12581 sndlen = uio->uio_resid;
12583 top = SCTP_HEADER_TO_CHAIN(i_pak);
12584 sndlen = SCTP_HEADER_LEN(i_pak);
12586 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %zu\n",
12589 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12590 (inp->sctp_socket->so_qlimit)) {
12591 /* The listener can NOT send */
12592 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12597 * Pre-screen the address; if one is given, the sin_len
12598 * must be set correctly!
12601 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12603 switch (raddr->sa.sa_family) {
12606 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12607 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12611 port = raddr->sin.sin_port;
12616 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12617 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12621 port = raddr->sin6.sin6_port;
12625 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12626 error = EAFNOSUPPORT;
12633 sinfo_flags = srcv->sinfo_flags;
12634 sinfo_assoc_id = srcv->sinfo_assoc_id;
12635 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12636 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12637 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12641 if (srcv->sinfo_flags)
12642 SCTP_STAT_INCR(sctps_sends_with_flags);
12644 sinfo_flags = inp->def_send.sinfo_flags;
12645 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12647 if (sinfo_flags & SCTP_SENDALL) {
12648 /* it's a sendall */
12649 error = sctp_sendall(inp, uio, top, srcv);
12653 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12654 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12658 /* now we must find the assoc */
12659 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12660 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12661 SCTP_INP_RLOCK(inp);
12662 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12664 SCTP_TCB_LOCK(stcb);
12667 SCTP_INP_RUNLOCK(inp);
12668 } else if (sinfo_assoc_id) {
12669 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
12670 if (stcb != NULL) {
12675 * Since we did not use findep we must
12676 * increment it, and if we don't find a tcb
12679 SCTP_INP_WLOCK(inp);
12680 SCTP_INP_INCR_REF(inp);
12681 SCTP_INP_WUNLOCK(inp);
12682 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12683 if (stcb == NULL) {
12684 SCTP_INP_WLOCK(inp);
12685 SCTP_INP_DECR_REF(inp);
12686 SCTP_INP_WUNLOCK(inp);
12691 if ((stcb == NULL) && (addr)) {
12692 /* Possible implicit send? */
12693 SCTP_ASOC_CREATE_LOCK(inp);
12694 create_lock_applied = 1;
12695 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12696 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12697 /* Should I really unlock? */
12698 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12703 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12704 (addr->sa_family == AF_INET6)) {
12705 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12709 SCTP_INP_WLOCK(inp);
12710 SCTP_INP_INCR_REF(inp);
12711 SCTP_INP_WUNLOCK(inp);
12712 /* With the lock applied look again */
12713 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12714 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12715 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12717 if (stcb == NULL) {
12718 SCTP_INP_WLOCK(inp);
12719 SCTP_INP_DECR_REF(inp);
12720 SCTP_INP_WUNLOCK(inp);
12727 if (t_inp != inp) {
12728 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12733 if (stcb == NULL) {
12734 if (addr == NULL) {
12735 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12739 /* We must go ahead and start the INIT process */
12742 if ((sinfo_flags & SCTP_ABORT) ||
12743 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12745 * User asks to abort a non-existent assoc,
12746 * or EOF a non-existent assoc with no data
12748 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12752 /* get an asoc/stcb struct */
12753 vrf_id = inp->def_vrf_id;
12755 if (create_lock_applied == 0) {
12756 panic("Error, should hold create lock and I don't?");
12759 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12760 inp->sctp_ep.pre_open_stream_count,
12763 if (stcb == NULL) {
12764 /* Error is set up for us in the call */
12767 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12768 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12770 * Set the connected flag so we can queue data.
12773 soisconnecting(so);
12776 if (create_lock_applied) {
12777 SCTP_ASOC_CREATE_UNLOCK(inp);
12778 create_lock_applied = 0;
12780 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12783 * Turn on queue only flag to prevent data from being sent.
12787 asoc = &stcb->asoc;
12788 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
12789 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12791 /* initialize authentication params for the assoc */
12792 sctp_initialize_auth_params(inp, stcb);
12795 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12796 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
12797 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
12803 /* out with the INIT */
12804 queue_only_for_init = 1;
12806 * we may want to dig in after this call and adjust the MTU
12807 * value. It defaulted to 1500 (constant) but the ro
12808 * structure may now have an update and thus we may need to
12809 * change it BEFORE we append the message.
12813 asoc = &stcb->asoc;
12815 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12816 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12818 net = sctp_findnet(stcb, addr);
12821 if ((net == NULL) ||
12822 ((port != 0) && (port != stcb->rport))) {
12823 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12828 if (stcb->asoc.alternate) {
12829 net = stcb->asoc.alternate;
12831 net = stcb->asoc.primary_destination;
12834 atomic_add_int(&stcb->total_sends, 1);
12835 /* Keep the stcb from being freed under our feet */
12836 atomic_add_int(&asoc->refcnt, 1);
12837 free_cnt_applied = 1;
12839 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12840 if (sndlen > asoc->smallest_mtu) {
12841 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12846 if (SCTP_SO_IS_NBIO(so)
12847 || (flags & MSG_NBIO)
12851 /* would we block? */
12852 if (non_blocking) {
12855 if (hold_tcblock == 0) {
12856 SCTP_TCB_LOCK(stcb);
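/*
 * inqueue_bytes: output already queued for this association, with the
 * per-chunk bookkeeping overhead subtracted, so the send-buffer limit
 * check below is applied to payload bytes only.
 */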
12859 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
12860 if (user_marks_eor == 0) {
12865 if ((SCTP_SB_LIMIT_SND(so) < (amount + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12866 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12867 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12868 if (sndlen > SCTP_SB_LIMIT_SND(so))
12871 error = EWOULDBLOCK;
12874 stcb->asoc.sb_send_resv += (uint32_t)sndlen;
12875 SCTP_TCB_UNLOCK(stcb);
12878 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12880 local_soresv = sndlen;
12881 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12882 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12883 error = ECONNRESET;
12886 if (create_lock_applied) {
12887 SCTP_ASOC_CREATE_UNLOCK(inp);
12888 create_lock_applied = 0;
12890 /* Is the stream no. valid? */
12891 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12892 /* Invalid stream number */
12893 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12897 if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
12898 (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
12900 * Can't queue any data while stream reset is underway.
12902 if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
12907 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
12910 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
12911 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
12914 /* we are now done with all control */
12916 sctp_m_freem(control);
12919 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) ||
12920 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12921 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12922 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12923 if (srcv->sinfo_flags & SCTP_ABORT) {
12926 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12927 error = ECONNRESET;
12931 /* Ok, we will attempt a msgsnd :> */
12933 p->td_ru.ru_msgsnd++;
12935 /* Are we aborting? */
12936 if (srcv->sinfo_flags & SCTP_ABORT) {
12938 ssize_t tot_demand, tot_out = 0, max_out;
12940 SCTP_STAT_INCR(sctps_sends_with_abort);
12941 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_WAIT) ||
12942 (SCTP_GET_STATE(stcb) == SCTP_STATE_COOKIE_ECHOED)) {
12943 /* It has to be up before we abort */
12944 /* how big is the user-initiated abort? */
12945 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12949 if (hold_tcblock) {
12950 SCTP_TCB_UNLOCK(stcb);
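/*
 * Gather the user-supplied abort reason (from the mbuf chain or from
 * uio below); it is wrapped in a User-Initiated Abort error cause
 * before the association is aborted.
 */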
12954 struct mbuf *cntm = NULL;
12956 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
12958 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12959 tot_out += SCTP_BUF_LEN(cntm);
12963 /* Must fit in an MTU */
12965 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12966 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12968 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12972 mm = sctp_get_mbuf_for_msg((unsigned int)tot_demand, 0, M_WAITOK, 1, MT_DATA);
12975 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12979 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12980 max_out -= sizeof(struct sctp_abort_msg);
12981 if (tot_out > max_out) {
12985 struct sctp_paramhdr *ph;
12987 /* now move forward the data pointer */
12988 ph = mtod(mm, struct sctp_paramhdr *);
12989 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12990 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
12992 SCTP_BUF_LEN(mm) = (int)(tot_out + sizeof(struct sctp_paramhdr));
12994 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12997 * Here, if we can't get the user's data we
12998 * still abort; we just don't get to
12999 * send the user's note :-0
13006 SCTP_BUF_NEXT(mm) = top;
13010 if (hold_tcblock == 0) {
13011 SCTP_TCB_LOCK(stcb);
13013 atomic_add_int(&stcb->asoc.refcnt, -1);
13014 free_cnt_applied = 0;
13015 /* release this lock, otherwise we hang on ourselves */
13016 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
13017 /* now relock the stcb so everything is sane */
13021 * In this case top is already chained to mm, so avoid a double
13022 * free, since we free it below if top != NULL and the driver
13023 * would free it after sending the packet out
13030 /* Calculate the maximum we can send */
13031 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13032 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13033 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13037 if (hold_tcblock) {
13038 SCTP_TCB_UNLOCK(stcb);
13041 if (asoc->strmout == NULL) {
13042 /* huh? software error */
13043 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13048 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
13049 if ((user_marks_eor == 0) &&
13050 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13051 /* It will NEVER fit */
13052 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13056 if ((uio == NULL) && user_marks_eor) {
13058 * We do not support eeor mode for
13059 * sending with mbuf chains (like sendfile).
13061 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13066 if (user_marks_eor) {
13067 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13070 * For non-eeor the whole message must fit in
13071 * the socket send buffer.
13073 local_add_more = (uint32_t)sndlen;
13076 if (non_blocking) {
13077 goto skip_preblock;
13079 if (((max_len <= local_add_more) &&
13080 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13082 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13083 /* No room right now! */
13084 SOCKBUF_LOCK(&so->so_snd);
13085 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
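/*
 * Block here until the socket send buffer can absorb at least
 * local_add_more more bytes and the number of queued chunks drops
 * back below the sctp_max_chunks_on_queue limit.
 */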
13086 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13087 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13088 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
13089 (unsigned int)SCTP_SB_LIMIT_SND(so),
13092 stcb->asoc.stream_queue_cnt,
13093 stcb->asoc.chunks_on_out_queue,
13094 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13095 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13096 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
13099 stcb->block_entry = &be;
13100 error = sbwait(&so->so_snd);
13101 stcb->block_entry = NULL;
13102 if (error || so->so_error || be.error) {
13105 error = so->so_error;
13110 SOCKBUF_UNLOCK(&so->so_snd);
13113 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13114 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13115 asoc, stcb->asoc.total_output_queue_size);
13117 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13118 SOCKBUF_UNLOCK(&so->so_snd);
13121 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13123 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13124 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13128 SOCKBUF_UNLOCK(&so->so_snd);
13132 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13136 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
13137 * case. NOTE: uio will be NULL when top/mbuf is passed
13140 if (srcv->sinfo_flags & SCTP_EOF) {
13141 got_all_of_the_send = 1;
13144 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13150 struct sctp_stream_queue_pending *sp;
13151 struct sctp_stream_out *strm;
13154 SCTP_TCB_SEND_LOCK(stcb);
13155 if ((asoc->stream_locked) &&
13156 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13157 SCTP_TCB_SEND_UNLOCK(stcb);
13158 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13162 SCTP_TCB_SEND_UNLOCK(stcb);
13164 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
13165 if (strm->last_msg_incomplete == 0) {
13167 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13171 SCTP_TCB_SEND_LOCK(stcb);
13172 if (sp->msg_is_complete) {
13173 strm->last_msg_incomplete = 0;
13174 asoc->stream_locked = 0;
13177 * Just got locked to this guy in case of an interrupt.
13180 strm->last_msg_incomplete = 1;
13181 if (stcb->asoc.idata_supported == 0) {
13182 asoc->stream_locked = 1;
13183 asoc->stream_locked_on = srcv->sinfo_stream;
13185 sp->sender_all_done = 0;
13187 sctp_snd_sb_alloc(stcb, sp->length);
13188 atomic_add_int(&asoc->stream_queue_cnt, 1);
13189 if (srcv->sinfo_flags & SCTP_UNORDERED) {
13190 SCTP_STAT_INCR(sctps_sends_with_unord);
13192 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13193 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13194 SCTP_TCB_SEND_UNLOCK(stcb);
13196 SCTP_TCB_SEND_LOCK(stcb);
13197 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13198 SCTP_TCB_SEND_UNLOCK(stcb);
13200 /* ???? Huh ??? last msg is gone */
13202 panic("Warning: Last msg marked incomplete, yet nothing left?");
13204 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13205 strm->last_msg_incomplete = 0;
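/*
 * Copy the user's data into the pending message a piece at a time,
 * never taking more than the send-buffer space currently available
 * (max_len).
 */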
13211 while (uio->uio_resid > 0) {
13212 /* How much room do we have? */
13213 struct mbuf *new_tail, *mm;
13215 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13216 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
13217 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13221 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13222 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13223 (uio->uio_resid && (uio->uio_resid <= max_len))) {
13226 if (hold_tcblock) {
13227 SCTP_TCB_UNLOCK(stcb);
13230 mm = sctp_copy_resume(uio, (int)max_len, user_marks_eor, &error, &sndout, &new_tail);
13231 if ((mm == NULL) || error) {
13237 /* Update the mbuf and count */
13238 SCTP_TCB_SEND_LOCK(stcb);
13239 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13241 * we need to get out. Peer probably aborted.
13245 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
13246 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13247 error = ECONNRESET;
13249 SCTP_TCB_SEND_UNLOCK(stcb);
13252 if (sp->tail_mbuf) {
13253 /* tack it to the end */
13254 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13255 sp->tail_mbuf = new_tail;
13257 /* A stolen mbuf */
13259 sp->tail_mbuf = new_tail;
13261 sctp_snd_sb_alloc(stcb, sndout);
13262 atomic_add_int(&sp->length, sndout);
13264 if (srcv->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
13265 sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
13268 /* Did we reach EOR? */
13269 if ((uio->uio_resid == 0) &&
13270 ((user_marks_eor == 0) ||
13271 (srcv->sinfo_flags & SCTP_EOF) ||
13272 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13273 sp->msg_is_complete = 1;
13275 sp->msg_is_complete = 0;
13277 SCTP_TCB_SEND_UNLOCK(stcb);
13279 if (uio->uio_resid == 0) {
13284 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
13286 * This is ugly but we must assure locking order.
13289 if (hold_tcblock == 0) {
13290 SCTP_TCB_LOCK(stcb);
13293 sctp_prune_prsctp(stcb, asoc, srcv, (int)sndlen);
13294 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13295 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
13296 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13302 SCTP_TCB_UNLOCK(stcb);
13305 /* wait for space now */
13306 if (non_blocking) {
13307 /* Non-blocking I/O: do not wait for space */
13310 /* What about the INIT? Maybe send it. */
13311 if (queue_only_for_init) {
13312 if (hold_tcblock == 0) {
13313 SCTP_TCB_LOCK(stcb);
13316 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13317 /* a collision took us forward? */
13320 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13321 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13325 if ((net->flight_size > net->cwnd) &&
13326 (asoc->sctp_cmt_on_off == 0)) {
13327 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13329 } else if (asoc->ifp_had_enobuf) {
13330 SCTP_STAT_INCR(sctps_ifnomemqueued);
13331 if (net->flight_size > (2 * net->mtu)) {
13334 asoc->ifp_had_enobuf = 0;
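/*
 * un_sent is the data queued but not yet in flight; Nagle only defers
 * sending when data is already outstanding and what remains is less
 * than a full segment.
 */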
13336 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
13337 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13338 (stcb->asoc.total_flight > 0) &&
13339 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13340 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13343 * Ok, Nagle is set on and we have data outstanding.
13344 * Don't send anything and let SACKs drive out the
13345 * data unless we have a "full" segment to send.
13347 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13348 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13350 SCTP_STAT_INCR(sctps_naglequeued);
13353 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13354 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13355 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13357 SCTP_STAT_INCR(sctps_naglesent);
13360 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13362 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13363 nagle_applies, un_sent);
13364 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13365 stcb->asoc.total_flight,
13366 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13368 if (queue_only_for_init)
13369 queue_only_for_init = 0;
13370 if ((queue_only == 0) && (nagle_applies == 0)) {
13372 * need to start chunk output
13373 * before blocking. Note that if
13374 * a lock is already applied, then
13375 * input via the net is happening
13376 * and I don't need to start output :-D
13378 if (hold_tcblock == 0) {
13379 if (SCTP_TCB_TRYLOCK(stcb)) {
13381 sctp_chunk_output(inp,
13383 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13386 sctp_chunk_output(inp,
13388 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13390 if (hold_tcblock == 1) {
13391 SCTP_TCB_UNLOCK(stcb);
13395 SOCKBUF_LOCK(&so->so_snd);
13397 * This is a bit strange, but I think it will
13398 * work. The total_output_queue_size is locked and
13399 * protected by the TCB_LOCK, which we just released.
13400 * There is a race that can occur between releasing it
13401 * above, and me getting the socket lock, where sacks
13402 * come in but we have not put the SB_WAIT on the
13403 * so_snd buffer to get the wakeup. After the LOCK
13404 * is applied, the sack processing will also need to
13405 * LOCK the so->so_snd to do the actual sowwakeup(). So
13406 * once we have the socket buffer lock, if we recheck the
13407 * size we KNOW we will get to sleep safely with the
13408 * wakeup flag in place.
13410 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13411 if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
13412 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13413 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13414 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13415 asoc, uio->uio_resid);
13418 stcb->block_entry = &be;
13419 error = sbwait(&so->so_snd);
13420 stcb->block_entry = NULL;
13422 if (error || so->so_error || be.error) {
13425 error = so->so_error;
13430 SOCKBUF_UNLOCK(&so->so_snd);
13434 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13435 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13436 asoc, stcb->asoc.total_output_queue_size);
13439 SOCKBUF_UNLOCK(&so->so_snd);
13440 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13444 SCTP_TCB_SEND_LOCK(stcb);
13445 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13446 SCTP_TCB_SEND_UNLOCK(stcb);
13450 if (sp->msg_is_complete == 0) {
13451 strm->last_msg_incomplete = 1;
13452 if (stcb->asoc.idata_supported == 0) {
13453 asoc->stream_locked = 1;
13454 asoc->stream_locked_on = srcv->sinfo_stream;
13457 sp->sender_all_done = 1;
13458 strm->last_msg_incomplete = 0;
13459 asoc->stream_locked = 0;
13462 SCTP_PRINTF("Huh no sp TSNH?\n");
13463 strm->last_msg_incomplete = 0;
13464 asoc->stream_locked = 0;
13466 SCTP_TCB_SEND_UNLOCK(stcb);
13467 if (uio->uio_resid == 0) {
13468 got_all_of_the_send = 1;
13471 /* We send in a 0, since we do NOT have any locks */
13472 error = sctp_msg_append(stcb, net, top, srcv, 0);
13474 if (srcv->sinfo_flags & SCTP_EOF) {
13476 * This should only happen for Panda for the mbuf
13477 * send case, which does NOT yet support EEOR mode.
13478 * Thus, we can just set this flag to do the proper EOF handling.
13481 got_all_of_the_send = 1;
13489 if ((srcv->sinfo_flags & SCTP_EOF) &&
13490 (got_all_of_the_send == 1)) {
13491 SCTP_STAT_INCR(sctps_sends_with_eof);
13493 if (hold_tcblock == 0) {
13494 SCTP_TCB_LOCK(stcb);
13497 if (TAILQ_EMPTY(&asoc->send_queue) &&
13498 TAILQ_EMPTY(&asoc->sent_queue) &&
13499 sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
13500 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13503 /* there is nothing queued to send, so I'm done... */
13504 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
13505 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13506 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13507 struct sctp_nets *netp;
13509 /* only send SHUTDOWN the first time through */
13510 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13511 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13513 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
13514 SCTP_CLEAR_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
13515 sctp_stop_timers_for_shutdown(stcb);
13516 if (stcb->asoc.alternate) {
13517 netp = stcb->asoc.alternate;
13519 netp = stcb->asoc.primary_destination;
13521 sctp_send_shutdown(stcb, netp);
13522 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13524 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13525 asoc->primary_destination);
13529 * we still got (or just got) data to send, so set SHUTDOWN_PENDING.
13533 * XXX sockets draft says that SCTP_EOF should be
13534 * sent with no data. Currently, we will allow user
13535 * data to be sent first and move to SHUTDOWN-PENDING.
13538 if ((SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_SENT) &&
13539 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13540 (SCTP_GET_STATE(stcb) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13541 if (hold_tcblock == 0) {
13542 SCTP_TCB_LOCK(stcb);
13545 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13546 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
13548 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_SHUTDOWN_PENDING);
13549 if (TAILQ_EMPTY(&asoc->send_queue) &&
13550 TAILQ_EMPTY(&asoc->sent_queue) &&
13551 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13552 struct mbuf *op_err;
13553 char msg[SCTP_DIAG_INFO_LEN];
13556 if (free_cnt_applied) {
13557 atomic_add_int(&stcb->asoc.refcnt, -1);
13558 free_cnt_applied = 0;
13560 snprintf(msg, sizeof(msg),
13561 "%s:%d at %s", __FILE__, __LINE__, __func__);
13562 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
13564 sctp_abort_an_association(stcb->sctp_ep, stcb,
13565 op_err, SCTP_SO_LOCKED);
13567 * now relock the stcb so everything is sane
13574 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13575 asoc->primary_destination);
13576 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13581 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13582 some_on_control = 1;
13584 if (queue_only_for_init) {
13585 if (hold_tcblock == 0) {
13586 SCTP_TCB_LOCK(stcb);
13589 if (SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) {
13590 /* a collision took us forward? */
13593 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13594 SCTP_SET_STATE(stcb, SCTP_STATE_COOKIE_WAIT);
13598 if ((net->flight_size > net->cwnd) &&
13599 (stcb->asoc.sctp_cmt_on_off == 0)) {
13600 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13602 } else if (asoc->ifp_had_enobuf) {
13603 SCTP_STAT_INCR(sctps_ifnomemqueued);
13604 if (net->flight_size > (2 * net->mtu)) {
13607 asoc->ifp_had_enobuf = 0;
13609 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
13610 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13611 (stcb->asoc.total_flight > 0) &&
13612 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13613 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13615 * Ok, Nagle is set on and we have data outstanding.
13616 * Don't send anything and let SACKs drive out the
13617 * data unless we have a "full" segment to send.
13619 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13620 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13622 SCTP_STAT_INCR(sctps_naglequeued);
13625 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13626 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13627 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13629 SCTP_STAT_INCR(sctps_naglesent);
13632 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13633 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13634 nagle_applies, un_sent);
13635 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13636 stcb->asoc.total_flight,
13637 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13639 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13640 /* we can attempt to send too. */
13641 if (hold_tcblock == 0) {
13643 * If there is activity recv'ing sacks, no need to send.
13646 if (SCTP_TCB_TRYLOCK(stcb)) {
13647 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13651 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13653 } else if ((queue_only == 0) &&
13654 (stcb->asoc.peers_rwnd == 0) &&
13655 (stcb->asoc.total_flight == 0)) {
13656 /* We get to have a probe outstanding */
13657 if (hold_tcblock == 0) {
13659 SCTP_TCB_LOCK(stcb);
13661 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13662 } else if (some_on_control) {
13663 int num_out, reason, frag_point;
13665 /* Here we do control only */
13666 if (hold_tcblock == 0) {
13668 SCTP_TCB_LOCK(stcb);
13670 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13671 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13672 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13674 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13675 queue_only, stcb->asoc.peers_rwnd, un_sent,
13676 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13677 stcb->asoc.total_output_queue_size, error);
13682 if (local_soresv && stcb) {
13683 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13685 if (create_lock_applied) {
13686 SCTP_ASOC_CREATE_UNLOCK(inp);
13688 if ((stcb) && hold_tcblock) {
13689 SCTP_TCB_UNLOCK(stcb);
13691 if (stcb && free_cnt_applied) {
13692 atomic_add_int(&stcb->asoc.refcnt, -1);
13696 if (mtx_owned(&stcb->tcb_mtx)) {
13697 panic("Leaving with tcb mtx owned?");
13699 if (mtx_owned(&stcb->tcb_send_mtx)) {
13700 panic("Leaving with tcb send mtx owned?");
13708 sctp_m_freem(control);
13715 * generate an AUTHentication chunk, if required
13718 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13719 struct sctp_auth_chunk **auth_ret, uint32_t *offset,
13720 struct sctp_tcb *stcb, uint8_t chunk)
13722 struct mbuf *m_auth;
13723 struct sctp_auth_chunk *auth;
13727 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13731 if (stcb->asoc.auth_supported == 0) {
13734 /* does the requested chunk require auth? */
13735 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13738 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
13739 if (m_auth == NULL) {
13743 /* reserve some space if this will be the first mbuf */
13745 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13746 /* fill in the AUTH chunk details */
13747 auth = mtod(m_auth, struct sctp_auth_chunk *);
13748 memset(auth, 0, sizeof(*auth));
13749 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13750 auth->ch.chunk_flags = 0;
13751 chunk_len = sizeof(*auth) +
13752 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13753 auth->ch.chunk_length = htons(chunk_len);
13754 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13755 /* key id and hmac digest will be computed and filled in upon send */
13757 /* save the offset where the auth was inserted into the chain */
13759 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13760 *offset += SCTP_BUF_LEN(cn);
13763 /* update length and return pointer to the auth chunk */
13764 SCTP_BUF_LEN(m_auth) = chunk_len;
13765 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13766 if (auth_ret != NULL)
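/*
 * Check whether the route's installed gateway is one of the routers
 * that advertised the prefix covering the given IPv6 source address.
 */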
13774 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
13776 struct nd_prefix *pfx = NULL;
13777 struct nd_pfxrouter *pfxrtr = NULL;
13778 struct sockaddr_in6 gw6;
13780 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13783 /* get prefix entry of address */
13785 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13786 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13788 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13789 &src6->sin6_addr, &pfx->ndpr_mask))
13792 /* no prefix entry in the prefix list */
13795 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13796 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13800 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13801 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13803 /* search the prefix entry for the installed gateway */
13804 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13805 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13806 gw6.sin6_family = AF_INET6;
13807 gw6.sin6_len = sizeof(struct sockaddr_in6);
13808 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13809 sizeof(struct in6_addr));
13810 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13811 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13812 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13813 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13814 if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
13816 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13822 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
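/*
 * Check whether the IPv4 source address and the route's gateway fall
 * in the same subnet, using the netmask of the interface that owns the
 * source address.
 */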
13828 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
13831 struct sockaddr_in *sin, *mask;
13832 struct ifaddr *ifa;
13833 struct in_addr srcnetaddr, gwnetaddr;
13835 if (ro == NULL || ro->ro_rt == NULL ||
13836 sifa->address.sa.sa_family != AF_INET) {
13839 ifa = (struct ifaddr *)sifa->ifa;
13840 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13841 sin = &sifa->address.sin;
13842 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13843 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13844 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13845 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13847 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13848 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13849 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13850 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13851 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13852 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {