2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_auth.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_asconf.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_bsd_addr.h>
51 #include <netinet/sctp_input.h>
52 #include <netinet/sctp_crc32.h>
53 #if defined(INET) || defined(INET6)
54 #include <netinet/udp.h>
56 #include <netinet/udp_var.h>
57 #include <machine/in_cksum.h>
61 #define SCTP_MAX_GAPS_INARRAY 4
63 uint8_t right_edge; /* mergable on the right edge */
64 uint8_t left_edge; /* mergable on the left edge */
67 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
70 const struct sack_track sack_array[256] = {
71 {0, 0, 0, 0, /* 0x00 */
78 {1, 0, 1, 0, /* 0x01 */
85 {0, 0, 1, 0, /* 0x02 */
92 {1, 0, 1, 0, /* 0x03 */
99 {0, 0, 1, 0, /* 0x04 */
106 {1, 0, 2, 0, /* 0x05 */
113 {0, 0, 1, 0, /* 0x06 */
120 {1, 0, 1, 0, /* 0x07 */
127 {0, 0, 1, 0, /* 0x08 */
134 {1, 0, 2, 0, /* 0x09 */
141 {0, 0, 2, 0, /* 0x0a */
148 {1, 0, 2, 0, /* 0x0b */
155 {0, 0, 1, 0, /* 0x0c */
162 {1, 0, 2, 0, /* 0x0d */
169 {0, 0, 1, 0, /* 0x0e */
176 {1, 0, 1, 0, /* 0x0f */
183 {0, 0, 1, 0, /* 0x10 */
190 {1, 0, 2, 0, /* 0x11 */
197 {0, 0, 2, 0, /* 0x12 */
204 {1, 0, 2, 0, /* 0x13 */
211 {0, 0, 2, 0, /* 0x14 */
218 {1, 0, 3, 0, /* 0x15 */
225 {0, 0, 2, 0, /* 0x16 */
232 {1, 0, 2, 0, /* 0x17 */
239 {0, 0, 1, 0, /* 0x18 */
246 {1, 0, 2, 0, /* 0x19 */
253 {0, 0, 2, 0, /* 0x1a */
260 {1, 0, 2, 0, /* 0x1b */
267 {0, 0, 1, 0, /* 0x1c */
274 {1, 0, 2, 0, /* 0x1d */
281 {0, 0, 1, 0, /* 0x1e */
288 {1, 0, 1, 0, /* 0x1f */
295 {0, 0, 1, 0, /* 0x20 */
302 {1, 0, 2, 0, /* 0x21 */
309 {0, 0, 2, 0, /* 0x22 */
316 {1, 0, 2, 0, /* 0x23 */
323 {0, 0, 2, 0, /* 0x24 */
330 {1, 0, 3, 0, /* 0x25 */
337 {0, 0, 2, 0, /* 0x26 */
344 {1, 0, 2, 0, /* 0x27 */
351 {0, 0, 2, 0, /* 0x28 */
358 {1, 0, 3, 0, /* 0x29 */
365 {0, 0, 3, 0, /* 0x2a */
372 {1, 0, 3, 0, /* 0x2b */
379 {0, 0, 2, 0, /* 0x2c */
386 {1, 0, 3, 0, /* 0x2d */
393 {0, 0, 2, 0, /* 0x2e */
400 {1, 0, 2, 0, /* 0x2f */
407 {0, 0, 1, 0, /* 0x30 */
414 {1, 0, 2, 0, /* 0x31 */
421 {0, 0, 2, 0, /* 0x32 */
428 {1, 0, 2, 0, /* 0x33 */
435 {0, 0, 2, 0, /* 0x34 */
442 {1, 0, 3, 0, /* 0x35 */
449 {0, 0, 2, 0, /* 0x36 */
456 {1, 0, 2, 0, /* 0x37 */
463 {0, 0, 1, 0, /* 0x38 */
470 {1, 0, 2, 0, /* 0x39 */
477 {0, 0, 2, 0, /* 0x3a */
484 {1, 0, 2, 0, /* 0x3b */
491 {0, 0, 1, 0, /* 0x3c */
498 {1, 0, 2, 0, /* 0x3d */
505 {0, 0, 1, 0, /* 0x3e */
512 {1, 0, 1, 0, /* 0x3f */
519 {0, 0, 1, 0, /* 0x40 */
526 {1, 0, 2, 0, /* 0x41 */
533 {0, 0, 2, 0, /* 0x42 */
540 {1, 0, 2, 0, /* 0x43 */
547 {0, 0, 2, 0, /* 0x44 */
554 {1, 0, 3, 0, /* 0x45 */
561 {0, 0, 2, 0, /* 0x46 */
568 {1, 0, 2, 0, /* 0x47 */
575 {0, 0, 2, 0, /* 0x48 */
582 {1, 0, 3, 0, /* 0x49 */
589 {0, 0, 3, 0, /* 0x4a */
596 {1, 0, 3, 0, /* 0x4b */
603 {0, 0, 2, 0, /* 0x4c */
610 {1, 0, 3, 0, /* 0x4d */
617 {0, 0, 2, 0, /* 0x4e */
624 {1, 0, 2, 0, /* 0x4f */
631 {0, 0, 2, 0, /* 0x50 */
638 {1, 0, 3, 0, /* 0x51 */
645 {0, 0, 3, 0, /* 0x52 */
652 {1, 0, 3, 0, /* 0x53 */
659 {0, 0, 3, 0, /* 0x54 */
666 {1, 0, 4, 0, /* 0x55 */
673 {0, 0, 3, 0, /* 0x56 */
680 {1, 0, 3, 0, /* 0x57 */
687 {0, 0, 2, 0, /* 0x58 */
694 {1, 0, 3, 0, /* 0x59 */
701 {0, 0, 3, 0, /* 0x5a */
708 {1, 0, 3, 0, /* 0x5b */
715 {0, 0, 2, 0, /* 0x5c */
722 {1, 0, 3, 0, /* 0x5d */
729 {0, 0, 2, 0, /* 0x5e */
736 {1, 0, 2, 0, /* 0x5f */
743 {0, 0, 1, 0, /* 0x60 */
750 {1, 0, 2, 0, /* 0x61 */
757 {0, 0, 2, 0, /* 0x62 */
764 {1, 0, 2, 0, /* 0x63 */
771 {0, 0, 2, 0, /* 0x64 */
778 {1, 0, 3, 0, /* 0x65 */
785 {0, 0, 2, 0, /* 0x66 */
792 {1, 0, 2, 0, /* 0x67 */
799 {0, 0, 2, 0, /* 0x68 */
806 {1, 0, 3, 0, /* 0x69 */
813 {0, 0, 3, 0, /* 0x6a */
820 {1, 0, 3, 0, /* 0x6b */
827 {0, 0, 2, 0, /* 0x6c */
834 {1, 0, 3, 0, /* 0x6d */
841 {0, 0, 2, 0, /* 0x6e */
848 {1, 0, 2, 0, /* 0x6f */
855 {0, 0, 1, 0, /* 0x70 */
862 {1, 0, 2, 0, /* 0x71 */
869 {0, 0, 2, 0, /* 0x72 */
876 {1, 0, 2, 0, /* 0x73 */
883 {0, 0, 2, 0, /* 0x74 */
890 {1, 0, 3, 0, /* 0x75 */
897 {0, 0, 2, 0, /* 0x76 */
904 {1, 0, 2, 0, /* 0x77 */
911 {0, 0, 1, 0, /* 0x78 */
918 {1, 0, 2, 0, /* 0x79 */
925 {0, 0, 2, 0, /* 0x7a */
932 {1, 0, 2, 0, /* 0x7b */
939 {0, 0, 1, 0, /* 0x7c */
946 {1, 0, 2, 0, /* 0x7d */
953 {0, 0, 1, 0, /* 0x7e */
960 {1, 0, 1, 0, /* 0x7f */
967 {0, 1, 1, 0, /* 0x80 */
974 {1, 1, 2, 0, /* 0x81 */
981 {0, 1, 2, 0, /* 0x82 */
988 {1, 1, 2, 0, /* 0x83 */
995 {0, 1, 2, 0, /* 0x84 */
1002 {1, 1, 3, 0, /* 0x85 */
1009 {0, 1, 2, 0, /* 0x86 */
1016 {1, 1, 2, 0, /* 0x87 */
1023 {0, 1, 2, 0, /* 0x88 */
1030 {1, 1, 3, 0, /* 0x89 */
1037 {0, 1, 3, 0, /* 0x8a */
1044 {1, 1, 3, 0, /* 0x8b */
1051 {0, 1, 2, 0, /* 0x8c */
1058 {1, 1, 3, 0, /* 0x8d */
1065 {0, 1, 2, 0, /* 0x8e */
1072 {1, 1, 2, 0, /* 0x8f */
1079 {0, 1, 2, 0, /* 0x90 */
1086 {1, 1, 3, 0, /* 0x91 */
1093 {0, 1, 3, 0, /* 0x92 */
1100 {1, 1, 3, 0, /* 0x93 */
1107 {0, 1, 3, 0, /* 0x94 */
1114 {1, 1, 4, 0, /* 0x95 */
1121 {0, 1, 3, 0, /* 0x96 */
1128 {1, 1, 3, 0, /* 0x97 */
1135 {0, 1, 2, 0, /* 0x98 */
1142 {1, 1, 3, 0, /* 0x99 */
1149 {0, 1, 3, 0, /* 0x9a */
1156 {1, 1, 3, 0, /* 0x9b */
1163 {0, 1, 2, 0, /* 0x9c */
1170 {1, 1, 3, 0, /* 0x9d */
1177 {0, 1, 2, 0, /* 0x9e */
1184 {1, 1, 2, 0, /* 0x9f */
1191 {0, 1, 2, 0, /* 0xa0 */
1198 {1, 1, 3, 0, /* 0xa1 */
1205 {0, 1, 3, 0, /* 0xa2 */
1212 {1, 1, 3, 0, /* 0xa3 */
1219 {0, 1, 3, 0, /* 0xa4 */
1226 {1, 1, 4, 0, /* 0xa5 */
1233 {0, 1, 3, 0, /* 0xa6 */
1240 {1, 1, 3, 0, /* 0xa7 */
1247 {0, 1, 3, 0, /* 0xa8 */
1254 {1, 1, 4, 0, /* 0xa9 */
1261 {0, 1, 4, 0, /* 0xaa */
1268 {1, 1, 4, 0, /* 0xab */
1275 {0, 1, 3, 0, /* 0xac */
1282 {1, 1, 4, 0, /* 0xad */
1289 {0, 1, 3, 0, /* 0xae */
1296 {1, 1, 3, 0, /* 0xaf */
1303 {0, 1, 2, 0, /* 0xb0 */
1310 {1, 1, 3, 0, /* 0xb1 */
1317 {0, 1, 3, 0, /* 0xb2 */
1324 {1, 1, 3, 0, /* 0xb3 */
1331 {0, 1, 3, 0, /* 0xb4 */
1338 {1, 1, 4, 0, /* 0xb5 */
1345 {0, 1, 3, 0, /* 0xb6 */
1352 {1, 1, 3, 0, /* 0xb7 */
1359 {0, 1, 2, 0, /* 0xb8 */
1366 {1, 1, 3, 0, /* 0xb9 */
1373 {0, 1, 3, 0, /* 0xba */
1380 {1, 1, 3, 0, /* 0xbb */
1387 {0, 1, 2, 0, /* 0xbc */
1394 {1, 1, 3, 0, /* 0xbd */
1401 {0, 1, 2, 0, /* 0xbe */
1408 {1, 1, 2, 0, /* 0xbf */
1415 {0, 1, 1, 0, /* 0xc0 */
1422 {1, 1, 2, 0, /* 0xc1 */
1429 {0, 1, 2, 0, /* 0xc2 */
1436 {1, 1, 2, 0, /* 0xc3 */
1443 {0, 1, 2, 0, /* 0xc4 */
1450 {1, 1, 3, 0, /* 0xc5 */
1457 {0, 1, 2, 0, /* 0xc6 */
1464 {1, 1, 2, 0, /* 0xc7 */
1471 {0, 1, 2, 0, /* 0xc8 */
1478 {1, 1, 3, 0, /* 0xc9 */
1485 {0, 1, 3, 0, /* 0xca */
1492 {1, 1, 3, 0, /* 0xcb */
1499 {0, 1, 2, 0, /* 0xcc */
1506 {1, 1, 3, 0, /* 0xcd */
1513 {0, 1, 2, 0, /* 0xce */
1520 {1, 1, 2, 0, /* 0xcf */
1527 {0, 1, 2, 0, /* 0xd0 */
1534 {1, 1, 3, 0, /* 0xd1 */
1541 {0, 1, 3, 0, /* 0xd2 */
1548 {1, 1, 3, 0, /* 0xd3 */
1555 {0, 1, 3, 0, /* 0xd4 */
1562 {1, 1, 4, 0, /* 0xd5 */
1569 {0, 1, 3, 0, /* 0xd6 */
1576 {1, 1, 3, 0, /* 0xd7 */
1583 {0, 1, 2, 0, /* 0xd8 */
1590 {1, 1, 3, 0, /* 0xd9 */
1597 {0, 1, 3, 0, /* 0xda */
1604 {1, 1, 3, 0, /* 0xdb */
1611 {0, 1, 2, 0, /* 0xdc */
1618 {1, 1, 3, 0, /* 0xdd */
1625 {0, 1, 2, 0, /* 0xde */
1632 {1, 1, 2, 0, /* 0xdf */
1639 {0, 1, 1, 0, /* 0xe0 */
1646 {1, 1, 2, 0, /* 0xe1 */
1653 {0, 1, 2, 0, /* 0xe2 */
1660 {1, 1, 2, 0, /* 0xe3 */
1667 {0, 1, 2, 0, /* 0xe4 */
1674 {1, 1, 3, 0, /* 0xe5 */
1681 {0, 1, 2, 0, /* 0xe6 */
1688 {1, 1, 2, 0, /* 0xe7 */
1695 {0, 1, 2, 0, /* 0xe8 */
1702 {1, 1, 3, 0, /* 0xe9 */
1709 {0, 1, 3, 0, /* 0xea */
1716 {1, 1, 3, 0, /* 0xeb */
1723 {0, 1, 2, 0, /* 0xec */
1730 {1, 1, 3, 0, /* 0xed */
1737 {0, 1, 2, 0, /* 0xee */
1744 {1, 1, 2, 0, /* 0xef */
1751 {0, 1, 1, 0, /* 0xf0 */
1758 {1, 1, 2, 0, /* 0xf1 */
1765 {0, 1, 2, 0, /* 0xf2 */
1772 {1, 1, 2, 0, /* 0xf3 */
1779 {0, 1, 2, 0, /* 0xf4 */
1786 {1, 1, 3, 0, /* 0xf5 */
1793 {0, 1, 2, 0, /* 0xf6 */
1800 {1, 1, 2, 0, /* 0xf7 */
1807 {0, 1, 1, 0, /* 0xf8 */
1814 {1, 1, 2, 0, /* 0xf9 */
1821 {0, 1, 2, 0, /* 0xfa */
1828 {1, 1, 2, 0, /* 0xfb */
1835 {0, 1, 1, 0, /* 0xfc */
1842 {1, 1, 2, 0, /* 0xfd */
1849 {0, 1, 1, 0, /* 0xfe */
1856 {1, 1, 1, 0, /* 0xff */
1867 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1868 struct sctp_scoping *scope,
1871 if ((scope->loopback_scope == 0) &&
1872 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1874 * skip loopback if not in scope *
1878 switch (ifa->address.sa.sa_family) {
1881 if (scope->ipv4_addr_legal) {
1882 struct sockaddr_in *sin;
1884 sin = &ifa->address.sin;
1885 if (sin->sin_addr.s_addr == 0) {
1886 /* not in scope , unspecified */
1889 if ((scope->ipv4_local_scope == 0) &&
1890 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1891 /* private address not in scope */
1901 if (scope->ipv6_addr_legal) {
1902 struct sockaddr_in6 *sin6;
1905 * Must update the flags, bummer, which means any
1906 * IFA locks must now be applied HERE <->
1909 sctp_gather_internal_ifa_flags(ifa);
1911 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1914 /* ok to use deprecated addresses? */
1915 sin6 = &ifa->address.sin6;
1916 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1917 /* skip unspecifed addresses */
1920 if ( /* (local_scope == 0) && */
1921 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1924 if ((scope->site_scope == 0) &&
1925 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1939 static struct mbuf *
1940 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
1942 #if defined(INET) || defined(INET6)
1943 struct sctp_paramhdr *paramh;
1948 switch (ifa->address.sa.sa_family) {
1951 plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
1956 plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
1962 #if defined(INET) || defined(INET6)
1963 if (M_TRAILINGSPACE(m) >= plen) {
1964 /* easy side we just drop it on the end */
1965 paramh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1968 /* Need more space */
1970 while (SCTP_BUF_NEXT(mret) != NULL) {
1971 mret = SCTP_BUF_NEXT(mret);
1973 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
1974 if (SCTP_BUF_NEXT(mret) == NULL) {
1975 /* We are hosed, can't add more addresses */
1978 mret = SCTP_BUF_NEXT(mret);
1979 paramh = mtod(mret, struct sctp_paramhdr *);
1981 /* now add the parameter */
1982 switch (ifa->address.sa.sa_family) {
1986 struct sctp_ipv4addr_param *ipv4p;
1987 struct sockaddr_in *sin;
1989 sin = &ifa->address.sin;
1990 ipv4p = (struct sctp_ipv4addr_param *)paramh;
1991 paramh->param_type = htons(SCTP_IPV4_ADDRESS);
1992 paramh->param_length = htons(plen);
1993 ipv4p->addr = sin->sin_addr.s_addr;
1994 SCTP_BUF_LEN(mret) += plen;
2001 struct sctp_ipv6addr_param *ipv6p;
2002 struct sockaddr_in6 *sin6;
2004 sin6 = &ifa->address.sin6;
2005 ipv6p = (struct sctp_ipv6addr_param *)paramh;
2006 paramh->param_type = htons(SCTP_IPV6_ADDRESS);
2007 paramh->param_length = htons(plen);
2008 memcpy(ipv6p->addr, &sin6->sin6_addr,
2009 sizeof(ipv6p->addr));
2010 /* clear embedded scope in the address */
2011 in6_clearscope((struct in6_addr *)ipv6p->addr);
2012 SCTP_BUF_LEN(mret) += plen;
2028 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2029 struct sctp_scoping *scope,
2030 struct mbuf *m_at, int cnt_inits_to,
2031 uint16_t *padding_len, uint16_t *chunk_len)
2033 struct sctp_vrf *vrf = NULL;
2034 int cnt, limit_out = 0, total_count;
2037 vrf_id = inp->def_vrf_id;
2038 SCTP_IPI_ADDR_RLOCK();
2039 vrf = sctp_find_vrf(vrf_id);
2041 SCTP_IPI_ADDR_RUNLOCK();
2044 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2045 struct sctp_ifa *sctp_ifap;
2046 struct sctp_ifn *sctp_ifnp;
2049 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2051 cnt = SCTP_ADDRESS_LIMIT;
2054 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2055 if ((scope->loopback_scope == 0) &&
2056 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2058 * Skip loopback devices if loopback_scope
2063 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2065 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2066 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2067 &sctp_ifap->address.sin.sin_addr) != 0)) {
2072 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2073 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2074 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2078 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2081 if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
2085 if (cnt > SCTP_ADDRESS_LIMIT) {
2089 if (cnt > SCTP_ADDRESS_LIMIT) {
2096 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2098 if ((scope->loopback_scope == 0) &&
2099 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2101 * Skip loopback devices if
2102 * loopback_scope not set
2106 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2108 if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
2109 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2110 &sctp_ifap->address.sin.sin_addr) != 0)) {
2115 if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
2116 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2117 &sctp_ifap->address.sin6.sin6_addr) != 0)) {
2121 if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
2124 if (sctp_is_address_in_scope(sctp_ifap,
2128 if ((chunk_len != NULL) &&
2129 (padding_len != NULL) &&
2130 (*padding_len > 0)) {
2131 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2132 SCTP_BUF_LEN(m_at) += *padding_len;
2133 *chunk_len += *padding_len;
2136 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
2147 if (total_count > SCTP_ADDRESS_LIMIT) {
2148 /* No more addresses */
2156 struct sctp_laddr *laddr;
2159 /* First, how many ? */
2160 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2161 if (laddr->ifa == NULL) {
2164 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2166 * Address being deleted by the system, dont
2170 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2172 * Address being deleted on this ep don't
2177 if (sctp_is_address_in_scope(laddr->ifa,
2184 * To get through a NAT we only list addresses if we have
2185 * more than one. That way if you just bind a single address
2186 * we let the source of the init dictate our address.
2190 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2191 if (laddr->ifa == NULL) {
2194 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
2197 if (sctp_is_address_in_scope(laddr->ifa,
2201 if ((chunk_len != NULL) &&
2202 (padding_len != NULL) &&
2203 (*padding_len > 0)) {
2204 memset(mtod(m_at, caddr_t)+*chunk_len, 0, *padding_len);
2205 SCTP_BUF_LEN(m_at) += *padding_len;
2206 *chunk_len += *padding_len;
2209 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
2211 if (cnt >= SCTP_ADDRESS_LIMIT) {
2217 SCTP_IPI_ADDR_RUNLOCK();
2221 static struct sctp_ifa *
2222 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2223 uint8_t dest_is_loop,
2224 uint8_t dest_is_priv,
2227 uint8_t dest_is_global = 0;
2229 /* dest_is_priv is true if destination is a private address */
2230 /* dest_is_loop is true if destination is a loopback addresses */
2233 * Here we determine if its a preferred address. A preferred address
2234 * means it is the same scope or higher scope then the destination.
2235 * L = loopback, P = private, G = global
2236 * -----------------------------------------
2237 * src | dest | result
2238 * ----------------------------------------
2240 * -----------------------------------------
2241 * P | L | yes-v4 no-v6
2242 * -----------------------------------------
2243 * G | L | yes-v4 no-v6
2244 * -----------------------------------------
2246 * -----------------------------------------
2248 * -----------------------------------------
2250 * -----------------------------------------
2252 * -----------------------------------------
2254 * -----------------------------------------
2256 * -----------------------------------------
2259 if (ifa->address.sa.sa_family != fam) {
2260 /* forget mis-matched family */
2263 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2266 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2267 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2268 /* Ok the address may be ok */
2270 if (fam == AF_INET6) {
2271 /* ok to use deprecated addresses? no lets not! */
2272 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2273 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2276 if (ifa->src_is_priv && !ifa->src_is_loop) {
2278 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2282 if (ifa->src_is_glob) {
2284 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2291 * Now that we know what is what, implement or table this could in
2292 * theory be done slicker (it used to be), but this is
2293 * straightforward and easier to validate :-)
2295 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2296 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2297 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2298 dest_is_loop, dest_is_priv, dest_is_global);
2300 if ((ifa->src_is_loop) && (dest_is_priv)) {
2301 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2304 if ((ifa->src_is_glob) && (dest_is_priv)) {
2305 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2308 if ((ifa->src_is_loop) && (dest_is_global)) {
2309 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2312 if ((ifa->src_is_priv) && (dest_is_global)) {
2313 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2316 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2317 /* its a preferred address */
2321 static struct sctp_ifa *
2322 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2323 uint8_t dest_is_loop,
2324 uint8_t dest_is_priv,
2327 uint8_t dest_is_global = 0;
2330 * Here we determine if its a acceptable address. A acceptable
2331 * address means it is the same scope or higher scope but we can
2332 * allow for NAT which means its ok to have a global dest and a
2335 * L = loopback, P = private, G = global
2336 * -----------------------------------------
2337 * src | dest | result
2338 * -----------------------------------------
2340 * -----------------------------------------
2341 * P | L | yes-v4 no-v6
2342 * -----------------------------------------
2344 * -----------------------------------------
2346 * -----------------------------------------
2348 * -----------------------------------------
2349 * G | P | yes - May not work
2350 * -----------------------------------------
2352 * -----------------------------------------
2353 * P | G | yes - May not work
2354 * -----------------------------------------
2356 * -----------------------------------------
2359 if (ifa->address.sa.sa_family != fam) {
2360 /* forget non matching family */
2361 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
2362 ifa->address.sa.sa_family, fam);
2365 /* Ok the address may be ok */
2366 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
2367 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
2368 dest_is_loop, dest_is_priv);
2369 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2373 if (fam == AF_INET6) {
2374 /* ok to use deprecated addresses? */
2375 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2378 if (ifa->src_is_priv) {
2379 /* Special case, linklocal to loop */
2386 * Now that we know what is what, implement our table. This could in
2387 * theory be done slicker (it used to be), but this is
2388 * straightforward and easier to validate :-)
2390 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
2393 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2396 SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
2399 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2402 SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
2403 /* its an acceptable address */
2408 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2410 struct sctp_laddr *laddr;
2413 /* There are no restrictions, no TCB :-) */
2416 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2417 if (laddr->ifa == NULL) {
2418 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2422 if (laddr->ifa == ifa) {
2423 /* Yes it is on the list */
2432 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2434 struct sctp_laddr *laddr;
2438 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2439 if (laddr->ifa == NULL) {
2440 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2444 if ((laddr->ifa == ifa) && laddr->action == 0)
2453 static struct sctp_ifa *
2454 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2457 int non_asoc_addr_ok,
2458 uint8_t dest_is_priv,
2459 uint8_t dest_is_loop,
2462 struct sctp_laddr *laddr, *starting_point;
2465 struct sctp_ifn *sctp_ifn;
2466 struct sctp_ifa *sctp_ifa, *sifa;
2467 struct sctp_vrf *vrf;
2470 vrf = sctp_find_vrf(vrf_id);
2474 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2475 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2476 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2478 * first question, is the ifn we will emit on in our list, if so, we
2479 * want such an address. Note that we first looked for a preferred
2483 /* is a preferred one on the interface we route out? */
2484 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2486 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2487 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2488 &sctp_ifa->address.sin.sin_addr) != 0)) {
2493 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2494 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2495 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2499 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2500 (non_asoc_addr_ok == 0))
2502 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2507 if (sctp_is_addr_in_ep(inp, sifa)) {
2508 atomic_add_int(&sifa->refcount, 1);
2514 * ok, now we now need to find one on the list of the addresses. We
2515 * can't get one on the emitting interface so let's find first a
2516 * preferred one. If not that an acceptable one otherwise... we
2519 starting_point = inp->next_addr_touse;
2521 if (inp->next_addr_touse == NULL) {
2522 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2525 for (laddr = inp->next_addr_touse; laddr;
2526 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2527 if (laddr->ifa == NULL) {
2528 /* address has been removed */
2531 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2532 /* address is being deleted */
2535 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2539 atomic_add_int(&sifa->refcount, 1);
2542 if (resettotop == 0) {
2543 inp->next_addr_touse = NULL;
2546 inp->next_addr_touse = starting_point;
2549 if (inp->next_addr_touse == NULL) {
2550 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2553 /* ok, what about an acceptable address in the inp */
2554 for (laddr = inp->next_addr_touse; laddr;
2555 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2556 if (laddr->ifa == NULL) {
2557 /* address has been removed */
2560 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2561 /* address is being deleted */
2564 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2568 atomic_add_int(&sifa->refcount, 1);
2571 if (resettotop == 0) {
2572 inp->next_addr_touse = NULL;
2573 goto once_again_too;
2576 * no address bound can be a source for the destination we are in
2584 static struct sctp_ifa *
2585 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2586 struct sctp_tcb *stcb,
2589 uint8_t dest_is_priv,
2590 uint8_t dest_is_loop,
2591 int non_asoc_addr_ok,
2594 struct sctp_laddr *laddr, *starting_point;
2596 struct sctp_ifn *sctp_ifn;
2597 struct sctp_ifa *sctp_ifa, *sifa;
2598 uint8_t start_at_beginning = 0;
2599 struct sctp_vrf *vrf;
2603 * first question, is the ifn we will emit on in our list, if so, we
2606 vrf = sctp_find_vrf(vrf_id);
2610 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2611 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2612 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2615 * first question, is the ifn we will emit on in our list? If so,
2616 * we want that one. First we look for a preferred. Second, we go
2617 * for an acceptable.
2620 /* first try for a preferred address on the ep */
2621 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2623 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2624 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2625 &sctp_ifa->address.sin.sin_addr) != 0)) {
2630 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2631 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2632 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2636 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2638 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2639 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2642 if (((non_asoc_addr_ok == 0) &&
2643 (sctp_is_addr_restricted(stcb, sifa))) ||
2644 (non_asoc_addr_ok &&
2645 (sctp_is_addr_restricted(stcb, sifa)) &&
2646 (!sctp_is_addr_pending(stcb, sifa)))) {
2647 /* on the no-no list */
2650 atomic_add_int(&sifa->refcount, 1);
2654 /* next try for an acceptable address on the ep */
2655 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2657 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
2658 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2659 &sctp_ifa->address.sin.sin_addr) != 0)) {
2664 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
2665 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2666 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
2670 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2672 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2673 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2676 if (((non_asoc_addr_ok == 0) &&
2677 (sctp_is_addr_restricted(stcb, sifa))) ||
2678 (non_asoc_addr_ok &&
2679 (sctp_is_addr_restricted(stcb, sifa)) &&
2680 (!sctp_is_addr_pending(stcb, sifa)))) {
2681 /* on the no-no list */
2684 atomic_add_int(&sifa->refcount, 1);
2691 * if we can't find one like that then we must look at all addresses
2692 * bound to pick one at first preferable then secondly acceptable.
2694 starting_point = stcb->asoc.last_used_address;
2696 if (stcb->asoc.last_used_address == NULL) {
2697 start_at_beginning = 1;
2698 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2700 /* search beginning with the last used address */
2701 for (laddr = stcb->asoc.last_used_address; laddr;
2702 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2703 if (laddr->ifa == NULL) {
2704 /* address has been removed */
2707 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2708 /* address is being deleted */
2711 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2714 if (((non_asoc_addr_ok == 0) &&
2715 (sctp_is_addr_restricted(stcb, sifa))) ||
2716 (non_asoc_addr_ok &&
2717 (sctp_is_addr_restricted(stcb, sifa)) &&
2718 (!sctp_is_addr_pending(stcb, sifa)))) {
2719 /* on the no-no list */
2722 stcb->asoc.last_used_address = laddr;
2723 atomic_add_int(&sifa->refcount, 1);
2726 if (start_at_beginning == 0) {
2727 stcb->asoc.last_used_address = NULL;
2728 goto sctp_from_the_top;
2730 /* now try for any higher scope than the destination */
2731 stcb->asoc.last_used_address = starting_point;
2732 start_at_beginning = 0;
2734 if (stcb->asoc.last_used_address == NULL) {
2735 start_at_beginning = 1;
2736 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2738 /* search beginning with the last used address */
2739 for (laddr = stcb->asoc.last_used_address; laddr;
2740 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2741 if (laddr->ifa == NULL) {
2742 /* address has been removed */
2745 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2746 /* address is being deleted */
2749 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2753 if (((non_asoc_addr_ok == 0) &&
2754 (sctp_is_addr_restricted(stcb, sifa))) ||
2755 (non_asoc_addr_ok &&
2756 (sctp_is_addr_restricted(stcb, sifa)) &&
2757 (!sctp_is_addr_pending(stcb, sifa)))) {
2758 /* on the no-no list */
2761 stcb->asoc.last_used_address = laddr;
2762 atomic_add_int(&sifa->refcount, 1);
2765 if (start_at_beginning == 0) {
2766 stcb->asoc.last_used_address = NULL;
2767 goto sctp_from_the_top2;
2772 static struct sctp_ifa *
2773 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2774 struct sctp_inpcb *inp,
2775 struct sctp_tcb *stcb,
2776 int non_asoc_addr_ok,
2777 uint8_t dest_is_loop,
2778 uint8_t dest_is_priv,
2784 struct sctp_ifa *ifa, *sifa;
2785 int num_eligible_addr = 0;
2787 struct sockaddr_in6 sin6, lsa6;
2789 if (fam == AF_INET6) {
2790 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2791 (void)sa6_recoverscope(&sin6);
2794 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2796 if ((ifa->address.sa.sa_family == AF_INET) &&
2797 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2798 &ifa->address.sin.sin_addr) != 0)) {
2803 if ((ifa->address.sa.sa_family == AF_INET6) &&
2804 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2805 &ifa->address.sin6.sin6_addr) != 0)) {
2809 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2810 (non_asoc_addr_ok == 0))
2812 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2817 if (fam == AF_INET6 &&
2819 sifa->src_is_loop && sifa->src_is_priv) {
2821 * don't allow fe80::1 to be a src on loop ::1, we
2822 * don't list it to the peer so we will get an
2827 if (fam == AF_INET6 &&
2828 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2829 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2831 * link-local <-> link-local must belong to the same
2834 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2835 (void)sa6_recoverscope(&lsa6);
2836 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2843 * Check if the IPv6 address matches to next-hop. In the
2844 * mobile case, old IPv6 address may be not deleted from the
2845 * interface. Then, the interface has previous and new
2846 * addresses. We should use one corresponding to the
2847 * next-hop. (by micchie)
2850 if (stcb && fam == AF_INET6 &&
2851 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2852 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2859 /* Avoid topologically incorrect IPv4 address */
2860 if (stcb && fam == AF_INET &&
2861 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2862 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2868 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2871 if (((non_asoc_addr_ok == 0) &&
2872 (sctp_is_addr_restricted(stcb, sifa))) ||
2873 (non_asoc_addr_ok &&
2874 (sctp_is_addr_restricted(stcb, sifa)) &&
2875 (!sctp_is_addr_pending(stcb, sifa)))) {
2877 * It is restricted for some reason..
2878 * probably not yet added.
2883 if (num_eligible_addr >= addr_wanted) {
2886 num_eligible_addr++;
2893 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2894 struct sctp_inpcb *inp,
2895 struct sctp_tcb *stcb,
2896 int non_asoc_addr_ok,
2897 uint8_t dest_is_loop,
2898 uint8_t dest_is_priv,
2901 struct sctp_ifa *ifa, *sifa;
2902 int num_eligible_addr = 0;
2904 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2906 if ((ifa->address.sa.sa_family == AF_INET) &&
2907 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
2908 &ifa->address.sin.sin_addr) != 0)) {
2913 if ((ifa->address.sa.sa_family == AF_INET6) &&
2915 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
2916 &ifa->address.sin6.sin6_addr) != 0)) {
2920 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2921 (non_asoc_addr_ok == 0)) {
2924 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2930 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
2933 if (((non_asoc_addr_ok == 0) &&
2934 (sctp_is_addr_restricted(stcb, sifa))) ||
2935 (non_asoc_addr_ok &&
2936 (sctp_is_addr_restricted(stcb, sifa)) &&
2937 (!sctp_is_addr_pending(stcb, sifa)))) {
2939 * It is restricted for some reason..
2940 * probably not yet added.
2945 num_eligible_addr++;
2947 return (num_eligible_addr);
2950 static struct sctp_ifa *
2951 sctp_choose_boundall(struct sctp_inpcb *inp,
2952 struct sctp_tcb *stcb,
2953 struct sctp_nets *net,
2956 uint8_t dest_is_priv,
2957 uint8_t dest_is_loop,
2958 int non_asoc_addr_ok,
2961 int cur_addr_num = 0, num_preferred = 0;
2963 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2964 struct sctp_ifa *sctp_ifa, *sifa;
2966 struct sctp_vrf *vrf;
2972 * For boundall we can use any address in the association.
2973 * If non_asoc_addr_ok is set we can use any address (at least in
2974 * theory). So we look for preferred addresses first. If we find one,
2975 * we use it. Otherwise we next try to get an address on the
2976 * interface, which we should be able to do (unless non_asoc_addr_ok
2977 * is false and we are routed out that way). In these cases where we
2978 * can't use the address of the interface we go through all the
2979 * ifn's looking for an address we can use and fill that in. Punting
2980 * means we send back address 0, which will probably cause problems
2981 * actually since then IP will fill in the address of the route ifn,
2982 * which means we probably already rejected it.. i.e. here comes an
2985 vrf = sctp_find_vrf(vrf_id);
2989 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2990 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2991 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
2992 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2993 if (sctp_ifn == NULL) {
2994 /* ?? We don't have this guy ?? */
2995 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2996 goto bound_all_plan_b;
2998 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
2999 ifn_index, sctp_ifn->ifn_name);
3002 cur_addr_num = net->indx_of_eligible_next_to_use;
3004 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
3009 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
3010 num_preferred, sctp_ifn->ifn_name);
3011 if (num_preferred == 0) {
3013 * no eligible addresses, we must use some other interface
3014 * address if we can find one.
3016 goto bound_all_plan_b;
3019 * Ok we have num_eligible_addr set with how many we can use, this
3020 * may vary from call to call due to addresses being deprecated
3023 if (cur_addr_num >= num_preferred) {
3027 * select the nth address from the list (where cur_addr_num is the
3028 * nth) and 0 is the first one, 1 is the second one etc...
3030 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
3032 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3033 dest_is_priv, cur_addr_num, fam, ro);
3035 /* if sctp_ifa is NULL something changed??, fall to plan b. */
3037 atomic_add_int(&sctp_ifa->refcount, 1);
3039 /* save off where the next one we will want */
3040 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3045 * plan_b: Look at all interfaces and find a preferred address. If
3046 * no preferred fall through to plan_c.
3049 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
3050 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3051 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
3052 sctp_ifn->ifn_name);
3053 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3054 /* wrong base scope */
3055 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
3058 if ((sctp_ifn == looked_at) && looked_at) {
3059 /* already looked at this guy */
3060 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
3063 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
3064 dest_is_loop, dest_is_priv, fam);
3065 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3066 "Found ifn:%p %d preferred source addresses\n",
3067 ifn, num_preferred);
3068 if (num_preferred == 0) {
3069 /* None on this interface. */
3070 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
3073 SCTPDBG(SCTP_DEBUG_OUTPUT2,
3074 "num preferred:%d on interface:%p cur_addr_num:%d\n",
3075 num_preferred, (void *)sctp_ifn, cur_addr_num);
3078 * Ok we have num_eligible_addr set with how many we can
3079 * use, this may vary from call to call due to addresses
3080 * being deprecated etc..
3082 if (cur_addr_num >= num_preferred) {
3085 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
3086 dest_is_priv, cur_addr_num, fam, ro);
3090 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
3091 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
3093 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
3094 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
3095 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
3096 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
3098 atomic_add_int(&sifa->refcount, 1);
3102 again_with_private_addresses_allowed:
3104 /* plan_c: do we have an acceptable address on the emit interface */
3106 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
3107 if (emit_ifn == NULL) {
3108 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jump to Plan D - no emit_ifn\n");
3111 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
3112 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
3114 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3115 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3116 &sctp_ifa->address.sin.sin_addr) != 0)) {
3117 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3122 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3123 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3124 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3125 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Jailed\n");
3129 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3130 (non_asoc_addr_ok == 0)) {
3131 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Defer\n");
3134 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
3137 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
3141 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3142 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
3146 if (((non_asoc_addr_ok == 0) &&
3147 (sctp_is_addr_restricted(stcb, sifa))) ||
3148 (non_asoc_addr_ok &&
3149 (sctp_is_addr_restricted(stcb, sifa)) &&
3150 (!sctp_is_addr_pending(stcb, sifa)))) {
3152 * It is restricted for some reason..
3153 * probably not yet added.
3155 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
3160 atomic_add_int(&sifa->refcount, 1);
3165 * plan_d: We are in trouble. No preferred address on the emit
3166 * interface. And not even a preferred address on all interfaces. Go
3167 * out and see if we can find an acceptable address somewhere
3168 * amongst all interfaces.
3170 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
3171 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3172 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3173 /* wrong base scope */
3176 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3178 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3179 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3180 &sctp_ifa->address.sin.sin_addr) != 0)) {
3185 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3186 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3187 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3191 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3192 (non_asoc_addr_ok == 0))
3194 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3200 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
3204 if (((non_asoc_addr_ok == 0) &&
3205 (sctp_is_addr_restricted(stcb, sifa))) ||
3206 (non_asoc_addr_ok &&
3207 (sctp_is_addr_restricted(stcb, sifa)) &&
3208 (!sctp_is_addr_pending(stcb, sifa)))) {
3210 * It is restricted for some
3211 * reason.. probably not yet added.
3222 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
3223 stcb->asoc.scope.ipv4_local_scope = 1;
3225 goto again_with_private_addresses_allowed;
3226 } else if (retried == 1) {
3227 stcb->asoc.scope.ipv4_local_scope = 0;
3235 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
3236 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
3237 /* wrong base scope */
3240 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
3241 struct sctp_ifa *tmp_sifa;
3244 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
3245 (prison_check_ip4(inp->ip_inp.inp.inp_cred,
3246 &sctp_ifa->address.sin.sin_addr) != 0)) {
3251 if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
3252 (prison_check_ip6(inp->ip_inp.inp.inp_cred,
3253 &sctp_ifa->address.sin6.sin6_addr) != 0)) {
3257 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
3258 (non_asoc_addr_ok == 0))
3260 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
3263 if (tmp_sifa == NULL) {
3266 if (tmp_sifa == sifa) {
3270 if (sctp_is_address_in_scope(tmp_sifa,
3271 &stcb->asoc.scope, 0) == 0) {
3274 if (((non_asoc_addr_ok == 0) &&
3275 (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
3276 (non_asoc_addr_ok &&
3277 (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
3278 (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
3288 if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
3289 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
3290 sctp_add_local_addr_restricted(stcb, tmp_sifa);
3295 atomic_add_int(&sifa->refcount, 1);
3303 /* tcb may be NULL */
3305 sctp_source_address_selection(struct sctp_inpcb *inp,
3306 struct sctp_tcb *stcb,
3308 struct sctp_nets *net,
3309 int non_asoc_addr_ok, uint32_t vrf_id)
3311 struct sctp_ifa *answer;
3312 uint8_t dest_is_priv, dest_is_loop;
3315 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3318 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3323 * - Find the route if needed, cache if I can.
3324 * - Look at interface address in route, Is it in the bound list. If so we
3325 * have the best source.
3326 * - If not we must rotate amongst the addresses.
3330 * Do we need to pay attention to scope. We can have a private address
3331 * or a global address we are sourcing or sending to. So if we draw
3333 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3335 * ------------------------------------------
3336 * source * dest * result
3337 * -----------------------------------------
3338 * <a> Private * Global * NAT
3339 * -----------------------------------------
3340 * <b> Private * Private * No problem
3341 * -----------------------------------------
3342 * <c> Global * Private * Huh, How will this work?
3343 * -----------------------------------------
3344 * <d> Global * Global * No Problem
3345 *------------------------------------------
3346 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3348 *------------------------------------------
3349 * source * dest * result
3350 * -----------------------------------------
3351 * <a> Linklocal * Global *
3352 * -----------------------------------------
3353 * <b> Linklocal * Linklocal * No problem
3354 * -----------------------------------------
3355 * <c> Global * Linklocal * Huh, How will this work?
3356 * -----------------------------------------
3357 * <d> Global * Global * No Problem
3358 *------------------------------------------
3359 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3361 * And then we add to that what happens if there are multiple addresses
3362 * assigned to an interface. Remember the ifa on a ifn is a linked
3363 * list of addresses. So one interface can have more than one IP
3364 * address. What happens if we have both a private and a global
3365 * address? Do we then use context of destination to sort out which
3366 * one is best? And what about NAT's sending P->G may get you a NAT
3367 * translation, or should you select the G thats on the interface in
3372 * - count the number of addresses on the interface.
3373 * - if it is one, no problem except case <c>.
3374 * For <a> we will assume a NAT out there.
3375 * - if there are more than one, then we need to worry about scope P
3376 * or G. We should prefer G -> G and P -> P if possible.
3377 * Then as a secondary fall back to mixed types G->P being a last
3379 * - The above all works for bound all, but bound specific we need to
3380 * use the same concept but instead only consider the bound
3381 * addresses. If the bound set is NOT assigned to the interface then
3382 * we must use rotation amongst the bound addresses..
3384 if (ro->ro_rt == NULL) {
3386 * Need a route to cache.
3388 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
3390 if (ro->ro_rt == NULL) {
3393 fam = ro->ro_dst.sa_family;
3394 dest_is_priv = dest_is_loop = 0;
3395 /* Setup our scopes for the destination */
3399 /* Scope based on outbound address */
3400 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3403 /* mark it as local */
3404 net->addr_is_local = 1;
3406 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3413 /* Scope based on outbound address */
3414 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3415 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3417 * If the address is a loopback address, which
3418 * consists of "::1" OR "fe80::1%lo0", we are
3419 * loopback scope. But we don't use dest_is_priv
3420 * (link local addresses).
3424 /* mark it as local */
3425 net->addr_is_local = 1;
3427 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3433 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3434 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
3435 SCTP_IPI_ADDR_RLOCK();
3436 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3440 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3441 dest_is_priv, dest_is_loop,
3442 non_asoc_addr_ok, fam);
3443 SCTP_IPI_ADDR_RUNLOCK();
3450 answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
3451 vrf_id, dest_is_priv,
3453 non_asoc_addr_ok, fam);
3455 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3460 SCTP_IPI_ADDR_RUNLOCK();
3465 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
3468 struct sctp_sndinfo sndinfo;
3469 struct sctp_prinfo prinfo;
3470 struct sctp_authinfo authinfo;
3471 int tot_len, rem_len, cmsg_data_len, cmsg_data_off, off;
3475 * Independent of how many mbufs, find the c_type inside the control
3476 * structure and copy out the data.
3479 tot_len = SCTP_BUF_LEN(control);
3480 for (off = 0; off < tot_len; off += CMSG_ALIGN(cmh.cmsg_len)) {
3481 rem_len = tot_len - off;
3482 if (rem_len < (int)CMSG_ALIGN(sizeof(cmh))) {
3483 /* There is not enough room for one more. */
3486 m_copydata(control, off, sizeof(cmh), (caddr_t)&cmh);
3487 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3488 /* We dont't have a complete CMSG header. */
3491 if ((cmh.cmsg_len > INT_MAX) || ((int)cmh.cmsg_len > rem_len)) {
3492 /* We don't have the complete CMSG. */
3495 cmsg_data_len = (int)cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh));
3496 cmsg_data_off = off + CMSG_ALIGN(sizeof(cmh));
3497 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3498 ((c_type == cmh.cmsg_type) ||
3499 ((c_type == SCTP_SNDRCV) &&
3500 ((cmh.cmsg_type == SCTP_SNDINFO) ||
3501 (cmh.cmsg_type == SCTP_PRINFO) ||
3502 (cmh.cmsg_type == SCTP_AUTHINFO))))) {
3503 if (c_type == cmh.cmsg_type) {
3504 if (cpsize > INT_MAX) {
3507 if (cmsg_data_len < (int)cpsize) {
3510 /* It is exactly what we want. Copy it out. */
3511 m_copydata(control, cmsg_data_off, (int)cpsize, (caddr_t)data);
3514 struct sctp_sndrcvinfo *sndrcvinfo;
3516 sndrcvinfo = (struct sctp_sndrcvinfo *)data;
3518 if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
3521 memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
3523 switch (cmh.cmsg_type) {
3525 if (cmsg_data_len < (int)sizeof(struct sctp_sndinfo)) {
3528 m_copydata(control, cmsg_data_off, sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
3529 sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
3530 sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
3531 sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
3532 sndrcvinfo->sinfo_context = sndinfo.snd_context;
3533 sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
3536 if (cmsg_data_len < (int)sizeof(struct sctp_prinfo)) {
3539 m_copydata(control, cmsg_data_off, sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
3540 if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
3541 sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
3543 sndrcvinfo->sinfo_timetolive = 0;
3545 sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
3548 if (cmsg_data_len < (int)sizeof(struct sctp_authinfo)) {
3551 m_copydata(control, cmsg_data_off, sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
3552 sndrcvinfo->sinfo_keynumber_valid = 1;
3553 sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
3566 sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
3570 struct sctp_initmsg initmsg;
3572 struct sockaddr_in sin;
3575 struct sockaddr_in6 sin6;
3578 tlen = SCTP_BUF_LEN(control);
3581 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3582 /* There is not enough room for one more. */
3586 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3587 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3588 /* We dont't have a complete CMSG header. */
3592 if (((int)cmh.cmsg_len + at) > tlen) {
3593 /* We don't have the complete CMSG. */
3597 if (cmh.cmsg_level == IPPROTO_SCTP) {
3598 switch (cmh.cmsg_type) {
3600 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
3604 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
3605 if (initmsg.sinit_max_attempts)
3606 stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
3607 if (initmsg.sinit_num_ostreams)
3608 stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
3609 if (initmsg.sinit_max_instreams)
3610 stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
3611 if (initmsg.sinit_max_init_timeo)
3612 stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
3613 if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
3614 struct sctp_stream_out *tmp_str;
3616 #if defined(SCTP_DETAILED_STR_STATS)
3620 /* Default is NOT correct */
3621 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
3622 stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
3623 SCTP_TCB_UNLOCK(stcb);
3624 SCTP_MALLOC(tmp_str,
3625 struct sctp_stream_out *,
3626 (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
3628 SCTP_TCB_LOCK(stcb);
3629 if (tmp_str != NULL) {
3630 SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
3631 stcb->asoc.strmout = tmp_str;
3632 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
3634 stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
3636 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3637 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3638 stcb->asoc.strmout[i].chunks_on_queues = 0;
3639 stcb->asoc.strmout[i].next_mid_ordered = 0;
3640 stcb->asoc.strmout[i].next_mid_unordered = 0;
3641 #if defined(SCTP_DETAILED_STR_STATS)
3642 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
3643 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
3644 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
3647 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
3648 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
3650 stcb->asoc.strmout[i].sid = i;
3651 stcb->asoc.strmout[i].last_msg_incomplete = 0;
3652 stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
3653 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
3658 case SCTP_DSTADDRV4:
3659 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3663 memset(&sin, 0, sizeof(struct sockaddr_in));
3664 sin.sin_family = AF_INET;
3665 sin.sin_len = sizeof(struct sockaddr_in);
3666 sin.sin_port = stcb->rport;
3667 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3668 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3669 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3670 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3674 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3675 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3682 case SCTP_DSTADDRV6:
3683 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3687 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3688 sin6.sin6_family = AF_INET6;
3689 sin6.sin6_len = sizeof(struct sockaddr_in6);
3690 sin6.sin6_port = stcb->rport;
3691 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3692 if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
3693 IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
3698 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3699 in6_sin6_2_sin(&sin, &sin6);
3700 if ((sin.sin_addr.s_addr == INADDR_ANY) ||
3701 (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
3702 IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
3706 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
3707 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3713 if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
3714 SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
3724 at += CMSG_ALIGN(cmh.cmsg_len);
3729 static struct sctp_tcb *
3730 sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
3732 struct mbuf *control,
3733 struct sctp_nets **net_p,
3738 struct sctp_tcb *stcb;
3739 struct sockaddr *addr;
3741 struct sockaddr_in sin;
3744 struct sockaddr_in6 sin6;
3747 tlen = SCTP_BUF_LEN(control);
3750 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3751 /* There is not enough room for one more. */
3755 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3756 if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
3757 /* We dont't have a complete CMSG header. */
3761 if (((int)cmh.cmsg_len + at) > tlen) {
3762 /* We don't have the complete CMSG. */
3766 if (cmh.cmsg_level == IPPROTO_SCTP) {
3767 switch (cmh.cmsg_type) {
3769 case SCTP_DSTADDRV4:
3770 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
3774 memset(&sin, 0, sizeof(struct sockaddr_in));
3775 sin.sin_family = AF_INET;
3776 sin.sin_len = sizeof(struct sockaddr_in);
3777 sin.sin_port = port;
3778 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
3779 addr = (struct sockaddr *)&sin;
3783 case SCTP_DSTADDRV6:
3784 if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
3788 memset(&sin6, 0, sizeof(struct sockaddr_in6));
3789 sin6.sin6_family = AF_INET6;
3790 sin6.sin6_len = sizeof(struct sockaddr_in6);
3791 sin6.sin6_port = port;
3792 m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
3794 if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
3795 in6_sin6_2_sin(&sin, &sin6);
3796 addr = (struct sockaddr *)&sin;
3799 addr = (struct sockaddr *)&sin6;
3807 stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
3813 at += CMSG_ALIGN(cmh.cmsg_len);
3818 static struct mbuf *
3819 sctp_add_cookie(struct mbuf *init, int init_offset,
3820 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
3822 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3823 struct sctp_state_cookie *stc;
3824 struct sctp_paramhdr *ph;
3829 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3830 sizeof(struct sctp_paramhdr)), 0,
3831 M_NOWAIT, 1, MT_DATA);
3835 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
3836 if (copy_init == NULL) {
3840 #ifdef SCTP_MBUF_LOGGING
3841 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3842 sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
3845 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3847 if (copy_initack == NULL) {
3849 sctp_m_freem(copy_init);
3852 #ifdef SCTP_MBUF_LOGGING
3853 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3854 sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
3857 /* easy side: we just drop it on the end */
3858 ph = mtod(mret, struct sctp_paramhdr *);
3859 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3860 sizeof(struct sctp_paramhdr);
3861 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3862 sizeof(struct sctp_paramhdr));
3863 ph->param_type = htons(SCTP_STATE_COOKIE);
3864 ph->param_length = 0; /* fill in at the end */
3865 /* Fill in the stc cookie data */
3866 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3868 /* tack the INIT and then the INIT-ACK onto the chain */
3870 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3871 cookie_sz += SCTP_BUF_LEN(m_at);
3872 if (SCTP_BUF_NEXT(m_at) == NULL) {
3873 SCTP_BUF_NEXT(m_at) = copy_init;
3877 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3878 cookie_sz += SCTP_BUF_LEN(m_at);
3879 if (SCTP_BUF_NEXT(m_at) == NULL) {
3880 SCTP_BUF_NEXT(m_at) = copy_initack;
3884 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3885 cookie_sz += SCTP_BUF_LEN(m_at);
3886 if (SCTP_BUF_NEXT(m_at) == NULL) {
3890 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
3892 /* no space, so free the entire chain */
3896 SCTP_BUF_LEN(sig) = 0;
3897 SCTP_BUF_NEXT(m_at) = sig;
3899 foo = (uint8_t *)(mtod(sig, caddr_t)+sig_offset);
3900 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3902 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3903 cookie_sz += SCTP_SIGNATURE_SIZE;
3904 ph->param_length = htons(cookie_sz);
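/*
 * Return the ECT(0) codepoint to OR into the TOS / traffic class byte
 * when ECN is supported on the association; otherwise no ECT bit is
 * set (see the "tos_value |= sctp_get_ect(stcb);" callers below).
 */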
3910 sctp_get_ect(struct sctp_tcb *stcb)
3912 if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
3913 return (SCTP_ECT0_BIT);
3919 #if defined(INET) || defined(INET6)
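/*
 * No route or source address could be found for this packet: log the
 * destination, notify the ULP that the path is down if it had been
 * reachable, clear the REACHABLE/PF flags, and if this was the primary
 * destination switch over to an alternate net and invalidate the
 * cached source address.
 */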
3921 sctp_handle_no_route(struct sctp_tcb *stcb,
3922 struct sctp_nets *net,
3925 SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
3928 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
3929 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
3930 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3931 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3932 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
3933 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3937 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3938 net->dest_state &= ~SCTP_ADDR_PF;
3942 if (net == stcb->asoc.primary_destination) {
3943 /* need a new primary */
3944 struct sctp_nets *alt;
3946 alt = sctp_find_alternate_net(stcb, net, 0);
3948 if (stcb->asoc.alternate) {
3949 sctp_free_remote_addr(stcb->asoc.alternate);
3951 stcb->asoc.alternate = alt;
3952 atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
3953 if (net->ro._s_addr) {
3954 sctp_free_ifa(net->ro._s_addr);
3955 net->ro._s_addr = NULL;
3957 net->src_addr_selected = 0;
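/*
 * Low-level transmit routine shared by the chunk builders in this
 * file; the block comment inside describes the exact contract.  It
 * prepends the IP (and optional UDP encapsulation) header, fills in
 * the SCTP common header and checksum, performs source address
 * selection, and hands the packet to SCTP_IP_OUTPUT()/SCTP_IP6_OUTPUT().
 * See sctp_send_initiate() below for a representative caller.
 */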
3966 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3967 struct sctp_tcb *stcb, /* may be NULL */
3968 struct sctp_nets *net,
3969 struct sockaddr *to,
3971 uint32_t auth_offset,
3972 struct sctp_auth_chunk *auth,
3973 uint16_t auth_keyid,
3974 int nofragment_flag,
3981 union sctp_sockstore *over_addr,
3982 uint8_t mflowtype, uint32_t mflowid,
3983 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3984 int so_locked SCTP_UNUSED
3989 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3992 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
3993 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
3994 * - fill in the HMAC digest of any AUTH chunk in the packet.
3995 * - calculate and fill in the SCTP checksum.
3996 * - prepend an IP header.
3997 * - if boundall use INADDR_ANY.
3998 * - if boundspecific do source address selection.
3999 * - set the fragmentation option for IPv4.
4000 * - On return from IP output, check/adjust mtu size of output
4001 * interface and smallest_mtu size as well.
4003 /* Will need ifdefs around this */
4005 struct sctphdr *sctphdr;
4008 #if defined(INET) || defined(INET6)
4011 #if defined(INET) || defined(INET6)
4013 sctp_route_t *ro = NULL;
4014 struct udphdr *udp = NULL;
4017 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4018 struct socket *so = NULL;
4021 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
4022 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4026 #if defined(INET) || defined(INET6)
4028 vrf_id = stcb->asoc.vrf_id;
4030 vrf_id = inp->def_vrf_id;
4033 /* fill in the HMAC digest for any AUTH chunk in the packet */
4034 if ((auth != NULL) && (stcb != NULL)) {
4035 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
4038 tos_value = net->dscp;
4040 tos_value = stcb->asoc.default_dscp;
4042 tos_value = inp->sctp_ep.default_dscp;
4045 switch (to->sa_family) {
4049 struct ip *ip = NULL;
4050 sctp_route_t iproute;
4053 len = SCTP_MIN_V4_OVERHEAD;
4055 len += sizeof(struct udphdr);
4057 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4060 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4063 SCTP_ALIGN_TO_END(newm, len);
4064 SCTP_BUF_LEN(newm) = len;
4065 SCTP_BUF_NEXT(newm) = m;
4068 m->m_pkthdr.flowid = net->flowid;
4069 M_HASHTYPE_SET(m, net->flowtype);
4071 m->m_pkthdr.flowid = mflowid;
4072 M_HASHTYPE_SET(m, mflowtype);
4074 packet_length = sctp_calculate_len(m);
4075 ip = mtod(m, struct ip *);
4076 ip->ip_v = IPVERSION;
4077 ip->ip_hl = (sizeof(struct ip) >> 2);
4078 if (tos_value == 0) {
4080 * This means, in particular, that it is not set
4081 * at the SCTP layer, so use the value from the IP layer.
4084 tos_value = inp->ip_inp.inp.inp_ip_tos;
4088 tos_value |= sctp_get_ect(stcb);
4090 if ((nofragment_flag) && (port == 0)) {
4091 ip->ip_off = htons(IP_DF);
4093 ip->ip_off = htons(0);
4095 /* FreeBSD has a function for ip_id's */
4098 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
4099 ip->ip_len = htons(packet_length);
4100 ip->ip_tos = tos_value;
4102 ip->ip_p = IPPROTO_UDP;
4104 ip->ip_p = IPPROTO_SCTP;
4109 memset(&iproute, 0, sizeof(iproute));
4110 memcpy(&ro->ro_dst, to, to->sa_len);
4112 ro = (sctp_route_t *)&net->ro;
4114 /* Now the address selection part */
4115 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
4117 /* call the routine to select the src address */
4118 if (net && out_of_asoc_ok == 0) {
4119 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4120 sctp_free_ifa(net->ro._s_addr);
4121 net->ro._s_addr = NULL;
4122 net->src_addr_selected = 0;
4128 if (net->src_addr_selected == 0) {
4129 /* Cache the source address */
4130 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
4133 net->src_addr_selected = 1;
4135 if (net->ro._s_addr == NULL) {
4136 /* No route to host */
4137 net->src_addr_selected = 0;
4138 sctp_handle_no_route(stcb, net, so_locked);
4139 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4141 return (EHOSTUNREACH);
4143 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
4145 if (over_addr == NULL) {
4146 struct sctp_ifa *_lsrc;
4148 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4152 if (_lsrc == NULL) {
4153 sctp_handle_no_route(stcb, net, so_locked);
4154 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4156 return (EHOSTUNREACH);
4158 ip->ip_src = _lsrc->address.sin.sin_addr;
4159 sctp_free_ifa(_lsrc);
4161 ip->ip_src = over_addr->sin.sin_addr;
4162 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4166 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4167 sctp_handle_no_route(stcb, net, so_locked);
4168 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4170 return (EHOSTUNREACH);
4172 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
4173 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4174 udp->uh_dport = port;
4175 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
4177 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
4181 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4183 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
4186 sctphdr->src_port = src_port;
4187 sctphdr->dest_port = dest_port;
4188 sctphdr->v_tag = v_tag;
4189 sctphdr->checksum = 0;
4192 * If source address selection fails and we find no
4193 * route then the ip_output should fail as well with
4194 * a NO_ROUTE_TO_HOST type error. We probably should
4195 * catch that somewhere and abort the association
4196 * right away (assuming this is an INIT being sent).
4198 if (ro->ro_rt == NULL) {
4200 * src addr selection failed to find a route
4201 * (or valid source addr), so we can't get
4202 * there from here (yet)!
4204 sctp_handle_no_route(stcb, net, so_locked);
4205 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4207 return (EHOSTUNREACH);
4209 if (ro != &iproute) {
4210 memcpy(&iproute, ro, sizeof(*ro));
4212 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
4213 (uint32_t)(ntohl(ip->ip_src.s_addr)));
4214 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
4215 (uint32_t)(ntohl(ip->ip_dst.s_addr)));
4216 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
4219 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4220 /* failed to prepend data, give up */
4221 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4225 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
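/*
 * Checksum handling: when UDP-encapsulated, the CRC32c is computed in
 * software here and the UDP checksum is completed via
 * SCTP_ENABLE_UDP_CSUM(); otherwise CSUM_SCTP is set in csum_flags so
 * the CRC32c is delegated to the hardware/stack.
 */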
4227 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
4228 SCTP_STAT_INCR(sctps_sendswcrc);
4230 SCTP_ENABLE_UDP_CSUM(o_pak);
4233 m->m_pkthdr.csum_flags = CSUM_SCTP;
4234 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4235 SCTP_STAT_INCR(sctps_sendhwcrc);
4237 #ifdef SCTP_PACKET_LOGGING
4238 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4239 sctp_packet_log(o_pak);
4241 /* send it out. table id is taken from stcb */
4242 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4243 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4244 so = SCTP_INP_SO(inp);
4245 SCTP_SOCKET_UNLOCK(so, 0);
4248 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
4249 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4250 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4251 atomic_add_int(&stcb->asoc.refcnt, 1);
4252 SCTP_TCB_UNLOCK(stcb);
4253 SCTP_SOCKET_LOCK(so, 0);
4254 SCTP_TCB_LOCK(stcb);
4255 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4258 SCTP_STAT_INCR(sctps_sendpackets);
4259 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4261 SCTP_STAT_INCR(sctps_senderrors);
4263 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
4265 /* free temporary routes */
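/*
 * Post-send bookkeeping (sketch of the logic below): while the route
 * is still valid, re-read the path MTU from it, subtract the UDP
 * encapsulation overhead when tunneling, and shrink the association's
 * smallest_mtu if the route reports a smaller value; if the route was
 * freed, drop the cached source address so it is re-selected.
 */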
4268 if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4269 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4272 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4275 mtu -= sizeof(struct udphdr);
4277 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4278 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4282 } else if (ro->ro_rt == NULL) {
4283 /* route was freed */
4284 if (net->ro._s_addr &&
4285 net->src_addr_selected) {
4286 sctp_free_ifa(net->ro._s_addr);
4287 net->ro._s_addr = NULL;
4289 net->src_addr_selected = 0;
4298 uint32_t flowlabel, flowinfo;
4299 struct ip6_hdr *ip6h;
4300 struct route_in6 ip6route;
4302 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
4304 struct sockaddr_in6 lsa6_storage;
4306 u_short prev_port = 0;
4310 flowlabel = net->flowlabel;
4312 flowlabel = stcb->asoc.default_flowlabel;
4314 flowlabel = inp->sctp_ep.default_flowlabel;
4316 if (flowlabel == 0) {
4318 * This means, in particular, that it is not set
4319 * at the SCTP layer, so use the value from the IP layer.
4322 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
4324 flowlabel &= 0x000fffff;
4325 len = SCTP_MIN_OVERHEAD;
4327 len += sizeof(struct udphdr);
4329 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
4332 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4335 SCTP_ALIGN_TO_END(newm, len);
4336 SCTP_BUF_LEN(newm) = len;
4337 SCTP_BUF_NEXT(newm) = m;
4340 m->m_pkthdr.flowid = net->flowid;
4341 M_HASHTYPE_SET(m, net->flowtype);
4343 m->m_pkthdr.flowid = mflowid;
4344 M_HASHTYPE_SET(m, mflowtype);
4346 packet_length = sctp_calculate_len(m);
4348 ip6h = mtod(m, struct ip6_hdr *);
4349 /* protect *sin6 from overwrite */
4350 sin6 = (struct sockaddr_in6 *)to;
4354 /* KAME hack: embed scopeid */
4355 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4356 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4360 memset(&ip6route, 0, sizeof(ip6route));
4361 ro = (sctp_route_t *)&ip6route;
4362 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
4364 ro = (sctp_route_t *)&net->ro;
4367 * We assume here that inp_flow is in host byte
4368 * order within the TCB!
4370 if (tos_value == 0) {
4372 * This means, in particular, that it is not set
4373 * at the SCTP layer, so use the value from the IP layer.
4376 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
4380 tos_value |= sctp_get_ect(stcb);
4384 flowinfo |= tos_value;
4386 flowinfo |= flowlabel;
4387 ip6h->ip6_flow = htonl(flowinfo);
4389 ip6h->ip6_nxt = IPPROTO_UDP;
4391 ip6h->ip6_nxt = IPPROTO_SCTP;
4393 ip6h->ip6_plen = (uint16_t)(packet_length - sizeof(struct ip6_hdr));
4394 ip6h->ip6_dst = sin6->sin6_addr;
4397 * Add SRC address selection here: we can only reuse
4398 * to a limited degree the kame src-addr-sel, since
4399 * we can try their selection but it may not be
4402 memset(&lsa6_tmp, 0, sizeof(lsa6_tmp));
4403 lsa6_tmp.sin6_family = AF_INET6;
4404 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
4406 if (net && out_of_asoc_ok == 0) {
4407 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
4408 sctp_free_ifa(net->ro._s_addr);
4409 net->ro._s_addr = NULL;
4410 net->src_addr_selected = 0;
4416 if (net->src_addr_selected == 0) {
4417 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4418 /* KAME hack: embed scopeid */
4419 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4420 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4423 /* Cache the source address */
4424 net->ro._s_addr = sctp_source_address_selection(inp,
4430 (void)sa6_recoverscope(sin6);
4431 net->src_addr_selected = 1;
4433 if (net->ro._s_addr == NULL) {
4434 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
4435 net->src_addr_selected = 0;
4436 sctp_handle_no_route(stcb, net, so_locked);
4437 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4439 return (EHOSTUNREACH);
4441 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
4443 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
4444 /* KAME hack: embed scopeid */
4445 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
4446 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
4449 if (over_addr == NULL) {
4450 struct sctp_ifa *_lsrc;
4452 _lsrc = sctp_source_address_selection(inp, stcb, ro,
4456 if (_lsrc == NULL) {
4457 sctp_handle_no_route(stcb, net, so_locked);
4458 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4460 return (EHOSTUNREACH);
4462 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
4463 sctp_free_ifa(_lsrc);
4465 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
4466 SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
4468 (void)sa6_recoverscope(sin6);
4470 lsa6->sin6_port = inp->sctp_lport;
4472 if (ro->ro_rt == NULL) {
4474 * src addr selection failed to find a route
4475 * (or valid source addr), so we can't get there from here!
4478 sctp_handle_no_route(stcb, net, so_locked);
4479 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4481 return (EHOSTUNREACH);
4484 * XXX: sa6 may not have a valid sin6_scope_id in
4485 * the non-SCOPEDROUTING case.
4487 memset(&lsa6_storage, 0, sizeof(lsa6_storage));
4488 lsa6_storage.sin6_family = AF_INET6;
4489 lsa6_storage.sin6_len = sizeof(lsa6_storage);
4490 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4491 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
4492 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
4497 lsa6_storage.sin6_addr = lsa6->sin6_addr;
4498 lsa6_storage.sin6_port = inp->sctp_lport;
4499 lsa6 = &lsa6_storage;
4500 ip6h->ip6_src = lsa6->sin6_addr;
4503 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
4504 sctp_handle_no_route(stcb, net, so_locked);
4505 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
4507 return (EHOSTUNREACH);
4509 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4510 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4511 udp->uh_dport = port;
4512 udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
4514 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4516 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4519 sctphdr->src_port = src_port;
4520 sctphdr->dest_port = dest_port;
4521 sctphdr->v_tag = v_tag;
4522 sctphdr->checksum = 0;
4525 * We set the hop limit now since there is a good
4526 * chance that our ro pointer is now filled
4528 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4529 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4532 /* Copy to be sure something bad is not happening */
4533 sin6->sin6_addr = ip6h->ip6_dst;
4534 lsa6->sin6_addr = ip6h->ip6_src;
4537 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4538 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4539 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4540 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4541 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4543 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4545 * preserve the port and scope for link-local sends
4548 prev_scope = sin6->sin6_scope_id;
4549 prev_port = sin6->sin6_port;
4551 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4552 /* failed to prepend data, give up */
4554 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4557 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
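/*
 * As in the IPv4 case: software CRC32c when UDP-encapsulated, with the
 * UDP checksum computed explicitly via in6_cksum() (a computed 0 is
 * mapped to 0xffff); otherwise hardware offload via CSUM_SCTP_IPV6.
 */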
4559 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4560 SCTP_STAT_INCR(sctps_sendswcrc);
4561 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4562 udp->uh_sum = 0xffff;
4565 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
4566 m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
4567 SCTP_STAT_INCR(sctps_sendhwcrc);
4569 /* send it out. table id is taken from stcb */
4570 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4571 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4572 so = SCTP_INP_SO(inp);
4573 SCTP_SOCKET_UNLOCK(so, 0);
4576 #ifdef SCTP_PACKET_LOGGING
4577 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4578 sctp_packet_log(o_pak);
4580 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4581 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4582 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4583 atomic_add_int(&stcb->asoc.refcnt, 1);
4584 SCTP_TCB_UNLOCK(stcb);
4585 SCTP_SOCKET_LOCK(so, 0);
4586 SCTP_TCB_LOCK(stcb);
4587 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4591 /* for link local this must be done */
4592 sin6->sin6_scope_id = prev_scope;
4593 sin6->sin6_port = prev_port;
4595 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4596 SCTP_STAT_INCR(sctps_sendpackets);
4597 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4599 SCTP_STAT_INCR(sctps_senderrors);
4602 /* Now if we had a temp route free it */
4606 * PMTU check versus smallest asoc MTU goes here
4609 if (ro->ro_rt == NULL) {
4610 /* Route was freed */
4611 if (net->ro._s_addr &&
4612 net->src_addr_selected) {
4613 sctp_free_ifa(net->ro._s_addr);
4614 net->ro._s_addr = NULL;
4616 net->src_addr_selected = 0;
4618 if ((ro->ro_rt != NULL) && (net->ro._s_addr) &&
4619 ((net->dest_state & SCTP_ADDR_NO_PMTUD) == 0)) {
4622 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4625 mtu -= sizeof(struct udphdr);
4627 if ((stcb != NULL) && (stcb->asoc.smallest_mtu > mtu)) {
4628 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4633 if (ND_IFINFO(ifp)->linkmtu &&
4634 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4635 sctp_mtu_size_reset(inp,
4637 ND_IFINFO(ifp)->linkmtu);
4645 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4646 ((struct sockaddr *)to)->sa_family);
4648 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
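/*
 * Build and send an INIT chunk for this association: fill in the
 * chunk header, initiate tag, a_rwnd, stream counts and initial TSN,
 * append the optional parameters (adaptation layer indication, ECN,
 * PR-SCTP, NAT support, supported extensions, AUTH material, cookie
 * preservative, supported address types) and our addresses.  The INIT
 * timer is started and the chunk is handed to
 * sctp_lowlevel_chunk_output().
 */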
4655 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4656 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4661 struct mbuf *m, *m_last;
4662 struct sctp_nets *net;
4663 struct sctp_init_chunk *init;
4664 struct sctp_supported_addr_param *sup_addr;
4665 struct sctp_adaptation_layer_indication *ali;
4666 struct sctp_supported_chunk_types_param *pr_supported;
4667 struct sctp_paramhdr *ph;
4668 int cnt_inits_to = 0;
4670 uint16_t num_ext, chunk_len, padding_len, parameter_len;
4672 /* INITs always go to the primary (and usually ONLY) address */
4673 net = stcb->asoc.primary_destination;
4675 net = TAILQ_FIRST(&stcb->asoc.nets);
4680 /* we confirm any address we send an INIT to */
4681 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4682 (void)sctp_set_primary_addr(stcb, NULL, net);
4684 /* we confirm any address we send an INIT to */
4685 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4687 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4689 if (net->ro._l_addr.sa.sa_family == AF_INET6) {
4691 * special hook: if we are sending to a link-local address it will
4692 * not show up in our private address count.
4694 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
4698 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4699 /* This case should not happen */
4700 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4703 /* start the INIT timer */
4704 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4706 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
4708 /* No memory, INIT timer will re-attempt. */
4709 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4712 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
4714 /* Now let's put the chunk header in place */
4715 init = mtod(m, struct sctp_init_chunk *);
4716 /* now the chunk header */
4717 init->ch.chunk_type = SCTP_INITIATION;
4718 init->ch.chunk_flags = 0;
4719 /* fill in later from mbuf we build */
4720 init->ch.chunk_length = 0;
4721 /* place in my tag */
4722 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4723 /* set up some of the credits. */
4724 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4725 SCTP_MINIMAL_RWND));
4726 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4727 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4728 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4730 /* Adaptation layer indication parameter */
4731 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
4732 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
4733 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
4734 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4735 ali->ph.param_length = htons(parameter_len);
4736 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
4737 chunk_len += parameter_len;
4740 if (stcb->asoc.ecn_supported == 1) {
4741 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4742 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4743 ph->param_type = htons(SCTP_ECN_CAPABLE);
4744 ph->param_length = htons(parameter_len);
4745 chunk_len += parameter_len;
4747 /* PR-SCTP supported parameter */
4748 if (stcb->asoc.prsctp_supported == 1) {
4749 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4750 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4751 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
4752 ph->param_length = htons(parameter_len);
4753 chunk_len += parameter_len;
4755 /* Add NAT friendly parameter. */
4756 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4757 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4758 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
4759 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4760 ph->param_length = htons(parameter_len);
4761 chunk_len += parameter_len;
4763 /* And now tell the peer which extensions we support */
4765 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
4766 if (stcb->asoc.prsctp_supported == 1) {
4767 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4768 if (stcb->asoc.idata_supported) {
4769 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
4772 if (stcb->asoc.auth_supported == 1) {
4773 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4775 if (stcb->asoc.asconf_supported == 1) {
4776 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4777 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4779 if (stcb->asoc.reconfig_supported == 1) {
4780 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4782 if (stcb->asoc.idata_supported) {
4783 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
4785 if (stcb->asoc.nrsack_supported == 1) {
4786 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4788 if (stcb->asoc.pktdrop_supported == 1) {
4789 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4792 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
4793 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4794 pr_supported->ph.param_length = htons(parameter_len);
4795 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4796 chunk_len += parameter_len;
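/*
 * Note on padding: the pad bytes for the previous parameter are not
 * written immediately; padding_len is carried forward and the bytes
 * are zeroed just before the next parameter is appended (or by
 * sctp_add_pad_tombuf() once all parameters are in place).
 */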
4798 /* add authentication parameters */
4799 if (stcb->asoc.auth_supported) {
4800 /* attach RANDOM parameter, if available */
4801 if (stcb->asoc.authinfo.random != NULL) {
4802 struct sctp_auth_random *randp;
4804 if (padding_len > 0) {
4805 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4806 chunk_len += padding_len;
4809 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
4810 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
4811 /* random key already contains the header */
4812 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
4813 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4814 chunk_len += parameter_len;
4816 /* add HMAC_ALGO parameter */
4817 if (stcb->asoc.local_hmacs != NULL) {
4818 struct sctp_auth_hmac_algo *hmacs;
4820 if (padding_len > 0) {
4821 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4822 chunk_len += padding_len;
4825 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
4826 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
4827 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
4828 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4829 hmacs->ph.param_length = htons(parameter_len);
4830 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
4831 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4832 chunk_len += parameter_len;
4834 /* add CHUNKS parameter */
4835 if (stcb->asoc.local_auth_chunks != NULL) {
4836 struct sctp_auth_chunk_list *chunks;
4838 if (padding_len > 0) {
4839 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4840 chunk_len += padding_len;
4843 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
4844 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
4845 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
4846 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4847 chunks->ph.param_length = htons(parameter_len);
4848 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
4849 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
4850 chunk_len += parameter_len;
4853 /* now any cookie time extensions */
4854 if (stcb->asoc.cookie_preserve_req) {
4855 struct sctp_cookie_perserve_param *cookie_preserve;
4857 if (padding_len > 0) {
4858 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4859 chunk_len += padding_len;
4862 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
4863 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t)+chunk_len);
4864 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4865 cookie_preserve->ph.param_length = htons(parameter_len);
4866 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4867 stcb->asoc.cookie_preserve_req = 0;
4868 chunk_len += parameter_len;
4870 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
4873 if (padding_len > 0) {
4874 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
4875 chunk_len += padding_len;
4878 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
4879 if (stcb->asoc.scope.ipv4_addr_legal) {
4880 parameter_len += (uint16_t)sizeof(uint16_t);
4882 if (stcb->asoc.scope.ipv6_addr_legal) {
4883 parameter_len += (uint16_t)sizeof(uint16_t);
4885 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t)+chunk_len);
4886 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4887 sup_addr->ph.param_length = htons(parameter_len);
4889 if (stcb->asoc.scope.ipv4_addr_legal) {
4890 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
4892 if (stcb->asoc.scope.ipv6_addr_legal) {
4893 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
4895 padding_len = 4 - 2 * i;
4896 chunk_len += parameter_len;
4898 SCTP_BUF_LEN(m) = chunk_len;
4899 /* now the addresses */
4901 * To optimize this we could put the scoping stuff into a structure
4902 * and remove the individual uint8's from the assoc structure. Then
4903 * we could just pass the sctp_ifa in for the address within the stcb.
4904 * But for now this is a quick hack to get the address stuff teased apart.
4906 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
4908 &padding_len, &chunk_len);
4910 init->ch.chunk_length = htons(chunk_len);
4911 if (padding_len > 0) {
4912 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
4917 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4918 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
4919 (struct sockaddr *)&net->ro._l_addr,
4920 m, 0, NULL, 0, 0, 0, 0,
4921 inp->sctp_lport, stcb->rport, htonl(0),
4925 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
4926 if (error == ENOBUFS) {
4927 stcb->asoc.ifp_had_enobuf = 1;
4928 SCTP_STAT_INCR(sctps_lowlevelerr);
4931 stcb->asoc.ifp_had_enobuf = 0;
4933 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4934 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4938 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4939 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4942 * Given a mbuf containing an INIT or INIT-ACK with the param_offset
4943 * being equal to the beginning of the params, i.e. (iphlen +
4944 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4945 * end of the mbuf, verifying that all parameters are known.
4947 * For unknown parameters build and return a mbuf with
4948 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4949 * processing this chunk stop, and set *abort_processing to 1.
4951 * By having param_offset be pre-set to where parameters begin, it is
4952 * hoped that this routine may be reused in the future by new features.
4955 struct sctp_paramhdr *phdr, params;
4957 struct mbuf *mat, *op_err;
4958 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4959 int at, limit, pad_needed;
4960 uint16_t ptype, plen, padded_size;
4963 *abort_processing = 0;
4966 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4969 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4970 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4971 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4972 ptype = ntohs(phdr->param_type);
4973 plen = ntohs(phdr->param_length);
4974 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4975 /* invalid parameter length */
4976 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4979 limit -= SCTP_SIZE32(plen);
4981 * All parameters for all chunks that we know/understand are
4982 * listed here. We process them in other places and take the
4983 * appropriate stop action per the upper bits. However, this
4984 * is the generic routine that processors can call to get back
4985 * an operational error to either incorporate (init-ack) or send.
4987 padded_size = SCTP_SIZE32(plen);
4989 /* Params with variable size */
4990 case SCTP_HEARTBEAT_INFO:
4991 case SCTP_STATE_COOKIE:
4992 case SCTP_UNRECOG_PARAM:
4993 case SCTP_ERROR_CAUSE_IND:
4997 /* Params with variable size within a range */
4998 case SCTP_CHUNK_LIST:
4999 case SCTP_SUPPORTED_CHUNK_EXT:
5000 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
5001 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
5006 case SCTP_SUPPORTED_ADDRTYPE:
5007 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
5008 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
5014 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
5015 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
5020 case SCTP_SET_PRIM_ADDR:
5021 case SCTP_DEL_IP_ADDRESS:
5022 case SCTP_ADD_IP_ADDRESS:
5023 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
5024 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
5025 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
5030 /* Params with a fixed size */
5031 case SCTP_IPV4_ADDRESS:
5032 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
5033 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
5038 case SCTP_IPV6_ADDRESS:
5039 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
5040 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
5045 case SCTP_COOKIE_PRESERVE:
5046 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
5047 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
5052 case SCTP_HAS_NAT_SUPPORT:
5055 case SCTP_PRSCTP_SUPPORTED:
5056 if (padded_size != sizeof(struct sctp_paramhdr)) {
5057 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
5062 case SCTP_ECN_CAPABLE:
5063 if (padded_size != sizeof(struct sctp_paramhdr)) {
5064 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
5069 case SCTP_ULP_ADAPTATION:
5070 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
5071 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
5076 case SCTP_SUCCESS_REPORT:
5077 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
5078 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
5083 case SCTP_HOSTNAME_ADDRESS:
5085 /* We can NOT handle HOST NAME addresses!! */
5088 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
5089 *abort_processing = 1;
5090 if (op_err == NULL) {
5091 /* Ok, need to try to get an mbuf */
5093 l_len = SCTP_MIN_OVERHEAD;
5095 l_len = SCTP_MIN_V4_OVERHEAD;
5097 l_len += sizeof(struct sctp_chunkhdr);
5099 l_len += sizeof(struct sctp_paramhdr);
5100 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5102 SCTP_BUF_LEN(op_err) = 0;
5104 * pre-reserve space for the IP,
5105 * SCTP, and chunk headers.
5109 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5111 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5113 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5114 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5118 /* If we have space */
5119 struct sctp_paramhdr s;
5122 uint32_t cpthis = 0;
5124 pad_needed = 4 - (err_at % 4);
5125 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5126 err_at += pad_needed;
5128 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
5129 s.param_length = htons(sizeof(s) + plen);
5130 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5131 err_at += sizeof(s);
5132 if (plen > sizeof(tempbuf)) {
5133 plen = sizeof(tempbuf);
5135 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
5137 sctp_m_freem(op_err);
5139 * we are out of memory but
5140 * we still need to have a
5141 * look at what to do (the
5142 * system is in trouble though).
5147 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5154 * we do not recognize the parameter; figure out what we do.
5157 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
5158 if ((ptype & 0x4000) == 0x4000) {
5159 /* Report bit is set?? */
5160 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
5161 if (op_err == NULL) {
5164 /* Ok need to try to get an mbuf */
5166 l_len = SCTP_MIN_OVERHEAD;
5168 l_len = SCTP_MIN_V4_OVERHEAD;
5170 l_len += sizeof(struct sctp_chunkhdr);
5172 l_len += sizeof(struct sctp_paramhdr);
5173 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5175 SCTP_BUF_LEN(op_err) = 0;
5177 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5179 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5181 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5182 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5186 /* If we have space */
5187 struct sctp_paramhdr s;
5190 uint32_t cpthis = 0;
5192 pad_needed = 4 - (err_at % 4);
5193 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5194 err_at += pad_needed;
5196 s.param_type = htons(SCTP_UNRECOG_PARAM);
5197 s.param_length = htons(sizeof(s) + plen);
5198 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5199 err_at += sizeof(s);
5200 if (plen > sizeof(tempbuf)) {
5201 plen = sizeof(tempbuf);
5203 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
5205 sctp_m_freem(op_err);
5207 * we are out of memory but
5208 * we still need to have a
5209 * look at what to do (the
5210 * system is in trouble though).
5214 goto more_processing;
5216 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
5221 if ((ptype & 0x8000) == 0x0000) {
5222 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
5225 /* skip this chunk and continue processing */
5226 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
5227 at += SCTP_SIZE32(plen);
5232 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
5236 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
5237 *abort_processing = 1;
5238 if ((op_err == NULL) && phdr) {
5241 l_len = SCTP_MIN_OVERHEAD;
5243 l_len = SCTP_MIN_V4_OVERHEAD;
5245 l_len += sizeof(struct sctp_chunkhdr);
5246 l_len += (2 * sizeof(struct sctp_paramhdr));
5247 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
5249 SCTP_BUF_LEN(op_err) = 0;
5251 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
5253 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
5255 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
5256 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
5259 if ((op_err) && phdr) {
5260 struct sctp_paramhdr s;
5263 uint32_t cpthis = 0;
5265 pad_needed = 4 - (err_at % 4);
5266 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
5267 err_at += pad_needed;
5269 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5270 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
5271 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
5272 err_at += sizeof(s);
5273 /* Only copy back the p-hdr that caused the issue */
5274 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
5280 sctp_are_there_new_addresses(struct sctp_association *asoc,
5281 struct mbuf *in_initpkt, int offset, struct sockaddr *src)
5284 * Given an INIT packet, look through the packet to verify that there
5285 * are NO new addresses. As we go through the parameters, add reports
5286 * of any un-understood parameters that require an error. Also we
5287 * must return (1) to drop the packet if we see an un-understood
5288 * parameter that tells us to drop the chunk.
5290 struct sockaddr *sa_touse;
5291 struct sockaddr *sa;
5292 struct sctp_paramhdr *phdr, params;
5293 uint16_t ptype, plen;
5295 struct sctp_nets *net;
5298 struct sockaddr_in sin4, *sa4;
5301 struct sockaddr_in6 sin6, *sa6;
5305 memset(&sin4, 0, sizeof(sin4));
5306 sin4.sin_family = AF_INET;
5307 sin4.sin_len = sizeof(sin4);
5310 memset(&sin6, 0, sizeof(sin6));
5311 sin6.sin6_family = AF_INET6;
5312 sin6.sin6_len = sizeof(sin6);
5314 /* First what about the src address of the pkt ? */
5316 switch (src->sa_family) {
5319 if (asoc->scope.ipv4_addr_legal) {
5326 if (asoc->scope.ipv6_addr_legal) {
5337 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5338 sa = (struct sockaddr *)&net->ro._l_addr;
5339 if (sa->sa_family == src->sa_family) {
5341 if (sa->sa_family == AF_INET) {
5342 struct sockaddr_in *src4;
5344 sa4 = (struct sockaddr_in *)sa;
5345 src4 = (struct sockaddr_in *)src;
5346 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
5353 if (sa->sa_family == AF_INET6) {
5354 struct sockaddr_in6 *src6;
5356 sa6 = (struct sockaddr_in6 *)sa;
5357 src6 = (struct sockaddr_in6 *)src;
5358 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
5367 /* New address added! no need to look further. */
5371 /* Ok so far, let's munge through the rest of the packet */
5372 offset += sizeof(struct sctp_init_chunk);
5373 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5376 ptype = ntohs(phdr->param_type);
5377 plen = ntohs(phdr->param_length);
5380 case SCTP_IPV4_ADDRESS:
5382 struct sctp_ipv4addr_param *p4, p4_buf;
5384 if (plen != sizeof(struct sctp_ipv4addr_param)) {
5387 phdr = sctp_get_next_param(in_initpkt, offset,
5388 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
5392 if (asoc->scope.ipv4_addr_legal) {
5393 p4 = (struct sctp_ipv4addr_param *)phdr;
5394 sin4.sin_addr.s_addr = p4->addr;
5395 sa_touse = (struct sockaddr *)&sin4;
5401 case SCTP_IPV6_ADDRESS:
5403 struct sctp_ipv6addr_param *p6, p6_buf;
5405 if (plen != sizeof(struct sctp_ipv6addr_param)) {
5408 phdr = sctp_get_next_param(in_initpkt, offset,
5409 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
5413 if (asoc->scope.ipv6_addr_legal) {
5414 p6 = (struct sctp_ipv6addr_param *)phdr;
5415 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
5417 sa_touse = (struct sockaddr *)&sin6;
5427 /* ok, sa_touse points to one to check */
5429 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5430 sa = (struct sockaddr *)&net->ro._l_addr;
5431 if (sa->sa_family != sa_touse->sa_family) {
5435 if (sa->sa_family == AF_INET) {
5436 sa4 = (struct sockaddr_in *)sa;
5437 if (sa4->sin_addr.s_addr ==
5438 sin4.sin_addr.s_addr) {
5445 if (sa->sa_family == AF_INET6) {
5446 sa6 = (struct sockaddr_in6 *)sa;
5447 if (SCTP6_ARE_ADDR_EQUAL(
5456 /* New addr added! no need to look further */
5460 offset += SCTP_SIZE32(plen);
5461 phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
5467 * Given an mbuf chain that was sent into us containing an INIT, build an
5468 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
5469 * done a pullup to include the IPv6/IPv4 header, the SCTP header, and the
5470 * initial part of the INIT message (i.e. the struct sctp_init_msg).
5473 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
5474 struct sctp_nets *src_net, struct mbuf *init_pkt,
5475 int iphlen, int offset,
5476 struct sockaddr *src, struct sockaddr *dst,
5477 struct sctphdr *sh, struct sctp_init_chunk *init_chk,
5478 uint8_t mflowtype, uint32_t mflowid,
5479 uint32_t vrf_id, uint16_t port)
5481 struct sctp_association *asoc;
5482 struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
5483 struct sctp_init_ack_chunk *initack;
5484 struct sctp_adaptation_layer_indication *ali;
5485 struct sctp_supported_chunk_types_param *pr_supported;
5486 struct sctp_paramhdr *ph;
5487 union sctp_sockstore *over_addr;
5488 struct sctp_scoping scp;
5491 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
5492 struct sockaddr_in *src4 = (struct sockaddr_in *)src;
5493 struct sockaddr_in *sin;
5496 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
5497 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
5498 struct sockaddr_in6 *sin6;
5500 struct sockaddr *to;
5501 struct sctp_state_cookie stc;
5502 struct sctp_nets *net = NULL;
5503 uint8_t *signature = NULL;
5504 int cnt_inits_to = 0;
5505 uint16_t his_limit, i_want;
5507 int nat_friendly = 0;
5510 uint16_t num_ext, chunk_len, padding_len, parameter_len;
5517 if ((asoc != NULL) &&
5518 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT)) {
5519 if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
5521 * new addresses, out of here in non-cookie-wait states
5524 * Send an ABORT, without the new address error
5525 * cause. This looks no different than if no
5526 * listener was present.
5528 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5530 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5531 mflowtype, mflowid, inp->fibnum,
5535 if (src_net != NULL && (src_net->port != port)) {
5537 * change of remote encapsulation port, out of here
5538 * in non-cookie-wait states
5540 * Send an ABORT, without a specific error cause.
5541 * This looks no different than if no listener was present.
5544 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5545 "Remote encapsulation port changed");
5546 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
5547 mflowtype, mflowid, inp->fibnum,
5553 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5554 (offset + sizeof(struct sctp_init_chunk)),
5555 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5558 if (op_err == NULL) {
5559 char msg[SCTP_DIAG_INFO_LEN];
5561 snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
5562 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
5565 sctp_send_abort(init_pkt, iphlen, src, dst, sh,
5566 init_chk->init.initiate_tag, op_err,
5567 mflowtype, mflowid, inp->fibnum,
5571 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
5573 /* No memory, INIT timer will re-attempt. */
5575 sctp_m_freem(op_err);
5578 chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
5582 * We might not overwrite the identification[] completely and on
5583 * some platforms time_entered will contain some padding. Therefore
5584 * zero out the cookie to avoid putting uninitialized memory on the wire.
5587 memset(&stc, 0, sizeof(struct sctp_state_cookie));
5589 /* the time I built cookie */
5590 (void)SCTP_GETTIME_TIMEVAL(&now);
5591 stc.time_entered.tv_sec = now.tv_sec;
5592 stc.time_entered.tv_usec = now.tv_usec;
5594 /* populate any tie tags */
5596 /* unlock before tag selections */
5597 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5598 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5599 stc.cookie_life = asoc->cookie_life;
5600 net = asoc->primary_destination;
5602 stc.tie_tag_my_vtag = 0;
5603 stc.tie_tag_peer_vtag = 0;
5604 /* life I will award this cookie */
5605 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5608 /* copy in the ports for later check */
5609 stc.myport = sh->dest_port;
5610 stc.peerport = sh->src_port;
5613 * If we wanted to honor cookie life extensions, we would add to
5614 * stc.cookie_life. For now we should NOT honor any extension
5616 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5617 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5618 stc.ipv6_addr_legal = 1;
5619 if (SCTP_IPV6_V6ONLY(inp)) {
5620 stc.ipv4_addr_legal = 0;
5622 stc.ipv4_addr_legal = 1;
5625 stc.ipv6_addr_legal = 0;
5626 stc.ipv4_addr_legal = 1;
5631 switch (dst->sa_family) {
5635 /* lookup address */
5636 stc.address[0] = src4->sin_addr.s_addr;
5640 stc.addr_type = SCTP_IPV4_ADDRESS;
5641 /* local from address */
5642 stc.laddress[0] = dst4->sin_addr.s_addr;
5643 stc.laddress[1] = 0;
5644 stc.laddress[2] = 0;
5645 stc.laddress[3] = 0;
5646 stc.laddr_type = SCTP_IPV4_ADDRESS;
5647 /* scope_id is only for v6 */
5649 if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
5650 (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))) {
5653 /* Must use the address in this case */
5654 if (sctp_is_address_on_local_host(src, vrf_id)) {
5655 stc.loopback_scope = 1;
5658 stc.local_scope = 0;
5666 stc.addr_type = SCTP_IPV6_ADDRESS;
5667 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
5668 stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
5669 if (sctp_is_address_on_local_host(src, vrf_id)) {
5670 stc.loopback_scope = 1;
5671 stc.local_scope = 0;
5674 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
5675 IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
5677 * If the new destination or source
5678 * is a LINK_LOCAL address, we must
5679 * have both site and local scope in
5680 * common. Don't set local scope,
5681 * though, since we must depend on the
5682 * source to be added implicitly. We
5683 * cannot assume that just because we
5684 * share one link all links are common.
5687 stc.local_scope = 0;
5691 * we start counting for the private
5692 * address stuff at 1, since the
5693 * link-local address we source from
5694 * won't show up in our scoped count.
5698 * pull out the scope_id from the incoming packet
5701 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
5702 IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
5704 * If the new destination or source
5705 * is SITE_LOCAL then we must have
5706 * site scope in common.
5710 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
5711 stc.laddr_type = SCTP_IPV6_ADDRESS;
5721 /* set the scope per the existing tcb */
5724 struct sctp_nets *lnet;
5727 stc.loopback_scope = asoc->scope.loopback_scope;
5728 stc.ipv4_scope = asoc->scope.ipv4_local_scope;
5729 stc.site_scope = asoc->scope.site_scope;
5730 stc.local_scope = asoc->scope.local_scope;
5732 /* Why do we not consider IPv4 LL addresses? */
5733 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5734 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5735 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5737 * if we have a LL address, start counting at 1
5745 /* use the net pointer */
5746 to = (struct sockaddr *)&net->ro._l_addr;
5747 switch (to->sa_family) {
5750 sin = (struct sockaddr_in *)to;
5751 stc.address[0] = sin->sin_addr.s_addr;
5755 stc.addr_type = SCTP_IPV4_ADDRESS;
5756 if (net->src_addr_selected == 0) {
5758 * strange case here, the INIT should have
5759 * done the selection.
5761 net->ro._s_addr = sctp_source_address_selection(inp,
5762 stcb, (sctp_route_t *)&net->ro,
5764 if (net->ro._s_addr == NULL)
5767 net->src_addr_selected = 1;
5770 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5771 stc.laddress[1] = 0;
5772 stc.laddress[2] = 0;
5773 stc.laddress[3] = 0;
5774 stc.laddr_type = SCTP_IPV4_ADDRESS;
5775 /* scope_id is only for v6 */
5781 sin6 = (struct sockaddr_in6 *)to;
5782 memcpy(&stc.address, &sin6->sin6_addr,
5783 sizeof(struct in6_addr));
5784 stc.addr_type = SCTP_IPV6_ADDRESS;
5785 stc.scope_id = sin6->sin6_scope_id;
5786 if (net->src_addr_selected == 0) {
5788 * strange case here, the INIT should have
5789 * done the selection.
5791 net->ro._s_addr = sctp_source_address_selection(inp,
5792 stcb, (sctp_route_t *)&net->ro,
5794 if (net->ro._s_addr == NULL)
5797 net->src_addr_selected = 1;
5799 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5800 sizeof(struct in6_addr));
5801 stc.laddr_type = SCTP_IPV6_ADDRESS;
5806 /* Now let's put the INIT-ACK chunk in place */
5807 initack = mtod(m, struct sctp_init_ack_chunk *);
5808 /* Save it off for quick ref */
5809 stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
5811 memcpy(stc.identification, SCTP_VERSION_STRING,
5812 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5813 memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
5814 /* now the chunk header */
5815 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5816 initack->ch.chunk_flags = 0;
5817 /* fill in later from mbuf we build */
5818 initack->ch.chunk_length = 0;
5819 /* place in my tag */
5820 if ((asoc != NULL) &&
5821 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5822 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5823 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5824 /* re-use the v-tags and init-seq here */
5825 initack->init.initiate_tag = htonl(asoc->my_vtag);
5826 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5828 uint32_t vtag, itsn;
5831 atomic_add_int(&asoc->refcnt, 1);
5832 SCTP_TCB_UNLOCK(stcb);
5834 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5835 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5837 * Got a duplicate vtag from a peer behind a
5838 * NAT; make sure we don't use it.
5842 initack->init.initiate_tag = htonl(vtag);
5843 /* get a TSN to use too */
5844 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5845 initack->init.initial_tsn = htonl(itsn);
5846 SCTP_TCB_LOCK(stcb);
5847 atomic_add_int(&asoc->refcnt, -1);
5849 SCTP_INP_INCR_REF(inp);
5850 SCTP_INP_RUNLOCK(inp);
5851 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5852 initack->init.initiate_tag = htonl(vtag);
5853 /* get a TSN to use too */
5854 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5855 SCTP_INP_RLOCK(inp);
5856 SCTP_INP_DECR_REF(inp);
5859 /* save away my tag */
5860 stc.my_vtag = initack->init.initiate_tag;
5862 /* set up some of the credits. */
5863 so = inp->sctp_socket;
5865 /* memory problem */
5869 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5871 /* set what I want */
5872 his_limit = ntohs(init_chk->init.num_inbound_streams);
5873 /* choose what I want */
5875 if (asoc->streamoutcnt > asoc->pre_open_streams) {
5876 i_want = asoc->streamoutcnt;
5878 i_want = asoc->pre_open_streams;
5881 i_want = inp->sctp_ep.pre_open_stream_count;
5883 if (his_limit < i_want) {
5884 /* I Want more :< */
5885 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5887 /* I can have what I want :> */
5888 initack->init.num_outbound_streams = htons(i_want);
5890 /* tell him his limit. */
5891 initack->init.num_inbound_streams =
5892 htons(inp->sctp_ep.max_open_streams_intome);
5894 /* adaptation layer indication parameter */
5895 if (inp->sctp_ep.adaptation_layer_indicator_provided) {
5896 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
5897 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t)+chunk_len);
5898 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5899 ali->ph.param_length = htons(parameter_len);
5900 ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
5901 chunk_len += parameter_len;
5904 if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
5905 ((asoc == NULL) && (inp->ecn_supported == 1))) {
5906 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5907 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5908 ph->param_type = htons(SCTP_ECN_CAPABLE);
5909 ph->param_length = htons(parameter_len);
5910 chunk_len += parameter_len;
5912 /* PR-SCTP supported parameter */
5913 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5914 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5915 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5916 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5917 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
5918 ph->param_length = htons(parameter_len);
5919 chunk_len += parameter_len;
5921 /* Add NAT friendly parameter */
5923 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
5924 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+chunk_len);
5925 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5926 ph->param_length = htons(parameter_len);
5927 chunk_len += parameter_len;
5929 /* And now tell the peer which extensions we support */
5931 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+chunk_len);
5932 if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
5933 ((asoc == NULL) && (inp->prsctp_supported == 1))) {
5934 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5935 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5936 ((asoc == NULL) && (inp->idata_supported == 1))) {
5937 pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
5940 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5941 ((asoc == NULL) && (inp->auth_supported == 1))) {
5942 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5944 if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
5945 ((asoc == NULL) && (inp->asconf_supported == 1))) {
5946 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5947 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5949 if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
5950 ((asoc == NULL) && (inp->reconfig_supported == 1))) {
5951 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5953 if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
5954 ((asoc == NULL) && (inp->idata_supported == 1))) {
5955 pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
5957 if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
5958 ((asoc == NULL) && (inp->nrsack_supported == 1))) {
5959 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5961 if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
5962 ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
5963 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5966 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
5967 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5968 pr_supported->ph.param_length = htons(parameter_len);
5969 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5970 chunk_len += parameter_len;
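/*
 * Parameters are padded to a 4-byte boundary, but the pad bytes are
 * only written out when the next parameter is placed (see the memsets
 * below). For example, a parameter_len of 9 gives SCTP_SIZE32(9) = 12,
 * so padding_len is 3 while chunk_len still counts only the 9 real
 * bytes.
 */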
5972 /* add authentication parameters */
5973 if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
5974 ((asoc == NULL) && (inp->auth_supported == 1))) {
5975 struct sctp_auth_random *randp;
5976 struct sctp_auth_hmac_algo *hmacs;
5977 struct sctp_auth_chunk_list *chunks;
5979 if (padding_len > 0) {
5980 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
5981 chunk_len += padding_len;
5984 /* generate and add RANDOM parameter */
5985 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+chunk_len);
5986 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
5987 SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5988 randp->ph.param_type = htons(SCTP_RANDOM);
5989 randp->ph.param_length = htons(parameter_len);
5990 SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
5991 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
5992 chunk_len += parameter_len;
5994 if (padding_len > 0) {
5995 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
5996 chunk_len += padding_len;
5999 /* add HMAC_ALGO parameter */
6000 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+chunk_len);
6001 parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
6002 sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
6003 (uint8_t *)hmacs->hmac_ids);
6004 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
6005 hmacs->ph.param_length = htons(parameter_len);
6006 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6007 chunk_len += parameter_len;
6009 if (padding_len > 0) {
6010 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6011 chunk_len += padding_len;
6014 /* add CHUNKS parameter */
6015 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+chunk_len);
6016 parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
6017 sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
6018 chunks->chunk_types);
6019 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
6020 chunks->ph.param_length = htons(parameter_len);
6021 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6022 chunk_len += parameter_len;
6024 SCTP_BUF_LEN(m) = chunk_len;
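/*
 * chunk_len now covers everything written into the first mbuf. The
 * address parameters, an optional operational error and the state
 * cookie are added below; they keep updating chunk_len so that the
 * chunk length field can be filled in once everything is in place.
 */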
6026 /* now the addresses */
6028 * To optimize this we could put the scoping stuff into a structure
6029 * and remove the individual uint8's from the stc structure. Then we
6030 * could just stuff the address into the stc directly, but for now this
6031 * is a quick hack to get the address stuff teased apart.
6033 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
6034 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
6035 scp.loopback_scope = stc.loopback_scope;
6036 scp.ipv4_local_scope = stc.ipv4_scope;
6037 scp.local_scope = stc.local_scope;
6038 scp.site_scope = stc.site_scope;
6039 m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
6041 &padding_len, &chunk_len);
6042 /* padding_len can only be positive, if no addresses have been added */
6043 if (padding_len > 0) {
6044 memset(mtod(m, caddr_t)+chunk_len, 0, padding_len);
6045 chunk_len += padding_len;
6046 SCTP_BUF_LEN(m) += padding_len;
6049 /* tack on the operational error if present */
6052 for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6053 parameter_len += SCTP_BUF_LEN(m_tmp);
6055 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6056 SCTP_BUF_NEXT(m_last) = op_err;
6057 while (SCTP_BUF_NEXT(m_last) != NULL) {
6058 m_last = SCTP_BUF_NEXT(m_last);
6060 chunk_len += parameter_len;
6062 if (padding_len > 0) {
6063 m_last = sctp_add_pad_tombuf(m_last, padding_len);
6064 if (m_last == NULL) {
6065 /* Houston we have a problem, no space */
6069 chunk_len += padding_len;
6072 /* Now we must build a cookie */
6073 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
6074 if (m_cookie == NULL) {
6075 /* memory problem */
6079 /* Now append the cookie to the end and update the space/size */
6080 SCTP_BUF_NEXT(m_last) = m_cookie;
6082 for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
6083 parameter_len += SCTP_BUF_LEN(m_tmp);
6084 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
6088 padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
6089 chunk_len += parameter_len;
6092 * Place in the size, but we don't include the last pad (if any) in the chunk length.
6095 initack->ch.chunk_length = htons(chunk_len);
6098 * Time to sign the cookie. We don't sign over the cookie signature
6099 * itself, thus we set the trailer.
6101 (void)sctp_hmac_m(SCTP_HMAC,
6102 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
6103 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
6104 (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
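/*
 * The HMAC above is computed over the cookie starting just past its
 * parameter header and stopping SCTP_SIGNATURE_SIZE bytes short of the
 * end (the trailer); the digest lands in the signature area that
 * sctp_add_cookie() reserved at the tail of the cookie.
 */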
6106 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
6107 * here since the timer will drive a retransmission.
6109 if (padding_len > 0) {
6110 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
6115 if (stc.loopback_scope) {
6116 over_addr = (union sctp_sockstore *)dst;
6121 if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
6123 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
6126 SCTP_SO_NOT_LOCKED))) {
6127 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
6128 if (error == ENOBUFS) {
6130 asoc->ifp_had_enobuf = 1;
6132 SCTP_STAT_INCR(sctps_lowlevelerr);
6136 asoc->ifp_had_enobuf = 0;
6139 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6144 sctp_prune_prsctp(struct sctp_tcb *stcb,
6145 struct sctp_association *asoc,
6146 struct sctp_sndrcvinfo *srcv,
6150 struct sctp_tmit_chunk *chk, *nchk;
6152 SCTP_TCB_LOCK_ASSERT(stcb);
6153 if ((asoc->prsctp_supported) &&
6154 (asoc->sent_queue_cnt_removeable > 0)) {
6155 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6157 * Look for chunks marked with the PR_SCTP flag AND
6158 * the buffer space flag. If the one being sent is
6159 * of equal or greater priority, then purge the old one
6160 * and free some space.
6162 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6164 * This one is PR-SCTP AND buffer space
6167 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6169 * Lower numbers equate to higher
6170 * priority so if the one we are
6171 * looking at has a larger or equal
6172 * priority we want to drop the data
6173 * and NOT retransmit it.
6177 * We release the book_size
6178 * if the mbuf is here
6183 if (chk->sent > SCTP_DATAGRAM_UNSENT)
6187 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6190 freed_spc += ret_spc;
6191 if (freed_spc >= dataout) {
6194 } /* if chunk was present */
6195 } /* if of sufficient priority */
6196 } /* if chunk has enabled */
6197 } /* tailqforeach */
6199 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
6200 /* Here we must move to the sent queue and mark */
6201 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
6202 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
6205 * We release the book_size
6206 * if the mbuf is here
6210 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
6213 freed_spc += ret_spc;
6214 if (freed_spc >= dataout) {
6217 } /* end if chk->data */
6218 } /* end if right class */
6219 } /* end if chk pr-sctp */
6220 } /* tailqforeachsafe (chk) */
6221 } /* if enabled in asoc */
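/*
 * sctp_get_frag_point() computes how large a single DATA chunk payload
 * may be for this association: take the smaller of the configured
 * sctp_frag_point and the smallest path MTU, subtract the IP/SCTP
 * overhead (IPv6 sized if the endpoint is bound to a v6 socket) plus
 * the DATA/I-DATA chunk header, subtract the AUTH chunk length if DATA
 * must be authenticated, and round down to a 4-byte boundary.
 */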
6225 sctp_get_frag_point(struct sctp_tcb *stcb,
6226 struct sctp_association *asoc)
6231 * For endpoints that have both v6 and v4 addresses we must reserve
6232 * room for the ipv6 header, for those that are only dealing with V4
6233 * we use a larger frag point.
6235 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6236 ovh = SCTP_MIN_OVERHEAD;
6238 ovh = SCTP_MIN_V4_OVERHEAD;
6240 ovh += SCTP_DATA_CHUNK_OVERHEAD(stcb);
6241 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
6242 siz = asoc->smallest_mtu - ovh;
6244 siz = (stcb->asoc.sctp_frag_point - ovh);
6246 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
6248 /* A data chunk MUST fit in a cluster */
6249 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
6252 /* adjust for an AUTH chunk if DATA requires auth */
6253 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
6254 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6257 /* make it an even word boundary please */
6264 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
6267 * We assume that the user wants PR_SCTP_TTL if the user provides a
6268 * positive lifetime but does not specify any PR_SCTP policy.
6270 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
6271 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6272 } else if (sp->timetolive > 0) {
6273 sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
6274 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
6278 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
6279 case CHUNK_FLAGS_PR_SCTP_BUF:
6281 * Time to live is a priority stored in tv_sec when doing
6282 * the buffer drop thing.
6284 sp->ts.tv_sec = sp->timetolive;
6287 case CHUNK_FLAGS_PR_SCTP_TTL:
6291 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6292 tv.tv_sec = sp->timetolive / 1000;
6293 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
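/*
 * sinfo_timetolive is given in milliseconds, so the split above yields
 * seconds and microseconds; e.g. a timetolive of 1500 produces
 * tv = {1, 500000} and the drop time becomes "now + 1.5s" after the
 * timevaladd() below.
 */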
6295 * TODO sctp_constants.h needs alternative time
6296 * macros when _KERNEL is undefined.
6298 timevaladd(&sp->ts, &tv);
6301 case CHUNK_FLAGS_PR_SCTP_RTX:
6303 * Time to live is the number of retransmissions stored in tv_sec.
6306 sp->ts.tv_sec = sp->timetolive;
6310 SCTPDBG(SCTP_DEBUG_USRREQ1,
6311 "Unknown PR_SCTP policy %u.\n",
6312 PR_SCTP_POLICY(sp->sinfo_flags));
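/*
 * sctp_msg_append() queues one complete user message on the requested
 * stream: it validates the stream number and the association state,
 * allocates a stream-queue-pending entry, copies the send info (flags,
 * ppid, context, TTL / PR-SCTP policy), charges the length against the
 * send buffer and hands the entry to the stream scheduler.
 */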
6318 sctp_msg_append(struct sctp_tcb *stcb,
6319 struct sctp_nets *net,
6321 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
6325 struct sctp_stream_queue_pending *sp = NULL;
6326 struct sctp_stream_out *strm;
6329 * Given an mbuf chain, put it into the association send queue and
6330 * place it on the wheel
6332 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
6333 /* Invalid stream number */
6334 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6338 if ((stcb->asoc.stream_locked) &&
6339 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
6340 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
6344 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
6345 /* Now can we send this? */
6346 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
6347 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
6348 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
6349 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
6350 /* got data while shutting down */
6351 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
6355 sctp_alloc_a_strmoq(stcb, sp);
6357 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6361 sp->sinfo_flags = srcv->sinfo_flags;
6362 sp->timetolive = srcv->sinfo_timetolive;
6363 sp->ppid = srcv->sinfo_ppid;
6364 sp->context = srcv->sinfo_context;
6366 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
6368 atomic_add_int(&sp->net->ref_count, 1);
6372 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
6373 sp->sid = srcv->sinfo_stream;
6374 sp->msg_is_complete = 1;
6375 sp->sender_all_done = 1;
6378 sp->tail_mbuf = NULL;
6379 sctp_set_prsctp_policy(sp);
6381 * We could in theory (for sendall) pass the length in, but we would
6382 * still have to hunt through the chain since we need to set up the tail_mbuf.
6386 for (at = m; at; at = SCTP_BUF_NEXT(at)) {
6387 if (SCTP_BUF_NEXT(at) == NULL)
6389 sp->length += SCTP_BUF_LEN(at);
6391 if (srcv->sinfo_keynumber_valid) {
6392 sp->auth_keyid = srcv->sinfo_keynumber;
6394 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
6396 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
6397 sctp_auth_key_acquire(stcb, sp->auth_keyid);
6398 sp->holds_key_ref = 1;
6400 if (hold_stcb_lock == 0) {
6401 SCTP_TCB_SEND_LOCK(stcb);
6403 sctp_snd_sb_alloc(stcb, sp->length);
6404 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
6405 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
6406 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
6408 if (hold_stcb_lock == 0) {
6409 SCTP_TCB_SEND_UNLOCK(stcb);
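/*
 * sctp_copy_mbufchain() appends a copy of clonechain to outchain and
 * keeps *endofchain pointing at the last mbuf so repeated appends stay
 * cheap. Small payloads (below the sctp_mbuf_threshold_count sysctl)
 * are copied into the trailing space of the existing chain; larger
 * ones are duplicated with SCTP_M_COPYM, and when can_take_mbuf is set
 * the clonechain is simply taken over as-is.
 */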
6419 static struct mbuf *
6420 sctp_copy_mbufchain(struct mbuf *clonechain,
6421 struct mbuf *outchain,
6422 struct mbuf **endofchain,
6425 uint8_t copy_by_ref)
6428 struct mbuf *appendchain;
6432 if (endofchain == NULL) {
6436 sctp_m_freem(outchain);
6439 if (can_take_mbuf) {
6440 appendchain = clonechain;
6443 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
6445 /* It's not in a cluster */
6446 if (*endofchain == NULL) {
6447 /* lets get a mbuf cluster */
6448 if (outchain == NULL) {
6449 /* This is the general case */
6451 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6452 if (outchain == NULL) {
6455 SCTP_BUF_LEN(outchain) = 0;
6456 *endofchain = outchain;
6457 /* get the prepend space */
6458 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
6461 * We really should not get a NULL
6467 if (SCTP_BUF_NEXT(m) == NULL) {
6471 m = SCTP_BUF_NEXT(m);
6474 if (*endofchain == NULL) {
6476 * huh, TSNH XXX maybe we
6479 sctp_m_freem(outchain);
6483 /* get the new end of length */
6484 len = (int)M_TRAILINGSPACE(*endofchain);
6486 /* how much is left at the end? */
6487 len = (int)M_TRAILINGSPACE(*endofchain);
6489 /* Find the end of the data, for appending */
6490 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6492 /* Now lets copy it out */
6493 if (len >= sizeofcpy) {
6494 /* It all fits, copy it in */
6495 m_copydata(clonechain, 0, sizeofcpy, cp);
6496 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6498 /* fill up the end of the chain */
6500 m_copydata(clonechain, 0, len, cp);
6501 SCTP_BUF_LEN((*endofchain)) += len;
6502 /* now we need another one */
6505 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
6510 SCTP_BUF_NEXT((*endofchain)) = m;
6512 cp = mtod((*endofchain), caddr_t);
6513 m_copydata(clonechain, len, sizeofcpy, cp);
6514 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6518 /* copy the old fashion way */
6519 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
6520 #ifdef SCTP_MBUF_LOGGING
6521 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6522 sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
6527 if (appendchain == NULL) {
6530 sctp_m_freem(outchain);
6534 /* tack on to the end */
6535 if (*endofchain != NULL) {
6536 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6540 if (SCTP_BUF_NEXT(m) == NULL) {
6541 SCTP_BUF_NEXT(m) = appendchain;
6544 m = SCTP_BUF_NEXT(m);
6548 * save off the end and update the end-chain position
6552 if (SCTP_BUF_NEXT(m) == NULL) {
6556 m = SCTP_BUF_NEXT(m);
6560 /* save off the end and update the end-chain position */
6563 if (SCTP_BUF_NEXT(m) == NULL) {
6567 m = SCTP_BUF_NEXT(m);
6569 return (appendchain);
6574 sctp_med_chunk_output(struct sctp_inpcb *inp,
6575 struct sctp_tcb *stcb,
6576 struct sctp_association *asoc,
6579 int control_only, int from_where,
6580 struct timeval *now, int *now_filled, int frag_point, int so_locked
6581 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6587 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6588 uint32_t val SCTP_UNUSED)
6590 struct sctp_copy_all *ca;
6593 int added_control = 0;
6594 int un_sent, do_chunk_output = 1;
6595 struct sctp_association *asoc;
6596 struct sctp_nets *net;
6598 ca = (struct sctp_copy_all *)ptr;
6599 if (ca->m == NULL) {
6602 if (ca->inp != inp) {
6606 if (ca->sndlen > 0) {
6607 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
6609 /* can't copy so we are done */
6613 #ifdef SCTP_MBUF_LOGGING
6614 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6615 sctp_log_mbc(m, SCTP_MBUF_ICOPY);
6621 SCTP_TCB_LOCK_ASSERT(stcb);
6622 if (stcb->asoc.alternate) {
6623 net = stcb->asoc.alternate;
6625 net = stcb->asoc.primary_destination;
6627 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6628 /* Abort this assoc with m as the user defined reason */
6630 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
6632 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
6633 0, M_NOWAIT, 1, MT_DATA);
6634 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
6637 struct sctp_paramhdr *ph;
6639 ph = mtod(m, struct sctp_paramhdr *);
6640 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6641 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
6644 * We add one here to keep the assoc from disappearing on us.
6647 atomic_add_int(&stcb->asoc.refcnt, 1);
6648 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
6650 * sctp_abort_an_association() calls sctp_free_asoc(); free_asoc
6651 * will NOT free the association since we incremented the
6652 * refcnt. We do this to prevent it being freed and things
6653 * getting tricky, since we could end up (from free_asoc)
6654 * calling inpcb_free, which would make a recursive lock call
6655 * on the iterator lock. But as a consequence of that the
6656 * stcb will return to us un-locked. Since free_asoc
6657 * returns with either no TCB or the TCB unlocked, we must
6658 * relock to unlock in the iterator timer :-0
6660 SCTP_TCB_LOCK(stcb);
6661 atomic_add_int(&stcb->asoc.refcnt, -1);
6662 goto no_chunk_output;
6665 ret = sctp_msg_append(stcb, net, m,
6669 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6670 /* shutdown this assoc */
6671 if (TAILQ_EMPTY(&asoc->send_queue) &&
6672 TAILQ_EMPTY(&asoc->sent_queue) &&
6673 sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
6674 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6678 * there is nothing queued to send, so I'm
6681 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6682 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6683 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6685 * only send SHUTDOWN the first time
6688 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6689 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6691 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6692 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6693 sctp_stop_timers_for_shutdown(stcb);
6694 sctp_send_shutdown(stcb, net);
6695 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6697 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6698 asoc->primary_destination);
6700 do_chunk_output = 0;
6704 * we still got (or just got) data to send,
6705 * so set SHUTDOWN_PENDING
6708 * XXX sockets draft says that SCTP_EOF
6709 * should be sent with no data. currently,
6710 * we will allow user data to be sent first
6711 * and move to SHUTDOWN-PENDING
6713 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6714 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6715 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6716 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
6717 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6719 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6720 if (TAILQ_EMPTY(&asoc->send_queue) &&
6721 TAILQ_EMPTY(&asoc->sent_queue) &&
6722 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6723 struct mbuf *op_err;
6724 char msg[SCTP_DIAG_INFO_LEN];
6727 snprintf(msg, sizeof(msg),
6728 "%s:%d at %s", __FILE__, __LINE__, __func__);
6729 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
6731 atomic_add_int(&stcb->asoc.refcnt, 1);
6732 sctp_abort_an_association(stcb->sctp_ep, stcb,
6733 op_err, SCTP_SO_NOT_LOCKED);
6734 atomic_add_int(&stcb->asoc.refcnt, -1);
6735 goto no_chunk_output;
6737 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6738 asoc->primary_destination);
6744 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6745 (stcb->asoc.stream_queue_cnt * SCTP_DATA_CHUNK_OVERHEAD(stcb)));
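/*
 * Nagle-style check: un_sent is the queued-but-unsent byte count plus
 * per-chunk header overhead. With SCTP_NODELAY off and data already in
 * flight, anything smaller than one full packet is held back so it can
 * be bundled later.
 */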
6747 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6748 (stcb->asoc.total_flight > 0) &&
6749 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
6750 do_chunk_output = 0;
6752 if (do_chunk_output)
6753 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6754 else if (added_control) {
6755 int num_out, reason, now_filled = 0;
6759 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6760 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6761 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6772 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
6774 struct sctp_copy_all *ca;
6776 ca = (struct sctp_copy_all *)ptr;
6778 * Do a notify here? Kacheong suggests that the notify be done at
6779 * the send time.. so you would push up a notification if any send
6780 * failed. Don't know if this is feasible since the only failures we
6781 * have are "memory" related, and if you cannot get an mbuf to send
6782 * the data you surely can't get an mbuf to send up to notify the
6783 * user you can't send the data :->
6786 /* now free everything */
6787 sctp_m_freem(ca->m);
6788 SCTP_FREE(ca, SCTP_M_COPYAL);
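/*
 * sctp_copy_out_all() copies the user data referenced by the uio into
 * a freshly allocated chain of cluster mbufs, blocking for memory
 * (M_WAITOK) since this runs in the sendall setup path before the
 * association iterator is started.
 */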
6791 static struct mbuf *
6792 sctp_copy_out_all(struct uio *uio, int len)
6794 struct mbuf *ret, *at;
6795 int left, willcpy, cancpy, error;
6797 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
6803 SCTP_BUF_LEN(ret) = 0;
6804 /* save space for the data chunk header */
6805 cancpy = (int)M_TRAILINGSPACE(ret);
6806 willcpy = min(cancpy, left);
6809 /* Align data to the end */
6810 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6816 SCTP_BUF_LEN(at) = willcpy;
6817 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6820 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
6821 if (SCTP_BUF_NEXT(at) == NULL) {
6824 at = SCTP_BUF_NEXT(at);
6825 SCTP_BUF_LEN(at) = 0;
6826 cancpy = (int)M_TRAILINGSPACE(at);
6827 willcpy = min(cancpy, left);
6834 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6835 struct sctp_sndrcvinfo *srcv)
6838 struct sctp_copy_all *ca;
6840 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6844 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6847 memset(ca, 0, sizeof(struct sctp_copy_all));
6851 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6854 * take off the sendall flag, it would be bad if we failed to do
6857 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6858 /* get length and mbuf chain */
6860 ca->sndlen = (int)uio->uio_resid;
6861 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6862 if (ca->m == NULL) {
6863 SCTP_FREE(ca, SCTP_M_COPYAL);
6864 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6868 /* Gather the length of the send */
6872 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
6873 ca->sndlen += SCTP_BUF_LEN(mat);
6876 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6877 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6878 SCTP_ASOC_ANY_STATE,
6880 sctp_sendall_completes, inp, 1);
6882 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6883 SCTP_FREE(ca, SCTP_M_COPYAL);
6884 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6892 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6894 struct sctp_tmit_chunk *chk, *nchk;
6896 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
6897 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6898 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6899 asoc->ctrl_queue_cnt--;
6901 sctp_m_freem(chk->data);
6904 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
6910 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6912 struct sctp_association *asoc;
6913 struct sctp_tmit_chunk *chk, *nchk;
6914 struct sctp_asconf_chunk *acp;
6917 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
6918 /* find SCTP_ASCONF chunk in queue */
6919 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6921 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6922 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
6927 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6928 asoc->ctrl_queue_cnt--;
6930 sctp_m_freem(chk->data);
6933 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
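/*
 * sctp_clean_up_datalist() is called once a bundle of DATA chunks has
 * been handed to the lower layer: each chunk is moved from the
 * send_queue to the sent_queue (kept in TSN order), marked
 * SCTP_DATAGRAM_SENT, and its size is added to the flight and
 * subtracted from the peer's advertised rwnd, with sender-side SWS
 * avoidance kicking in when the rwnd falls below the threshold.
 */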
6940 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6941 struct sctp_association *asoc,
6942 struct sctp_tmit_chunk **data_list,
6944 struct sctp_nets *net)
6947 struct sctp_tmit_chunk *tp1;
6949 for (i = 0; i < bundle_at; i++) {
6950 /* off of the send queue */
6951 TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
6952 asoc->send_queue_cnt--;
6955 * For any chunk other than 0 you zap the time; chunk 0 gets
6956 * zapped or set based on whether an RTO measurement is needed.
6959 data_list[i]->do_rtt = 0;
6962 data_list[i]->sent_rcv_time = net->last_sent_time;
6963 data_list[i]->rec.data.cwnd_at_send = net->cwnd;
6964 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
6965 if (data_list[i]->whoTo == NULL) {
6966 data_list[i]->whoTo = net;
6967 atomic_add_int(&net->ref_count, 1);
6969 /* on to the sent queue */
6970 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6971 if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
6972 struct sctp_tmit_chunk *tpp;
6974 /* need to move back */
6976 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6978 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6982 if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
6985 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6987 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6992 /* This does not lower until the cum-ack passes it */
6993 asoc->sent_queue_cnt++;
6994 if ((asoc->peers_rwnd <= 0) &&
6995 (asoc->total_flight == 0) &&
6997 /* Mark the chunk as being a window probe */
6998 SCTP_STAT_INCR(sctps_windowprobed);
7000 #ifdef SCTP_AUDITING_ENABLED
7001 sctp_audit_log(0xC2, 3);
7003 data_list[i]->sent = SCTP_DATAGRAM_SENT;
7004 data_list[i]->snd_count = 1;
7005 data_list[i]->rec.data.chunk_was_revoked = 0;
7006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
7007 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
7008 data_list[i]->whoTo->flight_size,
7009 data_list[i]->book_size,
7010 (uint32_t)(uintptr_t)data_list[i]->whoTo,
7011 data_list[i]->rec.data.tsn);
7013 sctp_flight_size_increase(data_list[i]);
7014 sctp_total_flight_increase(stcb, data_list[i]);
7015 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
7016 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
7017 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
7019 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
7020 (uint32_t)(data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
7021 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
7022 /* SWS sender side engages */
7023 asoc->peers_rwnd = 0;
7026 if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
7027 (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
7032 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
7033 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7038 struct sctp_tmit_chunk *chk, *nchk;
7040 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
7041 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7042 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7043 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7044 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7045 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
7046 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7047 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7048 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7049 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7050 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7051 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7052 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7053 /* Stray chunks must be cleaned up */
7055 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7056 asoc->ctrl_queue_cnt--;
7058 sctp_m_freem(chk->data);
7061 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7062 asoc->fwd_tsn_cnt--;
7064 sctp_free_a_chunk(stcb, chk, so_locked);
7065 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7066 /* special handling, we must look into the param */
7067 if (chk != asoc->str_reset) {
7068 goto clean_up_anyway;
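/*
 * sctp_can_we_split_this() decides how many bytes of an incomplete
 * message may be moved into the next packet, returning 0 when it is
 * better to wait for more data. In EEOR mode a message that fits is
 * only taken when nothing is in flight; otherwise splitting is allowed
 * when the leftover would be large enough (sctp_min_residual) and at
 * least the minimum split point worth of space is available.
 */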
7075 sctp_can_we_split_this(struct sctp_tcb *stcb, uint32_t length,
7076 uint32_t space_left, uint32_t frag_point, int eeor_on)
7079 * Make a decision on if I should split a msg into multiple parts.
7080 * This is only asked of incomplete messages.
7084 * If we are doing EEOR we need to always send it if it's the
7085 * entire thing, since it might be all the guy is putting in
7088 if (space_left >= length) {
7090 * If we have data outstanding,
7091 * we get another chance when the sack
7092 * arrives to transmit - wait for more data
7094 if (stcb->asoc.total_flight == 0) {
7096 * If nothing is in flight, we zero the
7104 /* You can fill the rest */
7105 return (space_left);
7109 * For those strange folk that make the send buffer
7110 * smaller than our fragmentation point, we can't
7111 * get a full msg in so we have to allow splitting.
7113 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
7116 if ((length <= space_left) ||
7117 ((length - space_left) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
7118 /* Sub-optimal residual; don't split in non-eeor mode. */
7122 * If we reach here length is larger than the space_left. Do we wish
7123 * to split it for the sake of filling out the packet?
7125 if (space_left >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
7126 /* It's ok to split it */
7127 return (min(space_left, frag_point));
7129 /* Nope, can't split */
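/*
 * sctp_move_to_outqueue() takes data off the head of one stream queue
 * and turns it into a single DATA or I-DATA chunk on the association's
 * send_queue: it decides how much to take (possibly splitting an
 * incomplete message), steals or copies the mbufs, prepends the chunk
 * header, assigns the TSN and stream sequence / message id, and updates
 * the stream and association accounting. The number of bytes moved is
 * returned to the caller.
 */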
7134 sctp_move_to_outqueue(struct sctp_tcb *stcb,
7135 struct sctp_stream_out *strq,
7136 uint32_t space_left,
7137 uint32_t frag_point,
7142 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7147 /* Move from the stream to the send_queue keeping track of the total */
7148 struct sctp_association *asoc;
7149 struct sctp_stream_queue_pending *sp;
7150 struct sctp_tmit_chunk *chk;
7151 struct sctp_data_chunk *dchkh = NULL;
7152 struct sctp_idata_chunk *ndchkh = NULL;
7153 uint32_t to_move, length;
7155 uint8_t rcv_flags = 0;
7157 uint8_t send_lock_up = 0;
7159 SCTP_TCB_LOCK_ASSERT(stcb);
7162 /* sa_ignore FREED_MEMORY */
7163 sp = TAILQ_FIRST(&strq->outqueue);
7165 if (send_lock_up == 0) {
7166 SCTP_TCB_SEND_LOCK(stcb);
7169 sp = TAILQ_FIRST(&strq->outqueue);
7173 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
7174 (stcb->asoc.idata_supported == 0) &&
7175 (strq->last_msg_incomplete)) {
7176 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
7178 strq->last_msg_incomplete);
7179 strq->last_msg_incomplete = 0;
7183 SCTP_TCB_SEND_UNLOCK(stcb);
7188 if ((sp->msg_is_complete) && (sp->length == 0)) {
7189 if (sp->sender_all_done) {
7191 * We are doing deferred cleanup. Last time through
7192 * when we took all the data the sender_all_done flag was not set.
7195 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
7196 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
7197 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7198 sp->sender_all_done,
7200 sp->msg_is_complete,
7204 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
7205 SCTP_TCB_SEND_LOCK(stcb);
7208 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7209 TAILQ_REMOVE(&strq->outqueue, sp, next);
7210 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7211 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7212 (strq->chunks_on_queues == 0) &&
7213 TAILQ_EMPTY(&strq->outqueue)) {
7214 stcb->asoc.trigger_reset = 1;
7217 sctp_free_remote_addr(sp->net);
7221 sctp_m_freem(sp->data);
7224 sctp_free_a_strmoq(stcb, sp, so_locked);
7225 /* we can't be locked to it */
7227 SCTP_TCB_SEND_UNLOCK(stcb);
7230 /* back to get the next msg */
7234 * sender just finished this but still holds a
7242 /* is there some to get */
7243 if (sp->length == 0) {
7248 } else if (sp->discard_rest) {
7249 if (send_lock_up == 0) {
7250 SCTP_TCB_SEND_LOCK(stcb);
7253 /* Whack down the size */
7254 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
7255 if ((stcb->sctp_socket != NULL) &&
7256 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
7257 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
7258 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
7261 sctp_m_freem(sp->data);
7263 sp->tail_mbuf = NULL;
7272 some_taken = sp->some_taken;
7274 length = sp->length;
7275 if (sp->msg_is_complete) {
7276 /* The message is complete */
7277 to_move = min(length, frag_point);
7278 if (to_move == length) {
7279 /* All of it fits in the MTU */
7280 if (sp->some_taken) {
7281 rcv_flags |= SCTP_DATA_LAST_FRAG;
7283 rcv_flags |= SCTP_DATA_NOT_FRAG;
7285 sp->put_last_out = 1;
7286 if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
7287 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7290 /* Not all of it fits, we fragment */
7291 if (sp->some_taken == 0) {
7292 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7297 to_move = sctp_can_we_split_this(stcb, length, space_left, frag_point, eeor_mode);
7300 * We use a snapshot of length in case it
7301 * is expanding during the compare.
7306 if (to_move >= llen) {
7308 if (send_lock_up == 0) {
7310 * We are taking all of an incomplete msg
7311 * thus we need a send lock.
7313 SCTP_TCB_SEND_LOCK(stcb);
7315 if (sp->msg_is_complete) {
7317 * the sender finished the
7324 if (sp->some_taken == 0) {
7325 rcv_flags |= SCTP_DATA_FIRST_FRAG;
7329 /* Nothing to take. */
7336 /* If we reach here, we can copy out a chunk */
7337 sctp_alloc_a_chunk(stcb, chk);
7339 /* No chunk memory */
7345 * Setup for unordered if needed by looking at the user sent info
7348 if (sp->sinfo_flags & SCTP_UNORDERED) {
7349 rcv_flags |= SCTP_DATA_UNORDERED;
7351 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
7352 (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
7353 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
7355 /* clear out the chunk before setting up */
7356 memset(chk, 0, sizeof(*chk));
7357 chk->rec.data.rcv_flags = rcv_flags;
7359 if (to_move >= length) {
7360 /* we think we can steal the whole thing */
7361 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
7362 SCTP_TCB_SEND_LOCK(stcb);
7365 if (to_move < sp->length) {
7366 /* bail, it changed */
7369 chk->data = sp->data;
7370 chk->last_mbuf = sp->tail_mbuf;
7371 /* register the stealing */
7372 sp->data = sp->tail_mbuf = NULL;
7377 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
7378 chk->last_mbuf = NULL;
7379 if (chk->data == NULL) {
7380 sp->some_taken = some_taken;
7381 sctp_free_a_chunk(stcb, chk, so_locked);
7386 #ifdef SCTP_MBUF_LOGGING
7387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
7388 sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
7391 /* Pull off the data */
7392 m_adj(sp->data, to_move);
7393 /* Now lets work our way down and compact it */
7395 while (m && (SCTP_BUF_LEN(m) == 0)) {
7396 sp->data = SCTP_BUF_NEXT(m);
7397 SCTP_BUF_NEXT(m) = NULL;
7398 if (sp->tail_mbuf == m) {
7400 * Freeing tail? TSNH since
7401 * we supposedly were taking less
7402 * than the sp->length.
7405 panic("Huh, freeing tail? - TSNH");
7407 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
7408 sp->tail_mbuf = sp->data = NULL;
7417 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
7418 chk->copy_by_ref = 1;
7420 chk->copy_by_ref = 0;
7423 * get last_mbuf and counts of mb usage. This is ugly but hopefully
7424 * it's only one mbuf.
7426 if (chk->last_mbuf == NULL) {
7427 chk->last_mbuf = chk->data;
7428 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
7429 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
7432 if (to_move > length) {
7433 /*- This should not happen either
7434 * since we always lower to_move to the size
7435 * of sp->length if it's larger.
7438 panic("Huh, how can to_move be larger?");
7440 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7444 atomic_subtract_int(&sp->length, to_move);
7446 leading = SCTP_DATA_CHUNK_OVERHEAD(stcb);
7447 if (M_LEADINGSPACE(chk->data) < leading) {
7448 /* Not enough room for a chunk header, get some */
7451 m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
7454 * we're in trouble here. _PREPEND below will free
7455 * all the data if there is no leading space, so we
7456 * must put the data back and restore.
7458 if (send_lock_up == 0) {
7459 SCTP_TCB_SEND_LOCK(stcb);
7462 if (sp->data == NULL) {
7463 /* unsteal the data */
7464 sp->data = chk->data;
7465 sp->tail_mbuf = chk->last_mbuf;
7469 /* reassemble the data */
7471 sp->data = chk->data;
7472 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7474 sp->some_taken = some_taken;
7475 atomic_add_int(&sp->length, to_move);
7478 sctp_free_a_chunk(stcb, chk, so_locked);
7482 SCTP_BUF_LEN(m) = 0;
7483 SCTP_BUF_NEXT(m) = chk->data;
7485 M_ALIGN(chk->data, 4);
7488 SCTP_BUF_PREPEND(chk->data, SCTP_DATA_CHUNK_OVERHEAD(stcb), M_NOWAIT);
7489 if (chk->data == NULL) {
7490 /* HELP, TSNH since we assured it would not above? */
7492 panic("prepend fails HELP?");
7494 SCTP_PRINTF("prepend fails HELP?\n");
7495 sctp_free_a_chunk(stcb, chk, so_locked);
7501 sctp_snd_sb_alloc(stcb, SCTP_DATA_CHUNK_OVERHEAD(stcb));
7502 chk->book_size = chk->send_size = (uint16_t)(to_move + SCTP_DATA_CHUNK_OVERHEAD(stcb));
7503 chk->book_size_scale = 0;
7504 chk->sent = SCTP_DATAGRAM_UNSENT;
7507 chk->asoc = &stcb->asoc;
7508 chk->pad_inplace = 0;
7509 chk->no_fr_allowed = 0;
7510 if (stcb->asoc.idata_supported == 0) {
7511 if (rcv_flags & SCTP_DATA_UNORDERED) {
7512 /* Just use 0. The receiver ignores the values. */
7513 chk->rec.data.mid = 0;
7515 chk->rec.data.mid = strq->next_mid_ordered;
7516 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7517 strq->next_mid_ordered++;
7521 if (rcv_flags & SCTP_DATA_UNORDERED) {
7522 chk->rec.data.mid = strq->next_mid_unordered;
7523 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7524 strq->next_mid_unordered++;
7527 chk->rec.data.mid = strq->next_mid_ordered;
7528 if (rcv_flags & SCTP_DATA_LAST_FRAG) {
7529 strq->next_mid_ordered++;
7533 chk->rec.data.sid = sp->sid;
7534 chk->rec.data.ppid = sp->ppid;
7535 chk->rec.data.context = sp->context;
7536 chk->rec.data.doing_fast_retransmit = 0;
7538 chk->rec.data.timetodrop = sp->ts;
7539 chk->flags = sp->act_flags;
7542 chk->whoTo = sp->net;
7543 atomic_add_int(&chk->whoTo->ref_count, 1);
7547 if (sp->holds_key_ref) {
7548 chk->auth_keyid = sp->auth_keyid;
7549 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7550 chk->holds_key_ref = 1;
7552 chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
7553 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7554 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7555 (uint32_t)(uintptr_t)stcb, sp->length,
7556 (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
7559 if (stcb->asoc.idata_supported == 0) {
7560 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7562 ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
7565 * Put the rest of the things in place now. Size was done earlier in
7566 * previous loop prior to padding.
7569 #ifdef SCTP_ASOCLOG_OF_TSNS
7570 SCTP_TCB_LOCK_ASSERT(stcb);
7571 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7572 asoc->tsn_out_at = 0;
7573 asoc->tsn_out_wrapped = 1;
7575 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
7576 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
7577 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
7578 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7579 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7580 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7581 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7582 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7585 if (stcb->asoc.idata_supported == 0) {
7586 dchkh->ch.chunk_type = SCTP_DATA;
7587 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7588 dchkh->dp.tsn = htonl(chk->rec.data.tsn);
7589 dchkh->dp.sid = htons(strq->sid);
7590 dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
7591 dchkh->dp.ppid = chk->rec.data.ppid;
7592 dchkh->ch.chunk_length = htons(chk->send_size);
7594 ndchkh->ch.chunk_type = SCTP_IDATA;
7595 ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7596 ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
7597 ndchkh->dp.sid = htons(strq->sid);
7598 ndchkh->dp.reserved = htons(0);
7599 ndchkh->dp.mid = htonl(chk->rec.data.mid);
7601 ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
7603 ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
7605 ndchkh->ch.chunk_length = htons(chk->send_size);
7607 /* Now advance the chk->send_size by the actual pad needed. */
7608 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7613 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7614 lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
7616 chk->last_mbuf = lm;
7617 chk->pad_inplace = 1;
7619 chk->send_size += pads;
7621 if (PR_SCTP_ENABLED(chk->flags)) {
7622 asoc->pr_sctp_cnt++;
7624 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7625 /* All done pull and kill the message */
7626 if (sp->put_last_out == 0) {
7627 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7628 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7629 sp->sender_all_done,
7631 sp->msg_is_complete,
7635 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7636 SCTP_TCB_SEND_LOCK(stcb);
7639 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7640 TAILQ_REMOVE(&strq->outqueue, sp, next);
7641 stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
7642 if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
7643 (strq->chunks_on_queues == 0) &&
7644 TAILQ_EMPTY(&strq->outqueue)) {
7645 stcb->asoc.trigger_reset = 1;
7648 sctp_free_remote_addr(sp->net);
7652 sctp_m_freem(sp->data);
7655 sctp_free_a_strmoq(stcb, sp, so_locked);
7657 asoc->chunks_on_out_queue++;
7658 strq->chunks_on_queues++;
7659 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7660 asoc->send_queue_cnt++;
7663 SCTP_TCB_SEND_UNLOCK(stcb);
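/*
 * sctp_fill_outqueue() pulls data for one destination: it works out
 * how much room a single packet to this net has (MTU minus IP/SCTP
 * overhead and the DATA chunk header, rounded down to a 4-byte
 * boundary) and keeps asking the stream scheduler for the next stream
 * until the packet is filled, the scheduler runs dry, or a move bails
 * out.
 */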
7670 sctp_fill_outqueue(struct sctp_tcb *stcb,
7671 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
7672 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7677 struct sctp_association *asoc;
7678 struct sctp_stream_out *strq;
7679 uint32_t space_left, moved, total_moved;
7682 SCTP_TCB_LOCK_ASSERT(stcb);
7685 switch (net->ro._l_addr.sa.sa_family) {
7688 space_left = net->mtu - SCTP_MIN_V4_OVERHEAD;
7693 space_left = net->mtu - SCTP_MIN_OVERHEAD;
7698 space_left = net->mtu;
7701 /* Need an allowance for the data chunk header too */
7702 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
7704 /* must make even word boundary */
7705 space_left &= 0xfffffffc;
7706 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7709 while ((space_left > 0) && (strq != NULL)) {
7710 moved = sctp_move_to_outqueue(stcb, strq, space_left, frag_point,
7711 &giveup, eeor_mode, &bail, so_locked);
7712 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
7713 if ((giveup != 0) || (bail != 0)) {
7716 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
7717 total_moved += moved;
7718 space_left -= moved;
7719 if (space_left >= SCTP_DATA_CHUNK_OVERHEAD(stcb)) {
7720 space_left -= SCTP_DATA_CHUNK_OVERHEAD(stcb);
7724 space_left &= 0xfffffffc;
7729 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
7731 if (total_moved == 0) {
7732 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7733 (net == stcb->asoc.primary_destination)) {
7734 /* ran dry for primary network net */
7735 SCTP_STAT_INCR(sctps_primary_randry);
7736 } else if (stcb->asoc.sctp_cmt_on_off > 0) {
7737 /* ran dry with CMT on */
7738 SCTP_STAT_INCR(sctps_cmt_randry);
7744 sctp_fix_ecn_echo(struct sctp_association *asoc)
7746 struct sctp_tmit_chunk *chk;
7748 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7749 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7750 chk->sent = SCTP_DATAGRAM_UNSENT;
7756 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
7758 struct sctp_association *asoc;
7759 struct sctp_tmit_chunk *chk;
7760 struct sctp_stream_queue_pending *sp;
7767 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
7768 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
7769 if (sp->net == net) {
7770 sctp_free_remote_addr(sp->net);
7775 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7776 if (chk->whoTo == net) {
7777 sctp_free_remote_addr(chk->whoTo);
7784 sctp_med_chunk_output(struct sctp_inpcb *inp,
7785 struct sctp_tcb *stcb,
7786 struct sctp_association *asoc,
7789 int control_only, int from_where,
7790 struct timeval *now, int *now_filled, int frag_point, int so_locked
7791 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7797 * Ok this is the generic chunk service queue; we must do the following:
7799 * - Service the stream queue that is next, moving any
7800 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7801 * LAST to the out queue in one pass) and assigning TSN's. This
7802 * only applies though if the peer does not support NDATA. For NDATA
7803 * chunks it's ok to not send the entire message ;-)
7804 * - Check to see if the cwnd/rwnd allows any output, if so we go ahead and
7805 * formulate and send the low level chunks, making sure to combine
7806 * any control in the control chunk queue also.
7808 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
7809 struct mbuf *outchain, *endoutchain;
7810 struct sctp_tmit_chunk *chk, *nchk;
7812 /* temp arrays for unlinking */
7813 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7814 int no_fragmentflg, error;
7815 unsigned int max_rwnd_per_dest, max_send_per_dest;
7816 int one_chunk, hbflag, skip_data_for_this_net;
7817 int asconf, cookie, no_out_cnt;
7818 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7819 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7821 uint32_t auth_offset = 0;
7822 struct sctp_auth_chunk *auth = NULL;
7823 uint16_t auth_keyid;
7824 int override_ok = 1;
7825 int skip_fill_up = 0;
7826 int data_auth_reqd = 0;
7829 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7836 auth_keyid = stcb->asoc.authinfo.active_keyid;
7837 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7838 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7839 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7844 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7846 * First lets prime the pump. For each destination, if there is room
7847 * in the flight size, attempt to pull an MTU's worth out of the
7848 * stream queues into the general send_queue
7850 #ifdef SCTP_AUDITING_ENABLED
7851 sctp_audit_log(0xC2, 2);
7853 SCTP_TCB_LOCK_ASSERT(stcb);
7860 /* Nothing possible to send? */
7861 if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
7862 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
7863 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7864 TAILQ_EMPTY(&asoc->send_queue) &&
7865 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
7870 if (asoc->peers_rwnd == 0) {
7871 /* No room in peers rwnd */
7873 if (asoc->total_flight > 0) {
7874 /* we are allowed one chunk in flight */
7878 if (stcb->asoc.ecn_echo_cnt_onq) {
7879 /* Record where a sack goes, if any */
7880 if (no_data_chunks &&
7881 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
7882 /* Nothing but ECNe to send - we don't do that */
7883 goto nothing_to_send;
7885 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7886 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7887 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7888 sack_goes_to = chk->whoTo;
7893 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7894 if (stcb->sctp_socket)
7895 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
7897 max_send_per_dest = 0;
7898 if (no_data_chunks == 0) {
7899 /* How many non-directed chunks are there? */
7900 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7901 if (chk->whoTo == NULL) {
7903 * We already have non-directed chunks on
7904 * the queue, no need to do a fill-up.
7912 if ((no_data_chunks == 0) &&
7913 (skip_fill_up == 0) &&
7914 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
7915 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7917 * This for loop we are in takes in each net, if
7918 * it's got space in cwnd and has data sent to it
7919 * (when CMT is off) then it calls
7920 * sctp_fill_outqueue for the net. This gets data on
7921 * the send queue for that network.
7923 * In sctp_fill_outqueue TSN's are assigned and data
7924 * is copied out of the stream buffers. Note mostly
7925 * copy by reference (we hope).
7927 net->window_probe = 0;
7928 if ((net != stcb->asoc.alternate) &&
7929 ((net->dest_state & SCTP_ADDR_PF) ||
7930 (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
7931 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
7932 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7933 sctp_log_cwnd(stcb, net, 1,
7934 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7938 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
7939 (net->flight_size == 0)) {
7940 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
7942 if (net->flight_size >= net->cwnd) {
7943 /* skip this network, no room - can't fill */
7944 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7945 sctp_log_cwnd(stcb, net, 3,
7946 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7950 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7951 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7953 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
7955 /* memory alloc failure */
7961 /* now service each destination and send out what we can for it */
7962 /* Nothing to send? */
7963 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7964 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7965 TAILQ_EMPTY(&asoc->send_queue)) {
7969 if (asoc->sctp_cmt_on_off > 0) {
7970 /* get the last start point */
7971 start_at = asoc->last_net_cmt_send_started;
7972 if (start_at == NULL) {
7973 /* NULL, so start at the beginning */
7974 start_at = TAILQ_FIRST(&asoc->nets);
7976 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7977 if (start_at == NULL) {
7978 start_at = TAILQ_FIRST(&asoc->nets);
7981 asoc->last_net_cmt_send_started = start_at;
7983 start_at = TAILQ_FIRST(&asoc->nets);
7985 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7986 if (chk->whoTo == NULL) {
7987 if (asoc->alternate) {
7988 chk->whoTo = asoc->alternate;
7990 chk->whoTo = asoc->primary_destination;
7992 atomic_add_int(&chk->whoTo->ref_count, 1);
7995 old_start_at = NULL;
7996 again_one_more_time:
7997 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7998 /* how much can we send? */
7999 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
8000 if (old_start_at && (old_start_at == net)) {
8001 /* through the list completely. */
8005 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
8006 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
8007 (net->flight_size >= net->cwnd)) {
8009 * Nothing on control or asconf and flight is full,
8010 * we can skip even in the CMT case.
8015 endoutchain = outchain = NULL;
8018 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8019 skip_data_for_this_net = 1;
8021 skip_data_for_this_net = 0;
8023 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8026 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8031 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8041 if (mtu > asoc->peers_rwnd) {
8042 if (asoc->total_flight > 0) {
8043 /* We have a packet in flight somewhere */
8044 r_mtu = asoc->peers_rwnd;
8046 /* We are always allowed to send one MTU out */
8054 /************************/
8055 /* ASCONF transmission */
8056 /************************/
8057 /* Now first lets go through the asconf queue */
8058 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
8059 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
8062 if (chk->whoTo == NULL) {
8063 if (asoc->alternate == NULL) {
8064 if (asoc->primary_destination != net) {
8068 if (asoc->alternate != net) {
8073 if (chk->whoTo != net) {
8077 if (chk->data == NULL) {
8080 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
8081 chk->sent != SCTP_DATAGRAM_RESEND) {
8085 * if no AUTH is yet included and this chunk
8086 * requires it, make sure to account for it. We
8087 * don't apply the size until the AUTH chunk is
8088 * actually added below in case there is no room for
8089 * this chunk. NOTE: we overload the use of "omtu"
8092 if ((auth == NULL) &&
8093 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8094 stcb->asoc.peer_auth_chunks)) {
8095 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8098 /* Here we do NOT factor the r_mtu */
8099 if ((chk->send_size < (int)(mtu - omtu)) ||
8100 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8102 * We probably should glom the mbuf chain
8103 * from the chk->data for control but the
8104 * problem is it becomes yet one more level
8105 * of tracking to do if for some reason
8106 * output fails. Then I have got to
8107 * reconstruct the merged control chain.. el
8108 * yucko.. for now we take the easy way and
8112 * Add an AUTH chunk, if chunk requires it
8113 * save the offset into the chain for AUTH
8115 if ((auth == NULL) &&
8116 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8117 stcb->asoc.peer_auth_chunks))) {
8118 outchain = sctp_add_auth_chunk(outchain,
8123 chk->rec.chunk_id.id);
8124 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8126 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8127 (int)chk->rec.chunk_id.can_take_data,
8128 chk->send_size, chk->copy_by_ref);
8129 if (outchain == NULL) {
8131 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8134 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8135 /* update our MTU size */
8136 if (mtu > (chk->send_size + omtu))
8137 mtu -= (chk->send_size + omtu);
8140 to_out += (chk->send_size + omtu);
8141 /* Do clear IP_DF ? */
8142 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8145 if (chk->rec.chunk_id.can_take_data)
8148 * set hb flag since we can use these for
8154 * should sysctl this: don't bundle data
8155 * with ASCONF since it requires AUTH
8158 chk->sent = SCTP_DATAGRAM_SENT;
8159 if (chk->whoTo == NULL) {
8161 atomic_add_int(&net->ref_count, 1);
8166 * Ok we are out of room but we can
8167 * output without affecting the
8168 * flight size since this little guy
8169 * is a control only packet.
8171 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8173 * do NOT clear the asconf flag as
8174 * it is used to do appropriate
8175 * source address selection.
8177 if (*now_filled == 0) {
8178 (void)SCTP_GETTIME_TIMEVAL(now);
8181 net->last_sent_time = *now;
8183 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8184 (struct sockaddr *)&net->ro._l_addr,
8185 outchain, auth_offset, auth,
8186 stcb->asoc.authinfo.active_keyid,
8187 no_fragmentflg, 0, asconf,
8188 inp->sctp_lport, stcb->rport,
8189 htonl(stcb->asoc.peer_vtag),
8194 * error, we could not
8197 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8198 if (from_where == 0) {
8199 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8201 if (error == ENOBUFS) {
8202 asoc->ifp_had_enobuf = 1;
8203 SCTP_STAT_INCR(sctps_lowlevelerr);
8205 /* error, could not output */
8206 if (error == EHOSTUNREACH) {
8212 sctp_move_chunks_from_net(stcb, net);
8217 asoc->ifp_had_enobuf = 0;
8220 * increase the number we sent, if a
8221 * cookie is sent we don't tell them
8224 outchain = endoutchain = NULL;
8228 *num_out += ctl_cnt;
8229 /* recalc a clean slate and setup */
8230 switch (net->ro._l_addr.sa.sa_family) {
8233 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8238 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8255 /************************/
8256 /* Control transmission */
8257 /************************/
8258 /* Now first lets go through the control queue */
8259 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
8260 if ((sack_goes_to) &&
8261 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
8262 (chk->whoTo != sack_goes_to)) {
8264 * if we have a sack in queue, and we are
8265 * looking at an ecn echo that is NOT queued
8266 * to where the sack is going..
8268 if (chk->whoTo == net) {
8270 * Don't transmit it to where it's
8271 * going (current net)
8274 } else if (sack_goes_to == net) {
8276 * But do transmit it to this
8279 goto skip_net_check;
8282 if (chk->whoTo == NULL) {
8283 if (asoc->alternate == NULL) {
8284 if (asoc->primary_destination != net) {
8288 if (asoc->alternate != net) {
8293 if (chk->whoTo != net) {
8298 if (chk->data == NULL) {
8301 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
8303 * It must be unsent. Cookies and ASCONF's
8304 * hang around, but their timers will force
8305 * them out when marked for resend.
8310 * if no AUTH is yet included and this chunk
8311 * requires it, make sure to account for it. We
8312 * don't apply the size until the AUTH chunk is
8313 * actually added below in case there is no room for
8314 * this chunk. NOTE: we overload the use of "omtu"
8317 if ((auth == NULL) &&
8318 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8319 stcb->asoc.peer_auth_chunks)) {
8320 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8323 /* Here we do NOT factor the r_mtu */
8324 if ((chk->send_size <= (int)(mtu - omtu)) ||
8325 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
8327 * We probably should glom the mbuf chain
8328 * from the chk->data for control but the
8329 * problem is it becomes yet one more level
8330 * of tracking to do if for some reason
8331 * output fails. Then I have got to
8332 * reconstruct the merged control chain.. el
8333 * yucko.. for now we take the easy way and
8337 * Add an AUTH chunk, if the chunk requires it,
8338 * and save the offset into the chain for AUTH
8340 if ((auth == NULL) &&
8341 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8342 stcb->asoc.peer_auth_chunks))) {
8343 outchain = sctp_add_auth_chunk(outchain,
8348 chk->rec.chunk_id.id);
8349 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8351 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
8352 (int)chk->rec.chunk_id.can_take_data,
8353 chk->send_size, chk->copy_by_ref);
8354 if (outchain == NULL) {
8356 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8359 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8360 /* update our MTU size */
8361 if (mtu > (chk->send_size + omtu))
8362 mtu -= (chk->send_size + omtu);
8365 to_out += (chk->send_size + omtu);
8366 /* Do clear IP_DF ? */
8367 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8370 if (chk->rec.chunk_id.can_take_data)
8372 /* Mark things to be removed, if needed */
8373 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8374 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
8375 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
8376 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
8377 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
8378 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
8379 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
8380 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
8381 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
8382 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
8383 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
8384 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
8387 /* remove these chunks at the end */
8388 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
8389 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
8390 /* turn off the timer */
8391 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
8392 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8394 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
8400 * Other chunks, since they have
8401 * timers running (i.e. COOKIE) we
8402 * just "trust" that it gets sent or
8406 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8409 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8411 * Increment the ECNE send count
8412 * here; this means we may be
8413 * over-zealous in our
8414 * counting if the send
8415 * fails, but it's the best
8416 * place to do it (we used
8417 * to do it in the queue of
8418 * the chunk, but that did
8419 * not tell how many times
8422 SCTP_STAT_INCR(sctps_sendecne);
8424 chk->sent = SCTP_DATAGRAM_SENT;
8425 if (chk->whoTo == NULL) {
8427 atomic_add_int(&net->ref_count, 1);
8433 * Ok we are out of room but we can
8434 * output without affecting the
8435 * flight size since this little guy
8436 * is a control only packet.
8439 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
8441 * do NOT clear the asconf
8442 * flag as it is used to do
8443 * appropriate source
8444 * address selection.
8448 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8451 /* Only HB or ASCONF advances time */
8453 if (*now_filled == 0) {
8454 (void)SCTP_GETTIME_TIMEVAL(now);
8457 net->last_sent_time = *now;
8460 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8461 (struct sockaddr *)&net->ro._l_addr,
8464 stcb->asoc.authinfo.active_keyid,
8465 no_fragmentflg, 0, asconf,
8466 inp->sctp_lport, stcb->rport,
8467 htonl(stcb->asoc.peer_vtag),
8472 * error, we could not
8475 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8476 if (from_where == 0) {
8477 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8479 if (error == ENOBUFS) {
8480 asoc->ifp_had_enobuf = 1;
8481 SCTP_STAT_INCR(sctps_lowlevelerr);
8483 if (error == EHOSTUNREACH) {
8489 sctp_move_chunks_from_net(stcb, net);
8494 asoc->ifp_had_enobuf = 0;
8497 * increase the number we sent, if a
8498 * cookie is sent we don't tell them
8501 outchain = endoutchain = NULL;
8505 *num_out += ctl_cnt;
8506 /* recalc a clean slate and setup */
8507 switch (net->ro._l_addr.sa.sa_family) {
8510 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8515 mtu = net->mtu - SCTP_MIN_OVERHEAD;
8532 /* JRI: if dest is in PF state, do not send data to it */
8533 if ((asoc->sctp_cmt_on_off > 0) &&
8534 (net != stcb->asoc.alternate) &&
8535 (net->dest_state & SCTP_ADDR_PF)) {
8538 if (net->flight_size >= net->cwnd) {
8541 if ((asoc->sctp_cmt_on_off > 0) &&
8542 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
8543 (net->flight_size > max_rwnd_per_dest)) {
8547 * We need a specific accounting for the usage of the send
8548 * buffer. We also need to check the number of messages per
8549 * net. For now, this is better than nothing and it disabled
8552 if ((asoc->sctp_cmt_on_off > 0) &&
8553 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
8554 (max_send_per_dest > 0) &&
8555 (net->flight_size > max_send_per_dest)) {
8558 /*********************/
8559 /* Data transmission */
8560 /*********************/
8562 * if AUTH for DATA is required and no AUTH has been added
8563 * yet, account for this in the mtu now... if no data can be
8564 * bundled, this adjustment won't matter anyways since the
8565 * packet will be going out...
8567 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8568 stcb->asoc.peer_auth_chunks);
8569 if (data_auth_reqd && (auth == NULL)) {
8570 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8572 /* now lets add any data within the MTU constraints */
8573 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8576 if (net->mtu > SCTP_MIN_V4_OVERHEAD)
8577 omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
8584 if (net->mtu > SCTP_MIN_OVERHEAD)
8585 omtu = net->mtu - SCTP_MIN_OVERHEAD;
8595 if ((((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
8596 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
8597 (skip_data_for_this_net == 0)) ||
8599 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
8600 if (no_data_chunks) {
8601 /* let only control go out */
8605 if (net->flight_size >= net->cwnd) {
8606 /* skip this net, no room for data */
8610 if ((chk->whoTo != NULL) &&
8611 (chk->whoTo != net)) {
8612 /* Don't send the chunk on this net */
8615 if (asoc->sctp_cmt_on_off == 0) {
8616 if ((asoc->alternate) &&
8617 (asoc->alternate != net) &&
8618 (chk->whoTo == NULL)) {
8620 } else if ((net != asoc->primary_destination) &&
8621 (asoc->alternate == NULL) &&
8622 (chk->whoTo == NULL)) {
8626 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8628 * strange, we have a chunk that is
8629 * too big for its destination and
8630 * yet no fragment ok flag.
8631 * Something went wrong when the
8632 * PMTU changed...we did not mark
8633 * this chunk for some reason?? I
8634 * will fix it here by letting IP
8635 * fragment it for now and printing
8636 * a warning. This really should not
8639 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8640 chk->send_size, mtu);
8641 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8643 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8644 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8645 struct sctp_data_chunk *dchkh;
8647 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8648 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
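/*
 * Bundle this DATA chunk only if it fits within both the remaining
 * path MTU budget and r_mtu, or if IP fragmentation is allowed for it
 * and it still fits inside the peer's advertised rwnd.
 */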
8650 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8651 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8652 /* ok we will add this one */
8655 * Add an AUTH chunk, if chunk
8656 * requires it, save the offset into
8657 * the chain for AUTH
8659 if (data_auth_reqd) {
8661 outchain = sctp_add_auth_chunk(outchain,
8667 auth_keyid = chk->auth_keyid;
8669 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8670 } else if (override_ok) {
8675 auth_keyid = chk->auth_keyid;
8677 } else if (auth_keyid != chk->auth_keyid) {
8685 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8686 chk->send_size, chk->copy_by_ref);
8687 if (outchain == NULL) {
8688 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8689 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8690 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8693 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8696 /* update our MTU size */
8697 /* Do clear IP_DF ? */
8698 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8701 /* unsigned subtraction of mtu */
8702 if (mtu > chk->send_size)
8703 mtu -= chk->send_size;
8706 /* unsigned subtraction of r_mtu */
8707 if (r_mtu > chk->send_size)
8708 r_mtu -= chk->send_size;
8712 to_out += chk->send_size;
8713 if ((to_out > mx_mtu) && no_fragmentflg) {
8715 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8717 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8721 chk->window_probe = 0;
8722 data_list[bundle_at++] = chk;
8723 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8726 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8727 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8728 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8730 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8732 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8733 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8743 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8745 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8746 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8747 data_list[0]->window_probe = 1;
8748 net->window_probe = 1;
8754 * Must be sent in order of the
8755 * TSN's (on a network)
8759 } /* for (chunk gather loop for this net) */
8760 } /* if asoc.state OPEN */
8762 /* Is there something to send for this destination? */
8764 /* We may need to start a control timer or two */
8766 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8769 * do NOT clear the asconf flag as it is
8770 * used to do appropriate source address selection.
8775 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8778 /* must start a send timer if data is being sent */
8779 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8781 * no timer running on this destination
8784 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8786 if (bundle_at || hbflag) {
8787 /* For data/asconf and hb set time */
8788 if (*now_filled == 0) {
8789 (void)SCTP_GETTIME_TIMEVAL(now);
8792 net->last_sent_time = *now;
8794 /* Now send it, if there is anything to send :> */
8795 if ((error = sctp_lowlevel_chunk_output(inp,
8798 (struct sockaddr *)&net->ro._l_addr,
8806 inp->sctp_lport, stcb->rport,
8807 htonl(stcb->asoc.peer_vtag),
8811 /* error, we could not output */
8812 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8813 if (from_where == 0) {
8814 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8816 if (error == ENOBUFS) {
8817 asoc->ifp_had_enobuf = 1;
8818 SCTP_STAT_INCR(sctps_lowlevelerr);
8820 if (error == EHOSTUNREACH) {
8822 * Destination went unreachable
8825 sctp_move_chunks_from_net(stcb, net);
8829 * I add this line to be paranoid. As far as
8830 * I can tell the continue takes us back to
8831 * the top of the for, but just to make sure
8832 * I will reset these again here.
8834 ctl_cnt = bundle_at = 0;
8835 continue; /* This takes us back to the
8836 * for() for the nets. */
8838 asoc->ifp_had_enobuf = 0;
8844 *num_out += (ctl_cnt + bundle_at);
8847 /* setup for a RTO measurement */
8848 tsns_sent = data_list[0]->rec.data.tsn;
8849 /* fill time if not already filled */
8850 if (*now_filled == 0) {
8851 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8853 *now = asoc->time_last_sent;
8855 asoc->time_last_sent = *now;
8857 if (net->rto_needed) {
8858 data_list[0]->do_rtt = 1;
8859 net->rto_needed = 0;
8861 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8862 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8868 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8869 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8872 if (old_start_at == NULL) {
8873 old_start_at = start_at;
8874 start_at = TAILQ_FIRST(&asoc->nets);
8876 goto again_one_more_time;
8879 * At the end there should be no NON timed chunks hanging on this
8882 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8883 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8885 if ((*num_out == 0) && (*reason_code == 0)) {
8890 sctp_clean_up_ctl(stcb, asoc, so_locked);
8895 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8898 * Prepend an OPERATIONAL_ERROR chunk header and put it on the end of
8899 * the control chunk queue.
8901 struct sctp_chunkhdr *hdr;
8902 struct sctp_tmit_chunk *chk;
8903 struct mbuf *mat, *last_mbuf;
8904 uint32_t chunk_length;
8905 uint16_t padding_length;
8907 SCTP_TCB_LOCK_ASSERT(stcb);
8908 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
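/*
 * If prepending the chunk header failed (M_NOWAIT allocation), the
 * mbuf chain has already been released by the prepend macro and
 * op_err is NULL, so there is nothing left to queue.
 */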
8909 if (op_err == NULL) {
8914 for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
8915 chunk_length += SCTP_BUF_LEN(mat);
8916 if (SCTP_BUF_NEXT(mat) == NULL) {
8920 if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
8921 sctp_m_freem(op_err);
8924 padding_length = chunk_length % 4;
8925 if (padding_length != 0) {
8926 padding_length = 4 - padding_length;
8928 if (padding_length != 0) {
8929 if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
8930 sctp_m_freem(op_err);
8934 sctp_alloc_a_chunk(stcb, chk);
8937 sctp_m_freem(op_err);
8940 chk->copy_by_ref = 0;
8941 chk->send_size = (uint16_t)chunk_length;
8942 chk->sent = SCTP_DATAGRAM_UNSENT;
8944 chk->asoc = &stcb->asoc;
8947 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8948 chk->rec.chunk_id.can_take_data = 0;
8949 hdr = mtod(op_err, struct sctp_chunkhdr *);
8950 hdr->chunk_type = SCTP_OPERATION_ERROR;
8951 hdr->chunk_flags = 0;
8952 hdr->chunk_length = htons(chk->send_size);
8953 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8954 chk->asoc->ctrl_queue_cnt++;
8958 sctp_send_cookie_echo(struct mbuf *m,
8960 struct sctp_tcb *stcb,
8961 struct sctp_nets *net)
8964 * pull out the cookie and put it at the front of the control chunk
8968 struct mbuf *cookie;
8969 struct sctp_paramhdr param, *phdr;
8970 struct sctp_chunkhdr *hdr;
8971 struct sctp_tmit_chunk *chk;
8972 uint16_t ptype, plen;
8974 SCTP_TCB_LOCK_ASSERT(stcb);
8975 /* First find the cookie in the param area */
8977 at = offset + sizeof(struct sctp_init_chunk);
8979 phdr = sctp_get_next_param(m, at, &param, sizeof(param));
8983 ptype = ntohs(phdr->param_type);
8984 plen = ntohs(phdr->param_length);
8985 if (ptype == SCTP_STATE_COOKIE) {
8988 /* found the cookie */
8989 if ((pad = (plen % 4))) {
8992 cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
8993 if (cookie == NULL) {
8997 #ifdef SCTP_MBUF_LOGGING
8998 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8999 sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
9004 at += SCTP_SIZE32(plen);
9006 /* ok, we got the cookie lets change it into a cookie echo chunk */
9007 /* first the change from param to cookie */
9008 hdr = mtod(cookie, struct sctp_chunkhdr *);
9009 hdr->chunk_type = SCTP_COOKIE_ECHO;
9010 hdr->chunk_flags = 0;
9011 /* get the chunk stuff now and place it in the FRONT of the queue */
9012 sctp_alloc_a_chunk(stcb, chk);
9015 sctp_m_freem(cookie);
9018 chk->copy_by_ref = 0;
9019 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
9020 chk->rec.chunk_id.can_take_data = 0;
9021 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9022 chk->send_size = plen;
9023 chk->sent = SCTP_DATAGRAM_UNSENT;
9025 chk->asoc = &stcb->asoc;
9028 atomic_add_int(&chk->whoTo->ref_count, 1);
9029 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
9030 chk->asoc->ctrl_queue_cnt++;
9035 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
9039 struct sctp_nets *net)
9042 * take a HB request and make it into a HB ack and send it.
9044 struct mbuf *outchain;
9045 struct sctp_chunkhdr *chdr;
9046 struct sctp_tmit_chunk *chk;
9050 /* must have a net pointer */
9053 outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
9054 if (outchain == NULL) {
9055 /* gak out of memory */
9058 #ifdef SCTP_MBUF_LOGGING
9059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9060 sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
9063 chdr = mtod(outchain, struct sctp_chunkhdr *);
9064 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
9065 chdr->chunk_flags = 0;
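/*
 * Pad the copied heartbeat data out to the next 4-byte boundary with
 * zero bytes, as SCTP chunks must be 4-byte aligned.
 */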
9066 if (chk_length % 4) {
9068 uint32_t cpthis = 0;
9071 padlen = 4 - (chk_length % 4);
9072 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
9074 sctp_alloc_a_chunk(stcb, chk);
9077 sctp_m_freem(outchain);
9080 chk->copy_by_ref = 0;
9081 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
9082 chk->rec.chunk_id.can_take_data = 1;
9084 chk->send_size = chk_length;
9085 chk->sent = SCTP_DATAGRAM_UNSENT;
9087 chk->asoc = &stcb->asoc;
9088 chk->data = outchain;
9090 atomic_add_int(&chk->whoTo->ref_count, 1);
9091 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9092 chk->asoc->ctrl_queue_cnt++;
9096 sctp_send_cookie_ack(struct sctp_tcb *stcb)
9098 /* formulate and queue a cookie-ack back to sender */
9099 struct mbuf *cookie_ack;
9100 struct sctp_chunkhdr *hdr;
9101 struct sctp_tmit_chunk *chk;
9103 SCTP_TCB_LOCK_ASSERT(stcb);
9105 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
9106 if (cookie_ack == NULL) {
9110 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
9111 sctp_alloc_a_chunk(stcb, chk);
9114 sctp_m_freem(cookie_ack);
9117 chk->copy_by_ref = 0;
9118 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
9119 chk->rec.chunk_id.can_take_data = 1;
9121 chk->send_size = sizeof(struct sctp_chunkhdr);
9122 chk->sent = SCTP_DATAGRAM_UNSENT;
9124 chk->asoc = &stcb->asoc;
9125 chk->data = cookie_ack;
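/*
 * Prefer to send the COOKIE-ACK back to the address the last control
 * chunk (normally the COOKIE-ECHO) arrived from, when it is known.
 */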
9126 if (chk->asoc->last_control_chunk_from != NULL) {
9127 chk->whoTo = chk->asoc->last_control_chunk_from;
9128 atomic_add_int(&chk->whoTo->ref_count, 1);
9132 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
9133 hdr->chunk_type = SCTP_COOKIE_ACK;
9134 hdr->chunk_flags = 0;
9135 hdr->chunk_length = htons(chk->send_size);
9136 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
9137 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9138 chk->asoc->ctrl_queue_cnt++;
9144 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
9146 /* formulate and queue a SHUTDOWN-ACK back to the sender */
9147 struct mbuf *m_shutdown_ack;
9148 struct sctp_shutdown_ack_chunk *ack_cp;
9149 struct sctp_tmit_chunk *chk;
9151 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9152 if (m_shutdown_ack == NULL) {
9156 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
9157 sctp_alloc_a_chunk(stcb, chk);
9160 sctp_m_freem(m_shutdown_ack);
9163 chk->copy_by_ref = 0;
9164 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
9165 chk->rec.chunk_id.can_take_data = 1;
9167 chk->send_size = sizeof(struct sctp_chunkhdr);
9168 chk->sent = SCTP_DATAGRAM_UNSENT;
9171 chk->asoc = &stcb->asoc;
9172 chk->data = m_shutdown_ack;
9175 atomic_add_int(&chk->whoTo->ref_count, 1);
9177 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
9178 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
9179 ack_cp->ch.chunk_flags = 0;
9180 ack_cp->ch.chunk_length = htons(chk->send_size);
9181 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
9182 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9183 chk->asoc->ctrl_queue_cnt++;
9188 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
9190 /* formulate and queue a SHUTDOWN to the sender */
9191 struct mbuf *m_shutdown;
9192 struct sctp_shutdown_chunk *shutdown_cp;
9193 struct sctp_tmit_chunk *chk;
9195 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
9196 if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
9197 /* We already have a SHUTDOWN queued. Reuse it. */
9199 sctp_free_remote_addr(chk->whoTo);
9206 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
9207 if (m_shutdown == NULL) {
9211 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
9212 sctp_alloc_a_chunk(stcb, chk);
9215 sctp_m_freem(m_shutdown);
9218 chk->copy_by_ref = 0;
9219 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
9220 chk->rec.chunk_id.can_take_data = 1;
9222 chk->send_size = sizeof(struct sctp_shutdown_chunk);
9223 chk->sent = SCTP_DATAGRAM_UNSENT;
9226 chk->asoc = &stcb->asoc;
9227 chk->data = m_shutdown;
9230 atomic_add_int(&chk->whoTo->ref_count, 1);
9232 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
9233 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
9234 shutdown_cp->ch.chunk_flags = 0;
9235 shutdown_cp->ch.chunk_length = htons(chk->send_size);
9236 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9237 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
9238 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9239 chk->asoc->ctrl_queue_cnt++;
9241 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
9244 atomic_add_int(&chk->whoTo->ref_count, 1);
9246 shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
9247 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
9248 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9254 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
9257 * formulate and queue an ASCONF to the peer. ASCONF parameters
9258 * should be queued on the assoc queue.
9260 struct sctp_tmit_chunk *chk;
9261 struct mbuf *m_asconf;
9264 SCTP_TCB_LOCK_ASSERT(stcb);
9266 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
9267 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
9268 /* can't send a new one if there is one in flight already */
9271 /* compose an ASCONF chunk, maximum length is PMTU */
9272 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
9273 if (m_asconf == NULL) {
9276 sctp_alloc_a_chunk(stcb, chk);
9279 sctp_m_freem(m_asconf);
9282 chk->copy_by_ref = 0;
9283 chk->rec.chunk_id.id = SCTP_ASCONF;
9284 chk->rec.chunk_id.can_take_data = 0;
9285 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9286 chk->data = m_asconf;
9287 chk->send_size = len;
9288 chk->sent = SCTP_DATAGRAM_UNSENT;
9290 chk->asoc = &stcb->asoc;
9293 atomic_add_int(&chk->whoTo->ref_count, 1);
9295 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
9296 chk->asoc->ctrl_queue_cnt++;
9301 sctp_send_asconf_ack(struct sctp_tcb *stcb)
9304 * formulate and queue an ASCONF-ACK back to the sender. The ASCONF-ACK
9305 * must be stored in the tcb.
9307 struct sctp_tmit_chunk *chk;
9308 struct sctp_asconf_ack *ack, *latest_ack;
9310 struct sctp_nets *net = NULL;
9312 SCTP_TCB_LOCK_ASSERT(stcb);
9313 /* Get the latest ASCONF-ACK */
9314 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
9315 if (latest_ack == NULL) {
9318 if (latest_ack->last_sent_to != NULL &&
9319 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
9320 /* we're doing a retransmission */
9321 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
9324 if (stcb->asoc.last_control_chunk_from == NULL) {
9325 if (stcb->asoc.alternate) {
9326 net = stcb->asoc.alternate;
9328 net = stcb->asoc.primary_destination;
9331 net = stcb->asoc.last_control_chunk_from;
9336 if (stcb->asoc.last_control_chunk_from == NULL) {
9337 if (stcb->asoc.alternate) {
9338 net = stcb->asoc.alternate;
9340 net = stcb->asoc.primary_destination;
9343 net = stcb->asoc.last_control_chunk_from;
9346 latest_ack->last_sent_to = net;
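/*
 * Queue a fresh copy of every cached ASCONF-ACK that still has data,
 * each as its own control chunk aimed at the destination chosen above.
 */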
9348 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
9349 if (ack->data == NULL) {
9352 /* copy the asconf_ack */
9353 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
9354 if (m_ack == NULL) {
9355 /* couldn't copy it */
9358 #ifdef SCTP_MBUF_LOGGING
9359 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
9360 sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
9364 sctp_alloc_a_chunk(stcb, chk);
9368 sctp_m_freem(m_ack);
9371 chk->copy_by_ref = 0;
9372 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
9373 chk->rec.chunk_id.can_take_data = 1;
9374 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
9377 atomic_add_int(&chk->whoTo->ref_count, 1);
9380 chk->send_size = ack->len;
9381 chk->sent = SCTP_DATAGRAM_UNSENT;
9383 chk->asoc = &stcb->asoc;
9385 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
9386 chk->asoc->ctrl_queue_cnt++;
9393 sctp_chunk_retransmission(struct sctp_inpcb *inp,
9394 struct sctp_tcb *stcb,
9395 struct sctp_association *asoc,
9396 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
9397 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9403 * send out one MTU of retransmission. If fast_retransmit is
9404 * happening we ignore the cwnd. Otherwise we obey the cwnd and
9405 * rwnd. For a Cookie or Asconf in the control chunk queue we
9406 * retransmit them by themselves.
9408 * For data chunks we will pick out the lowest TSN's in the sent_queue
9409 * marked for resend and bundle them all together (up to an MTU of
9410 * the destination). The address to send to should have been
9411 * selected/changed where the retransmission was marked (i.e. in FR
9412 * or t3-timeout routines).
9414 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
9415 struct sctp_tmit_chunk *chk, *fwd;
9416 struct mbuf *m, *endofchain;
9417 struct sctp_nets *net = NULL;
9418 uint32_t tsns_sent = 0;
9419 int no_fragmentflg, bundle_at, cnt_thru;
9421 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
9422 struct sctp_auth_chunk *auth = NULL;
9423 uint32_t auth_offset = 0;
9424 uint16_t auth_keyid;
9425 int override_ok = 1;
9426 int data_auth_reqd = 0;
9429 SCTP_TCB_LOCK_ASSERT(stcb);
9430 tmr_started = ctl_cnt = bundle_at = error = 0;
9435 endofchain = m = NULL;
9436 auth_keyid = stcb->asoc.authinfo.active_keyid;
9437 #ifdef SCTP_AUDITING_ENABLED
9438 sctp_audit_log(0xC3, 1);
9440 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
9441 (TAILQ_EMPTY(&asoc->control_send_queue))) {
9442 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
9443 asoc->sent_queue_retran_cnt);
9444 asoc->sent_queue_cnt = 0;
9445 asoc->sent_queue_cnt_removeable = 0;
9446 /* send back 0/0 so we enter normal transmission */
9450 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9451 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
9452 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
9453 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
9454 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9457 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
9458 if (chk != asoc->str_reset) {
9460 * not eligible for retran if its
9467 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9471 * Add an AUTH chunk, if the chunk requires it, save the
9472 * offset into the chain for AUTH
9474 if ((auth == NULL) &&
9475 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
9476 stcb->asoc.peer_auth_chunks))) {
9477 m = sctp_add_auth_chunk(m, &endofchain,
9478 &auth, &auth_offset,
9480 chk->rec.chunk_id.id);
9481 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9483 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9489 /* do we have control chunks to retransmit? */
9491 /* Start a timer no matter if we succeed or fail */
9492 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9493 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9494 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9495 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9496 chk->snd_count++; /* update our count */
9497 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9498 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9499 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9500 no_fragmentflg, 0, 0,
9501 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9502 chk->whoTo->port, NULL,
9505 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9506 if (error == ENOBUFS) {
9507 asoc->ifp_had_enobuf = 1;
9508 SCTP_STAT_INCR(sctps_lowlevelerr);
9512 asoc->ifp_had_enobuf = 0;
9518 * We don't want to mark the net->sent time here since
9519 * we use this for HB and retrans cannot measure RTT
9521 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9523 chk->sent = SCTP_DATAGRAM_SENT;
9524 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9528 /* Clean up the fwd-tsn list */
9529 sctp_clean_up_ctl(stcb, asoc, so_locked);
9534 * Ok, it is just data retransmission we need to do or that and a
9535 * fwd-tsn with it all.
9537 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9538 return (SCTP_RETRAN_DONE);
9540 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9541 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9542 /* not yet open, resend the cookie and that is it */
9545 #ifdef SCTP_AUDITING_ENABLED
9546 sctp_auditing(20, inp, stcb, NULL);
9548 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
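/*
 * Walk the sent queue for the lowest TSNs marked SCTP_DATAGRAM_RESEND
 * and bundle as many of them as fit into a single MTU toward the
 * destination already chosen when they were marked for retransmission.
 */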
9549 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9550 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9551 /* No, not sent to this net or not ready for rtx */
9554 if (chk->data == NULL) {
9555 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9556 chk->rec.data.tsn, chk->snd_count, chk->sent);
9559 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9560 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9561 struct mbuf *op_err;
9562 char msg[SCTP_DIAG_INFO_LEN];
9564 snprintf(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
9565 chk->rec.data.tsn, chk->snd_count);
9566 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
9568 atomic_add_int(&stcb->asoc.refcnt, 1);
9569 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
9571 SCTP_TCB_LOCK(stcb);
9572 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9573 return (SCTP_RETRAN_EXIT);
9575 /* pick up the net */
9577 switch (net->ro._l_addr.sa.sa_family) {
9580 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9585 mtu = net->mtu - SCTP_MIN_OVERHEAD;
9594 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9595 /* No room in peers rwnd */
9598 tsn = asoc->last_acked_seq + 1;
9599 if (tsn == chk->rec.data.tsn) {
9601 * we make a special exception for this
9602 * case. The peer has no rwnd but is missing
9603 * the lowest chunk.. which is probably what
9604 * is holding up the rwnd.
9606 goto one_chunk_around;
9611 if (asoc->peers_rwnd < mtu) {
9613 if ((asoc->peers_rwnd == 0) &&
9614 (asoc->total_flight == 0)) {
9615 chk->window_probe = 1;
9616 chk->whoTo->window_probe = 1;
9619 #ifdef SCTP_AUDITING_ENABLED
9620 sctp_audit_log(0xC3, 2);
9624 net->fast_retran_ip = 0;
9625 if (chk->rec.data.doing_fast_retransmit == 0) {
9627 * if no FR in progress skip destinations that have
9628 * flight_size > cwnd.
9630 if (net->flight_size >= net->cwnd) {
9635 * Mark the destination net to have FR recovery
9639 net->fast_retran_ip = 1;
9643 * if no AUTH is yet included and this chunk requires it,
9644 * make sure to account for it. We don't apply the size
9645 * until the AUTH chunk is actually added below in case
9646 * there is no room for this chunk.
9648 if (data_auth_reqd && (auth == NULL)) {
9649 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9653 if ((chk->send_size <= (mtu - dmtu)) ||
9654 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9655 /* ok we will add this one */
9656 if (data_auth_reqd) {
9658 m = sctp_add_auth_chunk(m,
9664 auth_keyid = chk->auth_keyid;
9666 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9667 } else if (override_ok) {
9668 auth_keyid = chk->auth_keyid;
9670 } else if (chk->auth_keyid != auth_keyid) {
9671 /* different keyid, so done bundling */
9675 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9677 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9680 /* Do clear IP_DF ? */
9681 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9684 /* update our MTU size */
9685 if (mtu > (chk->send_size + dmtu))
9686 mtu -= (chk->send_size + dmtu);
9689 data_list[bundle_at++] = chk;
9690 if (one_chunk && (asoc->total_flight <= 0)) {
9691 SCTP_STAT_INCR(sctps_windowprobed);
9694 if (one_chunk == 0) {
9696 * now are there any more forward from chk to pick
9699 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
9700 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9701 /* Nope, not for retran */
9704 if (fwd->whoTo != net) {
9705 /* Nope, not the net in question */
9708 if (data_auth_reqd && (auth == NULL)) {
9709 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9712 if (fwd->send_size <= (mtu - dmtu)) {
9713 if (data_auth_reqd) {
9715 m = sctp_add_auth_chunk(m,
9721 auth_keyid = fwd->auth_keyid;
9723 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9724 } else if (override_ok) {
9725 auth_keyid = fwd->auth_keyid;
9727 } else if (fwd->auth_keyid != auth_keyid) {
9735 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9737 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9740 /* Do clear IP_DF ? */
9741 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9744 /* update our MTU size */
9745 if (mtu > (fwd->send_size + dmtu))
9746 mtu -= (fwd->send_size + dmtu);
9749 data_list[bundle_at++] = fwd;
9750 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9754 /* can't fit so we are done */
9759 /* Is there something to send for this destination? */
9762 * No matter if we fail or succeed, we should start a
9763 * timer. A failure is like a lost IP packet :-)
9765 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9767 * no timer running on this destination
9770 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9773 /* Now lets send it, if there is anything to send :> */
9774 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9775 (struct sockaddr *)&net->ro._l_addr, m,
9776 auth_offset, auth, auth_keyid,
9777 no_fragmentflg, 0, 0,
9778 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9782 /* error, we could not output */
9783 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
9784 if (error == ENOBUFS) {
9785 asoc->ifp_had_enobuf = 1;
9786 SCTP_STAT_INCR(sctps_lowlevelerr);
9790 asoc->ifp_had_enobuf = 0;
9797 * We don't want to mark the net->sent time here
9798 * since we use this for HB and retrans cannot
9801 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9803 /* For auto-close */
9805 if (*now_filled == 0) {
9806 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9807 *now = asoc->time_last_sent;
9810 asoc->time_last_sent = *now;
9812 *cnt_out += bundle_at;
9813 #ifdef SCTP_AUDITING_ENABLED
9814 sctp_audit_log(0xC4, bundle_at);
9817 tsns_sent = data_list[0]->rec.data.tsn;
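/*
 * Book-keeping for every chunk in the packet just sent: mark it SENT,
 * undo any revoked accounting, bump its send count, charge it against
 * the peer's rwnd again and put it back into the flight-size totals.
 */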
9819 for (i = 0; i < bundle_at; i++) {
9820 SCTP_STAT_INCR(sctps_sendretransdata);
9821 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9823 * When we have a revoked data chunk and we
9824 * retransmit it, we clear the revoked
9825 * flag since this flag dictates if we
9826 * subtracted from the fs
9828 if (data_list[i]->rec.data.chunk_was_revoked) {
9829 /* Deflate the cwnd */
9830 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9831 data_list[i]->rec.data.chunk_was_revoked = 0;
9833 data_list[i]->snd_count++;
9834 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9835 /* record the time */
9836 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9837 if (data_list[i]->book_size_scale) {
9839 * need to double the book size on
9842 data_list[i]->book_size_scale = 0;
9844 * Since we double the booksize, we
9845 * must also double the output queue
9846 * size, since this gets shrunk when
9847 * we free by this amount.
9849 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9850 data_list[i]->book_size *= 2;
9854 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9855 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9856 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9858 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9859 (uint32_t)(data_list[i]->send_size +
9860 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9862 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9863 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9864 data_list[i]->whoTo->flight_size,
9865 data_list[i]->book_size,
9866 (uint32_t)(uintptr_t)data_list[i]->whoTo,
9867 data_list[i]->rec.data.tsn);
9869 sctp_flight_size_increase(data_list[i]);
9870 sctp_total_flight_increase(stcb, data_list[i]);
9871 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9872 /* SWS sender side engages */
9873 asoc->peers_rwnd = 0;
9876 (data_list[i]->rec.data.doing_fast_retransmit)) {
9877 SCTP_STAT_INCR(sctps_sendfastretrans);
9878 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9879 (tmr_started == 0)) {
9881 * ok we just fast-retrans'd
9882 * the lowest TSN, i.e. the
9883 * first on the list. In
9884 * this case we want to give
9885 * some more time to get a
9886 * SACK back without a
9889 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9890 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
9891 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9895 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9896 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9898 #ifdef SCTP_AUDITING_ENABLED
9899 sctp_auditing(21, inp, stcb, NULL);
9905 if (asoc->sent_queue_retran_cnt <= 0) {
9906 /* all done we have no more to retran */
9907 asoc->sent_queue_retran_cnt = 0;
9911 /* No more room in rwnd */
9914 /* stop the for loop here. we sent out a packet */
9921 sctp_timer_validation(struct sctp_inpcb *inp,
9922 struct sctp_tcb *stcb,
9923 struct sctp_association *asoc)
9925 struct sctp_nets *net;
9927 /* Validate that a timer is running somewhere */
9928 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9929 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9930 /* Here is a timer */
9934 SCTP_TCB_LOCK_ASSERT(stcb);
9935 /* Gak, we did not have a timer somewhere */
9936 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9937 if (asoc->alternate) {
9938 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
9940 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9946 sctp_chunk_output(struct sctp_inpcb *inp,
9947 struct sctp_tcb *stcb,
9950 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9956 * Ok this is the generic chunk service queue. we must do the
9958 * - See if there are retransmits pending, if so we must
9960 * - Service the stream queue that is next, moving any
9961 * message (note I must get a complete message i.e.
9962 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9964 * - Check to see if the cwnd/rwnd allows any output, if so we
9965 * go ahead and formulate and send the low level chunks, making sure
9966 * to combine any control in the control chunk queue also.
9968 struct sctp_association *asoc;
9969 struct sctp_nets *net;
9970 int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
9971 unsigned int burst_cnt = 0;
9975 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9978 unsigned int tot_frs = 0;
9982 /* The Nagle algorithm is only applied when handling a send call. */
9983 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9984 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9992 SCTP_TCB_LOCK_ASSERT(stcb);
9994 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9996 if ((un_sent <= 0) &&
9997 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9998 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9999 (asoc->sent_queue_retran_cnt == 0) &&
10000 (asoc->trigger_reset == 0)) {
10001 /* Nothing to do unless there is something to be sent left */
10005 * Do we have something to send, data or control AND a sack timer
10006 * running, if so piggy-back the sack.
10008 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
10009 sctp_send_sack(stcb, so_locked);
10010 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
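/*
 * Retransmission phase: loop while chunks are marked for retransmission,
 * sending at most one packet of retransmissions per pass (some entry
 * points only push control here), and stop once the fast-retransmit
 * burst limit is hit or nothing more could be sent.
 */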
10012 while (asoc->sent_queue_retran_cnt) {
10014 * Ok, it is retransmission time only, we send out only ONE
10015 * packet with a single call off to the retran code.
10017 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
10019 * Special hook for handling cookies discarded
10020 * by the peer that carried data. Send cookie-ack only
10021 * and then the next call will get the retrans.
10023 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10025 &now, &now_filled, frag_point, so_locked);
10027 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
10028 /* if it's not from a HB then do it */
10030 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
10036 * it's from any other place, we don't allow retran
10037 * output (only control)
10042 /* Can't send anymore */
10044 * now lets push out control by calling med-level
10045 * output once. this assures that we WILL send HB's
10048 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
10050 &now, &now_filled, frag_point, so_locked);
10051 #ifdef SCTP_AUDITING_ENABLED
10052 sctp_auditing(8, inp, stcb, NULL);
10054 sctp_timer_validation(inp, stcb, asoc);
10059 * The count was off.. retran is not happening so do
10060 * the normal retransmission.
10062 #ifdef SCTP_AUDITING_ENABLED
10063 sctp_auditing(9, inp, stcb, NULL);
10065 if (ret == SCTP_RETRAN_EXIT) {
10070 if (from_where == SCTP_OUTPUT_FROM_T3) {
10071 /* Only one transmission allowed out of a timeout */
10072 #ifdef SCTP_AUDITING_ENABLED
10073 sctp_auditing(10, inp, stcb, NULL);
10075 /* Push out any control */
10076 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
10077 &now, &now_filled, frag_point, so_locked);
10080 if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
10081 /* Hit FR burst limit */
10084 if ((num_out == 0) && (ret == 0)) {
10085 /* No more retrans to send */
10089 #ifdef SCTP_AUDITING_ENABLED
10090 sctp_auditing(12, inp, stcb, NULL);
10092 /* Check for bad destinations, if they exist move chunks around. */
10093 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
10094 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
10096 * if possible, move things off of this address. We
10097 * still may send below due to the dormant state, but
10098 * we try to find an alternate address to send to,
10099 * and if we have one we move all queued data on the
10100 * out wheel to this alternate address.
10102 if (net->ref_count > 1)
10103 sctp_move_chunks_from_net(stcb, net);
10106 * if ((asoc->sat_network) || (net->addr_is_local))
10107 * { burst_limit = asoc->max_burst *
10108 * SCTP_SAT_NETWORK_BURST_INCR; }
10110 if (asoc->max_burst > 0) {
10111 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
10112 if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
10114 * JRS - Use the congestion
10115 * control given in the
10116 * congestion control module
10118 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
10119 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10120 sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
10122 SCTP_STAT_INCR(sctps_maxburstqueued);
10124 net->fast_retran_ip = 0;
10126 if (net->flight_size == 0) {
10128 * Should be decaying the
10140 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
10141 &reason_code, 0, from_where,
10142 &now, &now_filled, frag_point, so_locked);
10144 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
10145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10146 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
10148 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10149 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
10150 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
10154 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
10156 tot_out += num_out;
10158 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10159 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
10160 if (num_out == 0) {
10161 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
10166 * When the Nagle algorithm is used, look at how
10167 * much is unsent, then if it is smaller than an MTU
10168 * and we have data in flight we stop, except if we
10169 * are handling a fragmented user message.
10171 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
10172 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
10173 (stcb->asoc.total_flight > 0)) {
10174 /* && sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
10178 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
10179 TAILQ_EMPTY(&asoc->send_queue) &&
10180 sctp_is_there_unsent_data(stcb, so_locked) == 0) {
10181 /* Nothing left to send */
10184 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
10185 /* Nothing left to send */
10188 } while (num_out &&
10189 ((asoc->max_burst == 0) ||
10190 SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
10191 (burst_cnt < asoc->max_burst)));
10193 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
10194 if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
10195 SCTP_STAT_INCR(sctps_maxburstqueued);
10196 asoc->burst_limit_applied = 1;
10197 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
10198 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
10201 asoc->burst_limit_applied = 0;
10204 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
10205 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
10207 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
10211 * Now we need to clean up the control chunk chain if an ECNE is on
10212 * it. It must be marked as UNSENT again so the next call will continue
10213 * to send it until such time that we get a CWR, to remove it.
10215 if (stcb->asoc.ecn_echo_cnt_onq)
10216 sctp_fix_ecn_echo(asoc);
10218 if (stcb->asoc.trigger_reset) {
10219 if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0) {
10229 struct sctp_inpcb *inp,
10231 struct sockaddr *addr,
10232 struct mbuf *control,
10237 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10240 if (inp->sctp_socket == NULL) {
10241 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
10244 return (sctp_sosend(inp->sctp_socket,
10246 (struct uio *)NULL,
10254 send_forward_tsn(struct sctp_tcb *stcb,
10255 struct sctp_association *asoc)
10257 struct sctp_tmit_chunk *chk, *at, *tp1, *last;
10258 struct sctp_forward_tsn_chunk *fwdtsn;
10259 struct sctp_strseq *strseq;
10260 struct sctp_strseq_mid *strseq_m;
10261 uint32_t advance_peer_ack_point;
10262 unsigned int cnt_of_space, i, ovh;
10263 unsigned int space_needed;
10264 unsigned int cnt_of_skipped = 0;
10266 SCTP_TCB_LOCK_ASSERT(stcb);
10267 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10268 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
10269 /* mark it to unsent */
10270 chk->sent = SCTP_DATAGRAM_UNSENT;
10271 chk->snd_count = 0;
10272 /* Do we correct its output location? */
10274 sctp_free_remote_addr(chk->whoTo);
10277 goto sctp_fill_in_rest;
10280 /* Ok if we reach here we must build one */
10281 sctp_alloc_a_chunk(stcb, chk);
10285 asoc->fwd_tsn_cnt++;
10286 chk->copy_by_ref = 0;
10288 * We don't do the old thing here since this is used not for on-wire
10289 * but to tell if we are sending a fwd-tsn by the stack during
10290 * output. And if it is an IFORWARD or a FORWARD, it is a fwd-tsn.
10292 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
10293 chk->rec.chunk_id.can_take_data = 0;
10297 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
10298 if (chk->data == NULL) {
10299 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
10302 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10303 chk->sent = SCTP_DATAGRAM_UNSENT;
10304 chk->snd_count = 0;
10305 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
10306 asoc->ctrl_queue_cnt++;
10309 * Here we go through and fill out the part that deals with
10310 * stream/seq of the ones we skip.
10312 SCTP_BUF_LEN(chk->data) = 0;
10313 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10314 if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
10315 (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
10316 /* no more to look at */
10319 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10320 /* We don't report these */
10325 if (asoc->idata_supported) {
10326 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10327 (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
10329 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
10330 (cnt_of_skipped * sizeof(struct sctp_strseq)));
10332 cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
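/*
 * Limit the FORWARD-TSN to what fits in the mbuf's trailing space and
 * within one path MTU, using the IPv6 or IPv4 overhead as appropriate.
 */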
10334 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
10335 ovh = SCTP_MIN_OVERHEAD;
10337 ovh = SCTP_MIN_V4_OVERHEAD;
10339 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
10340 /* trim to a mtu size */
10341 cnt_of_space = asoc->smallest_mtu - ovh;
10343 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10344 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10345 0xff, 0, cnt_of_skipped,
10346 asoc->advanced_peer_ack_point);
10348 advance_peer_ack_point = asoc->advanced_peer_ack_point;
10349 if (cnt_of_space < space_needed) {
10351 * ok we must trim down the chunk by lowering the
10352 * advance peer ack point.
10354 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10355 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10356 0xff, 0xff, cnt_of_space,
10359 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
10360 if (asoc->idata_supported) {
10361 cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
10363 cnt_of_skipped /= sizeof(struct sctp_strseq);
10366 * Go through and find the TSN that will be the one
10369 at = TAILQ_FIRST(&asoc->sent_queue);
10371 for (i = 0; i < cnt_of_skipped; i++) {
10372 tp1 = TAILQ_NEXT(at, sctp_next);
10379 if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
10380 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
10381 0xff, cnt_of_skipped, at->rec.data.tsn,
10382 asoc->advanced_peer_ack_point);
10386 * last now points to last one I can report, update
10390 advance_peer_ack_point = last->rec.data.tsn;
10392 if (asoc->idata_supported) {
10393 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10394 cnt_of_skipped * sizeof(struct sctp_strseq_mid);
10396 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
10397 cnt_of_skipped * sizeof(struct sctp_strseq);
10400 chk->send_size = space_needed;
10401 /* Setup the chunk */
10402 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
10403 fwdtsn->ch.chunk_length = htons(chk->send_size);
10404 fwdtsn->ch.chunk_flags = 0;
10405 if (asoc->idata_supported) {
10406 fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
10408 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
10410 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
10411 SCTP_BUF_LEN(chk->data) = chk->send_size;
10414 * Move pointer to after the fwdtsn and transfer to the
10417 if (asoc->idata_supported) {
10418 strseq_m = (struct sctp_strseq_mid *)fwdtsn;
10421 strseq = (struct sctp_strseq *)fwdtsn;
10425 * Now populate the strseq list. This is done blindly
10426 * without pulling out duplicate stream info. This is
10427 * inefficient but won't harm the process since the peer will
10428 * look at these in sequence and will thus release anything.
10429 * It could mean we exceed the PMTU and chop off some that
10430 * we could have included.. but this is unlikely (aka 1432/4
10431 * would mean 300+ stream seq's would have to be reported in
10432 * one FWD-TSN). With a bit of work we can later FIX this to
10433 * optimize and pull out duplicates.. but it does add more
10434 * overhead. So for now... not!
10437 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
10438 if (i >= cnt_of_skipped) {
10441 if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
10442 /* We don't report these */
10445 if (at->rec.data.tsn == advance_peer_ack_point) {
10446 at->rec.data.fwd_tsn_cnt = 0;
10448 if (asoc->idata_supported) {
10449 strseq_m->sid = htons(at->rec.data.sid);
10450 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
10451 strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
10453 strseq_m->flags = 0;
10455 strseq_m->mid = htonl(at->rec.data.mid);
10458 strseq->sid = htons(at->rec.data.sid);
10459 strseq->ssn = htons((uint16_t)at->rec.data.mid);
10468 sctp_send_sack(struct sctp_tcb *stcb, int so_locked
10469 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10475 * Queue up a SACK or NR-SACK in the control queue.
10476 * We must first check to see if a SACK or NR-SACK is
10477 * somehow on the control queue.
10478 * If so, we will take and remove the old one.
10480 struct sctp_association *asoc;
10481 struct sctp_tmit_chunk *chk, *a_chk;
10482 struct sctp_sack_chunk *sack;
10483 struct sctp_nr_sack_chunk *nr_sack;
10484 struct sctp_gap_ack_block *gap_descriptor;
10485 const struct sack_track *selector;
10490 int limit_reached = 0;
10491 unsigned int i, siz, j;
10492 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10495 uint32_t highest_tsn;
10500 if (stcb->asoc.nrsack_supported == 1) {
10501 type = SCTP_NR_SELECTIVE_ACK;
10503 type = SCTP_SELECTIVE_ACK;
10506 asoc = &stcb->asoc;
10507 SCTP_TCB_LOCK_ASSERT(stcb);
10508 if (asoc->last_data_chunk_from == NULL) {
10509 /* Hmm we never received anything */
10512 sctp_slide_mapping_arrays(stcb);
10513 sctp_set_rwnd(stcb, asoc);
10514 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10515 if (chk->rec.chunk_id.id == type) {
10516 /* Hmm, found a sack already on queue, remove it */
10517 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10518 asoc->ctrl_queue_cnt--;
10521 sctp_m_freem(a_chk->data);
10522 a_chk->data = NULL;
10524 if (a_chk->whoTo) {
10525 sctp_free_remote_addr(a_chk->whoTo);
10526 a_chk->whoTo = NULL;
10531 if (a_chk == NULL) {
10532 sctp_alloc_a_chunk(stcb, a_chk);
10533 if (a_chk == NULL) {
10534 /* No memory so we drop the idea, and set a timer */
10535 if (stcb->asoc.delayed_ack) {
10536 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10537 stcb->sctp_ep, stcb, NULL,
10538 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
10539 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10540 stcb->sctp_ep, stcb, NULL);
10542 stcb->asoc.send_sack = 1;
10546 a_chk->copy_by_ref = 0;
10547 a_chk->rec.chunk_id.id = type;
10548 a_chk->rec.chunk_id.can_take_data = 1;
10550 /* Clear our pkt counts */
10551 asoc->data_pkts_seen = 0;
10554 a_chk->asoc = asoc;
10555 a_chk->snd_count = 0;
10556 a_chk->send_size = 0; /* fill in later */
10557 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10558 a_chk->whoTo = NULL;
10560 if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
10562 * Ok, the destination for the SACK is unreachable, let's see if
10563 * we can select an alternate to asoc->last_data_chunk_from
10565 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10566 if (a_chk->whoTo == NULL) {
10567 /* Nope, no alternate */
10568 a_chk->whoTo = asoc->last_data_chunk_from;
10571 a_chk->whoTo = asoc->last_data_chunk_from;
10573 if (a_chk->whoTo) {
10574 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10576 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
10577 highest_tsn = asoc->highest_tsn_inside_map;
10579 highest_tsn = asoc->highest_tsn_inside_nr_map;
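/*
 * If nothing has arrived beyond the cumulative TSN there are no gaps
 * to report, so a bare (NR-)SACK header is enough; otherwise request a
 * cluster so the gap-ack blocks have room.
 */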
10581 if (highest_tsn == asoc->cumulative_tsn) {
10583 if (type == SCTP_SELECTIVE_ACK) {
10584 space_req = sizeof(struct sctp_sack_chunk);
10586 space_req = sizeof(struct sctp_nr_sack_chunk);
10589 /* gaps get a cluster */
10590 space_req = MCLBYTES;
10592 /* Ok, now let's formulate an mbuf with our SACK */
10593 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
10594 if ((a_chk->data == NULL) ||
10595 (a_chk->whoTo == NULL)) {
10596 /* rats, no mbuf memory */
10598 /* was a problem with the destination */
10599 sctp_m_freem(a_chk->data);
10600 a_chk->data = NULL;
10602 sctp_free_a_chunk(stcb, a_chk, so_locked);
10603 /* sa_ignore NO_NULL_CHK */
10604 if (stcb->asoc.delayed_ack) {
10605 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10606 stcb->sctp_ep, stcb, NULL,
10607 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
10608 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10609 stcb->sctp_ep, stcb, NULL);
10611 stcb->asoc.send_sack = 1;
10615 /* OK, let's go through and fill it in */
10616 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10617 space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
10618 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10619 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10621 limit = mtod(a_chk->data, caddr_t);
10626 if ((asoc->sctp_cmt_on_off > 0) &&
10627 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10629 * CMT DAC algorithm: If 2 (i.e., binary 10) packets have been
10630 * received, then set high bit to 1, else 0. Reset
10633 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10634 asoc->cmt_dac_pkts_rcvd = 0;
10636 #ifdef SCTP_ASOCLOG_OF_TSNS
10637 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10638 stcb->asoc.cumack_log_atsnt++;
10639 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10640 stcb->asoc.cumack_log_atsnt = 0;
10643 /* reset the reader's interpretation */
10644 stcb->freed_by_sorcv_sincelast = 0;
10646 if (type == SCTP_SELECTIVE_ACK) {
10647 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10649 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10650 if (highest_tsn > asoc->mapping_array_base_tsn) {
10651 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10653 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10657 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10658 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10659 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10660 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10662 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10666 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10669 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10671 if (((type == SCTP_SELECTIVE_ACK) &&
10672 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
10673 ((type == SCTP_NR_SELECTIVE_ACK) &&
10674 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
10675 /* we have a gap .. maybe */
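/*
 * Walk the TSN mapping array one byte at a time. Each byte value
 * indexes the precomputed sack_array[] table, which lists up to
 * SCTP_MAX_GAPS_INARRAY gap ack blocks for that bit pattern (e.g. a
 * map byte of 0x06, bits 1 and 2 set, yields a single block covering
 * those two TSNs). The left/right edge flags let a run of received
 * TSNs that crosses a byte boundary be reported as one merged block,
 * and 'offset' converts bit positions into distances from the
 * cumulative TSN.
 */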
10676 for (i = 0; i < siz; i++) {
10677 tsn_map = asoc->mapping_array[i];
10678 if (type == SCTP_SELECTIVE_ACK) {
10679 tsn_map |= asoc->nr_mapping_array[i];
10683 * Clear all bits corresponding to TSNs
10684 * smaller or equal to the cumulative TSN.
10686 tsn_map &= (~0U << (1 - offset));
10688 selector = &sack_array[tsn_map];
10689 if (mergeable && selector->right_edge) {
10691 * Backup, left and right edges were ok to
10697 if (selector->num_entries == 0)
10700 for (j = 0; j < selector->num_entries; j++) {
10701 if (mergeable && selector->right_edge) {
10703 * do a merge by NOT setting
10709 * no merge, set the left
10713 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10715 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10718 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10724 if (selector->left_edge) {
10728 if (limit_reached) {
10729 /* Reached the limit, stop */
10735 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10736 (limit_reached == 0)) {
10740 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
10741 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10743 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10746 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
10749 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10751 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
10752 /* we have a gap .. maybe */
10753 for (i = 0; i < siz; i++) {
10754 tsn_map = asoc->nr_mapping_array[i];
10757 * Clear all bits corresponding to
10758 * TSNs smaller or equal to the
10761 tsn_map &= (~0U << (1 - offset));
10763 selector = &sack_array[tsn_map];
10764 if (mergeable && selector->right_edge) {
10766 * Backup, left and right edges were
10769 num_nr_gap_blocks--;
10772 if (selector->num_entries == 0)
10775 for (j = 0; j < selector->num_entries; j++) {
10776 if (mergeable && selector->right_edge) {
10778 * do a merge by NOT
10785 * no merge, set the
10789 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10791 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10792 num_nr_gap_blocks++;
10794 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10800 if (selector->left_edge) {
10804 if (limit_reached) {
10805 /* Reached the limit, stop */
10812 /* now we must add any dups we are going to report. */
10813 if ((limit_reached == 0) && (asoc->numduptsns)) {
10814 dup = (uint32_t *)gap_descriptor;
10815 for (i = 0; i < asoc->numduptsns; i++) {
10816 *dup = htonl(asoc->dup_tsns[i]);
10819 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10824 asoc->numduptsns = 0;
10827 * now that the chunk is prepared queue it to the control chunk
10830 if (type == SCTP_SELECTIVE_ACK) {
10831 a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
10832 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10833 num_dups * sizeof(int32_t));
10834 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10835 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10836 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10837 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10838 sack->sack.num_dup_tsns = htons(num_dups);
10839 sack->ch.chunk_type = type;
10840 sack->ch.chunk_flags = flags;
10841 sack->ch.chunk_length = htons(a_chk->send_size);
10843 a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
10844 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10845 num_dups * sizeof(int32_t));
10846 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10847 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10848 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10849 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10850 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10851 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10852 nr_sack->nr_sack.reserved = 0;
10853 nr_sack->ch.chunk_type = type;
10854 nr_sack->ch.chunk_flags = flags;
10855 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10857 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10858 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10859 asoc->ctrl_queue_cnt++;
10860 asoc->send_sack = 0;
10861 SCTP_STAT_INCR(sctps_sendsacks);
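/*
 * Build and send an ABORT chunk for an existing association. An AUTH
 * chunk is prepended if the peer requires ABORT to be authenticated,
 * any error causes passed in 'operr' are chained behind the chunk, the
 * result is padded to a 4-byte boundary and handed to
 * sctp_lowlevel_chunk_output(). While in COOKIE-WAIT (no peer vtag
 * yet) our own vtag is used and the T bit is set.
 */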
10866 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10867 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10872 struct mbuf *m_abort, *m, *m_last;
10873 struct mbuf *m_out, *m_end = NULL;
10874 struct sctp_abort_chunk *abort;
10875 struct sctp_auth_chunk *auth = NULL;
10876 struct sctp_nets *net;
10878 uint32_t auth_offset = 0;
10880 uint16_t cause_len, chunk_len, padding_len;
10882 SCTP_TCB_LOCK_ASSERT(stcb);
10884 * Add an AUTH chunk, if chunk requires it and save the offset into
10885 * the chain for AUTH
10887 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10888 stcb->asoc.peer_auth_chunks)) {
10889 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
10890 stcb, SCTP_ABORT_ASSOCIATION);
10891 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10895 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
10896 if (m_abort == NULL) {
10898 sctp_m_freem(m_out);
10901 sctp_m_freem(operr);
10905 /* link in any error */
10906 SCTP_BUF_NEXT(m_abort) = operr;
10909 for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
10910 cause_len += (uint16_t)SCTP_BUF_LEN(m);
10911 if (SCTP_BUF_NEXT(m) == NULL) {
10915 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
10916 chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
10917 padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
10918 if (m_out == NULL) {
10919 /* NO Auth chunk prepended, so reserve space in front */
10920 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10923 /* Put AUTH chunk at the front of the chain */
10924 SCTP_BUF_NEXT(m_end) = m_abort;
10926 if (stcb->asoc.alternate) {
10927 net = stcb->asoc.alternate;
10929 net = stcb->asoc.primary_destination;
10931 /* Fill in the ABORT chunk header. */
10932 abort = mtod(m_abort, struct sctp_abort_chunk *);
10933 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10934 if (stcb->asoc.peer_vtag == 0) {
10935 /* This happens iff the assoc is in COOKIE-WAIT state. */
10936 vtag = stcb->asoc.my_vtag;
10937 abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
10939 vtag = stcb->asoc.peer_vtag;
10940 abort->ch.chunk_flags = 0;
10942 abort->ch.chunk_length = htons(chunk_len);
10943 /* Add padding, if necessary. */
10944 if (padding_len > 0) {
10945 if ((m_last == NULL) ||
10946 (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
10947 sctp_m_freem(m_out);
10951 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10952 (struct sockaddr *)&net->ro._l_addr,
10953 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
10954 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
10955 stcb->asoc.primary_destination->port, NULL,
10958 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
10959 if (error == ENOBUFS) {
10960 stcb->asoc.ifp_had_enobuf = 1;
10961 SCTP_STAT_INCR(sctps_lowlevelerr);
10964 stcb->asoc.ifp_had_enobuf = 0;
10966 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
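/*
 * Build and send a SHUTDOWN-COMPLETE chunk to the given net. When
 * reflect_vtag is set the chunk carries the T bit (SCTP_HAD_NO_TCB)
 * and our own verification tag is used instead of the peer's.
 */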
10970 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10971 struct sctp_nets *net,
10974 /* formulate and SEND a SHUTDOWN-COMPLETE */
10975 struct mbuf *m_shutdown_comp;
10976 struct sctp_shutdown_complete_chunk *shutdown_complete;
10981 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
10982 if (m_shutdown_comp == NULL) {
10986 if (reflect_vtag) {
10987 flags = SCTP_HAD_NO_TCB;
10988 vtag = stcb->asoc.my_vtag;
10991 vtag = stcb->asoc.peer_vtag;
10993 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10994 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10995 shutdown_complete->ch.chunk_flags = flags;
10996 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10997 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10998 if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10999 (struct sockaddr *)&net->ro._l_addr,
11000 m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
11001 stcb->sctp_ep->sctp_lport, stcb->rport,
11005 SCTP_SO_NOT_LOCKED))) {
11006 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
11007 if (error == ENOBUFS) {
11008 stcb->asoc.ifp_had_enobuf = 1;
11009 SCTP_STAT_INCR(sctps_lowlevelerr);
11012 stcb->asoc.ifp_had_enobuf = 0;
11014 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
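/*
 * Generic responder for packets we cannot associate with a TCB: build
 * a single chunk of the given type (ABORT, OPERATION-ERROR or
 * SHUTDOWN-COMPLETE) addressed back to the sender described by
 * src/dst/sh, optionally wrapped in a UDP encapsulation header, and
 * transmit it. The addresses and ports of the incoming packet are
 * swapped to form the reply.
 */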
11019 sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
11020 struct sctphdr *sh, uint32_t vtag,
11021 uint8_t type, struct mbuf *cause,
11022 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11023 uint32_t vrf_id, uint16_t port)
11025 struct mbuf *o_pak;
11027 struct sctphdr *shout;
11028 struct sctp_chunkhdr *ch;
11029 #if defined(INET) || defined(INET6)
11030 struct udphdr *udp;
11033 int len, cause_len, padding_len;
11035 struct sockaddr_in *src_sin, *dst_sin;
11039 struct sockaddr_in6 *src_sin6, *dst_sin6;
11040 struct ip6_hdr *ip6;
11043 /* Compute the length of the cause and add final padding. */
11045 if (cause != NULL) {
11046 struct mbuf *m_at, *m_last = NULL;
11048 for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
11049 if (SCTP_BUF_NEXT(m_at) == NULL)
11051 cause_len += SCTP_BUF_LEN(m_at);
11053 padding_len = cause_len % 4;
11054 if (padding_len != 0) {
11055 padding_len = 4 - padding_len;
11057 if (padding_len != 0) {
11058 if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
11059 sctp_m_freem(cause);
11066 /* Get an mbuf for the header. */
11067 len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
11068 switch (dst->sa_family) {
11071 len += sizeof(struct ip);
11076 len += sizeof(struct ip6_hdr);
11082 #if defined(INET) || defined(INET6)
11084 len += sizeof(struct udphdr);
11087 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
11088 if (mout == NULL) {
11090 sctp_m_freem(cause);
11094 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11095 SCTP_BUF_LEN(mout) = len;
11096 SCTP_BUF_NEXT(mout) = cause;
11097 M_SETFIB(mout, fibnum);
11098 mout->m_pkthdr.flowid = mflowid;
11099 M_HASHTYPE_SET(mout, mflowtype);
11106 switch (dst->sa_family) {
11109 src_sin = (struct sockaddr_in *)src;
11110 dst_sin = (struct sockaddr_in *)dst;
11111 ip = mtod(mout, struct ip *);
11112 ip->ip_v = IPVERSION;
11113 ip->ip_hl = (sizeof(struct ip) >> 2);
11115 ip->ip_off = htons(IP_DF);
11117 ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
11119 ip->ip_p = IPPROTO_UDP;
11121 ip->ip_p = IPPROTO_SCTP;
11123 ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
11124 ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
11126 len = sizeof(struct ip);
11127 shout = (struct sctphdr *)((caddr_t)ip + len);
11132 src_sin6 = (struct sockaddr_in6 *)src;
11133 dst_sin6 = (struct sockaddr_in6 *)dst;
11134 ip6 = mtod(mout, struct ip6_hdr *);
11135 ip6->ip6_flow = htonl(0x60000000);
11136 if (V_ip6_auto_flowlabel) {
11137 ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
11139 ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11141 ip6->ip6_nxt = IPPROTO_UDP;
11143 ip6->ip6_nxt = IPPROTO_SCTP;
11145 ip6->ip6_src = dst_sin6->sin6_addr;
11146 ip6->ip6_dst = src_sin6->sin6_addr;
11147 len = sizeof(struct ip6_hdr);
11148 shout = (struct sctphdr *)((caddr_t)ip6 + len);
11153 shout = mtod(mout, struct sctphdr *);
11156 #if defined(INET) || defined(INET6)
11158 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
11159 sctp_m_freem(mout);
11162 udp = (struct udphdr *)shout;
11163 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11164 udp->uh_dport = port;
11166 udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
11167 sizeof(struct sctphdr) +
11168 sizeof(struct sctp_chunkhdr) +
11169 cause_len + padding_len));
11170 len += sizeof(struct udphdr);
11171 shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
11176 shout->src_port = sh->dest_port;
11177 shout->dest_port = sh->src_port;
11178 shout->checksum = 0;
11180 shout->v_tag = htonl(vtag);
11182 shout->v_tag = sh->v_tag;
11184 len += sizeof(struct sctphdr);
11185 ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
11186 ch->chunk_type = type;
11188 ch->chunk_flags = 0;
11190 ch->chunk_flags = SCTP_HAD_NO_TCB;
11192 ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
11193 len += sizeof(struct sctp_chunkhdr);
11194 len += cause_len + padding_len;
11196 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11197 sctp_m_freem(mout);
11200 SCTP_ATTACH_CHAIN(o_pak, mout, len);
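/*
 * Finish the checksums: with UDP encapsulation the CRC32c is computed
 * in software here and the UDP checksum filled in; otherwise the
 * CRC32c is delegated to checksum offload via the CSUM_SCTP /
 * CSUM_SCTP_IPV6 m_pkthdr flags.
 */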
11201 switch (dst->sa_family) {
11206 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11211 ip->ip_len = htons(len);
11213 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
11214 SCTP_STAT_INCR(sctps_sendswcrc);
11216 SCTP_ENABLE_UDP_CSUM(o_pak);
11219 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11220 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11221 SCTP_STAT_INCR(sctps_sendhwcrc);
11223 #ifdef SCTP_PACKET_LOGGING
11224 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11225 sctp_packet_log(o_pak);
11228 SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
11233 ip6->ip6_plen = (uint16_t)(len - sizeof(struct ip6_hdr));
11235 shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11236 SCTP_STAT_INCR(sctps_sendswcrc);
11237 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11238 udp->uh_sum = 0xffff;
11241 mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
11242 mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
11243 SCTP_STAT_INCR(sctps_sendhwcrc);
11245 #ifdef SCTP_PACKET_LOGGING
11246 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
11247 sctp_packet_log(o_pak);
11250 SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
11254 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
11256 sctp_m_freem(mout);
11257 SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11260 SCTP_STAT_INCR(sctps_sendpackets);
11261 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11262 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11267 sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
11268 struct sctphdr *sh,
11269 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
11270 uint32_t vrf_id, uint16_t port)
11272 sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
11273 mflowtype, mflowid, fibnum,
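/*
 * Queue a HEARTBEAT request for the given destination. The chunk
 * carries a HEARTBEAT-INFO parameter holding the current time and the
 * destination address; random values are only drawn from the entropy
 * pool for addresses that are still unconfirmed.
 */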
11278 sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
11279 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
11284 struct sctp_tmit_chunk *chk;
11285 struct sctp_heartbeat_chunk *hb;
11286 struct timeval now;
11288 SCTP_TCB_LOCK_ASSERT(stcb);
11292 (void)SCTP_GETTIME_TIMEVAL(&now);
11293 switch (net->ro._l_addr.sa.sa_family) {
11305 sctp_alloc_a_chunk(stcb, chk);
11307 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11310 chk->copy_by_ref = 0;
11311 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11312 chk->rec.chunk_id.can_take_data = 1;
11314 chk->asoc = &stcb->asoc;
11315 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11317 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11318 if (chk->data == NULL) {
11319 sctp_free_a_chunk(stcb, chk, so_locked);
11322 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11323 SCTP_BUF_LEN(chk->data) = chk->send_size;
11324 chk->sent = SCTP_DATAGRAM_UNSENT;
11325 chk->snd_count = 0;
11327 atomic_add_int(&chk->whoTo->ref_count, 1);
11328 /* Now we have an mbuf that we can fill in with the details */
11329 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11330 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11331 /* fill out chunk header */
11332 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11333 hb->ch.chunk_flags = 0;
11334 hb->ch.chunk_length = htons(chk->send_size);
11335 /* Fill out hb parameter */
11336 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11337 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11338 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11339 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
11340 /* Did our user request this one, put it in */
11341 hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
11342 hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
11343 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11345 * we only take from the entropy pool if the address is not
11348 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11349 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11351 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11352 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11354 switch (net->ro._l_addr.sa.sa_family) {
11357 memcpy(hb->heartbeat.hb_info.address,
11358 &net->ro._l_addr.sin.sin_addr,
11359 sizeof(net->ro._l_addr.sin.sin_addr));
11364 memcpy(hb->heartbeat.hb_info.address,
11365 &net->ro._l_addr.sin6.sin6_addr,
11366 sizeof(net->ro._l_addr.sin6.sin6_addr));
11371 sctp_m_freem(chk->data);
11374 sctp_free_a_chunk(stcb, chk, so_locked);
11378 net->hb_responded = 0;
11379 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11380 stcb->asoc.ctrl_queue_cnt++;
11381 SCTP_STAT_INCR(sctps_sendheartbeat);
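/*
 * Queue an ECN-ECHO (ECNE) chunk for the given net. If one is already
 * on the control queue for that destination, its TSN and
 * packets-since-CWR counter are updated in place instead of queueing
 * a duplicate.
 */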
11386 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11389 struct sctp_association *asoc;
11390 struct sctp_ecne_chunk *ecne;
11391 struct sctp_tmit_chunk *chk;
11396 asoc = &stcb->asoc;
11397 SCTP_TCB_LOCK_ASSERT(stcb);
11398 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11399 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
11400 /* found a previous ECN_ECHO update it if needed */
11401 uint32_t cnt, ctsn;
11403 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11404 ctsn = ntohl(ecne->tsn);
11405 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11406 ecne->tsn = htonl(high_tsn);
11407 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11409 cnt = ntohl(ecne->num_pkts_since_cwr);
11411 ecne->num_pkts_since_cwr = htonl(cnt);
11415 /* nope, could not find one to update, so we must build one */
11416 sctp_alloc_a_chunk(stcb, chk);
11420 SCTP_STAT_INCR(sctps_queue_upd_ecne);
11421 chk->copy_by_ref = 0;
11422 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11423 chk->rec.chunk_id.can_take_data = 0;
11425 chk->asoc = &stcb->asoc;
11426 chk->send_size = sizeof(struct sctp_ecne_chunk);
11427 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11428 if (chk->data == NULL) {
11429 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11432 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11433 SCTP_BUF_LEN(chk->data) = chk->send_size;
11434 chk->sent = SCTP_DATAGRAM_UNSENT;
11435 chk->snd_count = 0;
11437 atomic_add_int(&chk->whoTo->ref_count, 1);
11439 stcb->asoc.ecn_echo_cnt_onq++;
11440 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11441 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11442 ecne->ch.chunk_flags = 0;
11443 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11444 ecne->tsn = htonl(high_tsn);
11445 ecne->num_pkts_since_cwr = htonl(1);
11446 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
11447 asoc->ctrl_queue_cnt++;
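/*
 * Queue a PACKET-DROPPED chunk echoing the received packet back to
 * the peer. The peer must have advertised support, we never answer an
 * ABORT, PACKET-DROPPED or INIT-ACK, and the echoed data is trimmed so
 * the result fits in one MTU. The chunk also reports our receive
 * buffer size and the amount currently queued.
 */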
11451 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11452 struct mbuf *m, int len, int iphlen, int bad_crc)
11454 struct sctp_association *asoc;
11455 struct sctp_pktdrop_chunk *drp;
11456 struct sctp_tmit_chunk *chk;
11462 struct sctp_chunkhdr *ch, chunk_buf;
11463 unsigned int chk_length;
11468 asoc = &stcb->asoc;
11469 SCTP_TCB_LOCK_ASSERT(stcb);
11470 if (asoc->pktdrop_supported == 0) {
11472 * peer must declare support before I send one.
11476 if (stcb->sctp_socket == NULL) {
11479 sctp_alloc_a_chunk(stcb, chk);
11483 chk->copy_by_ref = 0;
11484 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11485 chk->rec.chunk_id.can_take_data = 1;
11488 chk->send_size = len;
11489 /* Validate that we do not have an ABORT in here. */
11490 offset = iphlen + sizeof(struct sctphdr);
11491 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11492 sizeof(*ch), (uint8_t *)&chunk_buf);
11493 while (ch != NULL) {
11494 chk_length = ntohs(ch->chunk_length);
11495 if (chk_length < sizeof(*ch)) {
11496 /* break to abort land */
11499 switch (ch->chunk_type) {
11500 case SCTP_PACKET_DROPPED:
11501 case SCTP_ABORT_ASSOCIATION:
11502 case SCTP_INITIATION_ACK:
11504 * We don't respond with a PKT-DROP to an ABORT
11505 * or PKT-DROP. We also do not respond to an
11506 * INIT-ACK, because we can't know if the initiation
11507 * tag is correct or not.
11509 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11514 offset += SCTP_SIZE32(chk_length);
11515 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11516 sizeof(*ch), (uint8_t *)&chunk_buf);
11519 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11520 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11522 * only send 1 mtu worth, trim off the excess on the end.
11525 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11528 chk->asoc = &stcb->asoc;
11529 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11530 if (chk->data == NULL) {
11532 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11535 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11536 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11538 sctp_m_freem(chk->data);
11542 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11543 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11544 chk->book_size_scale = 0;
11546 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11547 drp->trunc_len = htons(fullsz);
11549 * Len is already adjusted to size minus overhead above; take
11550 * out the pkt_drop chunk itself from it.
11552 chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
11553 len = chk->send_size;
11555 /* no truncation needed */
11556 drp->ch.chunk_flags = 0;
11557 drp->trunc_len = htons(0);
11560 drp->ch.chunk_flags |= SCTP_BADCRC;
11562 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11563 SCTP_BUF_LEN(chk->data) = chk->send_size;
11564 chk->sent = SCTP_DATAGRAM_UNSENT;
11565 chk->snd_count = 0;
11567 /* we should hit here */
11569 atomic_add_int(&chk->whoTo->ref_count, 1);
11573 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11574 drp->ch.chunk_length = htons(chk->send_size);
11575 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11579 drp->bottle_bw = htonl(spc);
11580 if (asoc->my_rwnd) {
11581 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11582 asoc->size_on_all_streams +
11583 asoc->my_rwnd_control_len +
11584 stcb->sctp_socket->so_rcv.sb_cc);
11587 * If my rwnd is 0, possibly from mbuf depletion as well as
11588 * space used, tell the peer there is NO space aka onq == bw
11590 drp->current_onq = htonl(spc);
11594 m_copydata(m, iphlen, len, (caddr_t)datap);
11595 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11596 asoc->ctrl_queue_cnt++;
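/*
 * Queue a CWR chunk toward the given net. An already-queued CWR for
 * the same destination is updated in place (TSN advanced, override
 * flag carried over) rather than queueing a second one.
 */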
11600 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
11602 struct sctp_association *asoc;
11603 struct sctp_cwr_chunk *cwr;
11604 struct sctp_tmit_chunk *chk;
11606 SCTP_TCB_LOCK_ASSERT(stcb);
11610 asoc = &stcb->asoc;
11611 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11612 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
11614 * found a previous CWR queued to the same destination;
11615 * update it if needed
11619 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11620 ctsn = ntohl(cwr->tsn);
11621 if (SCTP_TSN_GT(high_tsn, ctsn)) {
11622 cwr->tsn = htonl(high_tsn);
11624 if (override & SCTP_CWR_REDUCE_OVERRIDE) {
11625 /* Make sure override is carried */
11626 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
11631 sctp_alloc_a_chunk(stcb, chk);
11635 chk->copy_by_ref = 0;
11636 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11637 chk->rec.chunk_id.can_take_data = 1;
11639 chk->asoc = &stcb->asoc;
11640 chk->send_size = sizeof(struct sctp_cwr_chunk);
11641 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
11642 if (chk->data == NULL) {
11643 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
11646 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11647 SCTP_BUF_LEN(chk->data) = chk->send_size;
11648 chk->sent = SCTP_DATAGRAM_UNSENT;
11649 chk->snd_count = 0;
11651 atomic_add_int(&chk->whoTo->ref_count, 1);
11652 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11653 cwr->ch.chunk_type = SCTP_ECN_CWR;
11654 cwr->ch.chunk_flags = override;
11655 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11656 cwr->tsn = htonl(high_tsn);
11657 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11658 asoc->ctrl_queue_cnt++;
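/*
 * Append an Outgoing SSN Reset Request parameter to the stream reset
 * (RE-CONFIG) chunk being built in 'chk'. Only streams in
 * SCTP_STREAM_RESET_PENDING with nothing left queued are listed; if
 * that covers every outgoing stream the explicit list is dropped,
 * meaning "all streams". Listed streams move to
 * SCTP_STREAM_RESET_IN_FLIGHT and the chunk length is fixed up.
 */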
11662 sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
11663 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11665 uint16_t len, old_len, i;
11666 struct sctp_stream_reset_out_request *req_out;
11667 struct sctp_chunkhdr *ch;
11669 int number_entries = 0;
11671 ch = mtod(chk->data, struct sctp_chunkhdr *);
11672 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11673 /* get to new offset for the param. */
11674 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11675 /* now how long will this param be? */
11676 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11677 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11678 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11679 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11683 if (number_entries == 0) {
11686 if (number_entries == stcb->asoc.streamoutcnt) {
11687 number_entries = 0;
11689 if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
11690 number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
11692 len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11693 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11694 req_out->ph.param_length = htons(len);
11695 req_out->request_seq = htonl(seq);
11696 req_out->response_seq = htonl(resp_seq);
11697 req_out->send_reset_at_tsn = htonl(last_sent);
11699 if (number_entries) {
11700 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11701 if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
11702 (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
11703 TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
11704 req_out->list_of_streams[at] = htons(i);
11706 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11707 if (at >= number_entries) {
11713 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
11714 stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
11717 if (SCTP_SIZE32(len) > len) {
11719 * Need to worry about the pad we may end up adding to the
11720 * end. This is easy since the struct is either aligned to 4
11721 * bytes or 2 bytes off.
11723 req_out->list_of_streams[number_entries] = 0;
11725 /* now fix the chunk length */
11726 ch->chunk_length = htons(len + old_len);
11727 chk->book_size = len + old_len;
11728 chk->book_size_scale = 0;
11729 chk->send_size = SCTP_SIZE32(chk->book_size);
11730 SCTP_BUF_LEN(chk->data) = chk->send_size;
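/*
 * Append an Incoming SSN Reset Request parameter carrying the given
 * list of stream ids (an empty list requests all streams), then fix
 * up the chunk length.
 */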
11735 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11736 int number_entries, uint16_t *list,
11739 uint16_t len, old_len, i;
11740 struct sctp_stream_reset_in_request *req_in;
11741 struct sctp_chunkhdr *ch;
11743 ch = mtod(chk->data, struct sctp_chunkhdr *);
11744 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11746 /* get to new offset for the param. */
11747 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11748 /* now how long will this param be? */
11749 len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11750 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11751 req_in->ph.param_length = htons(len);
11752 req_in->request_seq = htonl(seq);
11753 if (number_entries) {
11754 for (i = 0; i < number_entries; i++) {
11755 req_in->list_of_streams[i] = htons(list[i]);
11758 if (SCTP_SIZE32(len) > len) {
11760 * Need to worry about the pad we may end up adding to the
11761 * end. This is easy since the struct is either aligned to 4
11762 * bytes or 2 bytes off.
11764 req_in->list_of_streams[number_entries] = 0;
11766 /* now fix the chunk length */
11767 ch->chunk_length = htons(len + old_len);
11768 chk->book_size = len + old_len;
11769 chk->book_size_scale = 0;
11770 chk->send_size = SCTP_SIZE32(chk->book_size);
11771 SCTP_BUF_LEN(chk->data) = chk->send_size;
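/*
 * Append an SSN/TSN Reset Request parameter; it carries no stream
 * list since it always applies to the whole association.
 */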
11776 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11779 uint16_t len, old_len;
11780 struct sctp_stream_reset_tsn_request *req_tsn;
11781 struct sctp_chunkhdr *ch;
11783 ch = mtod(chk->data, struct sctp_chunkhdr *);
11784 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11786 /* get to new offset for the param. */
11787 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11788 /* now how long will this param be? */
11789 len = sizeof(struct sctp_stream_reset_tsn_request);
11790 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11791 req_tsn->ph.param_length = htons(len);
11792 req_tsn->request_seq = htonl(seq);
11794 /* now fix the chunk length */
11795 ch->chunk_length = htons(len + old_len);
11796 chk->send_size = len + old_len;
11797 chk->book_size = SCTP_SIZE32(chk->send_size);
11798 chk->book_size_scale = 0;
11799 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
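/*
 * Append a Re-configuration Response parameter answering request
 * 'resp_seq' with the given result code, then fix up the chunk length.
 */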
11804 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11805 uint32_t resp_seq, uint32_t result)
11807 uint16_t len, old_len;
11808 struct sctp_stream_reset_response *resp;
11809 struct sctp_chunkhdr *ch;
11811 ch = mtod(chk->data, struct sctp_chunkhdr *);
11812 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11814 /* get to new offset for the param. */
11815 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11816 /* now how long will this param be? */
11817 len = sizeof(struct sctp_stream_reset_response);
11818 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11819 resp->ph.param_length = htons(len);
11820 resp->response_seq = htonl(resp_seq);
11821 resp->result = htonl(result);
11823 /* now fix the chunk length */
11824 ch->chunk_length = htons(len + old_len);
11825 chk->book_size = len + old_len;
11826 chk->book_size_scale = 0;
11827 chk->send_size = SCTP_SIZE32(chk->book_size);
11828 SCTP_BUF_LEN(chk->data) = chk->send_size;
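/*
 * Send a stand-alone response to a stream reset request that was
 * deferred earlier: allocate a fresh RE-CONFIG chunk, add a single
 * response parameter for 'ent' and queue it on the control send
 * queue.
 */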
11833 sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
11834 struct sctp_stream_reset_list *ent,
11837 struct sctp_association *asoc;
11838 struct sctp_tmit_chunk *chk;
11839 struct sctp_chunkhdr *ch;
11841 asoc = &stcb->asoc;
11844 * Reset our last reset action to the new one: IP -> response
11845 * (probably PERFORMED). This ensures that if we fail to send, a
11846 * retransmission from the peer will get the new response.
11848 asoc->last_reset_action[0] = response;
11849 if (asoc->stream_reset_outstanding) {
11852 sctp_alloc_a_chunk(stcb, chk);
11854 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11857 chk->copy_by_ref = 0;
11858 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11859 chk->rec.chunk_id.can_take_data = 0;
11861 chk->asoc = &stcb->asoc;
11862 chk->book_size = sizeof(struct sctp_chunkhdr);
11863 chk->send_size = SCTP_SIZE32(chk->book_size);
11864 chk->book_size_scale = 0;
11865 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
11866 if (chk->data == NULL) {
11867 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
11868 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11871 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11872 /* setup chunk parameters */
11873 chk->sent = SCTP_DATAGRAM_UNSENT;
11874 chk->snd_count = 0;
11875 if (stcb->asoc.alternate) {
11876 chk->whoTo = stcb->asoc.alternate;
11878 chk->whoTo = stcb->asoc.primary_destination;
11880 ch = mtod(chk->data, struct sctp_chunkhdr *);
11881 ch->chunk_type = SCTP_STREAM_RESET;
11882 ch->chunk_flags = 0;
11883 ch->chunk_length = htons(chk->book_size);
11884 atomic_add_int(&chk->whoTo->ref_count, 1);
11885 SCTP_BUF_LEN(chk->data) = chk->send_size;
11886 sctp_add_stream_reset_result(chk, ent->seq, response);
11887 /* insert the chunk for sending */
11888 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11891 asoc->ctrl_queue_cnt++;
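/*
 * Like sctp_add_stream_reset_result(), but for answering an SSN/TSN
 * reset request: the response additionally carries the sender's next
 * TSN and the receiver's next expected TSN.
 */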
11895 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11896 uint32_t resp_seq, uint32_t result,
11897 uint32_t send_una, uint32_t recv_next)
11899 uint16_t len, old_len;
11900 struct sctp_stream_reset_response_tsn *resp;
11901 struct sctp_chunkhdr *ch;
11903 ch = mtod(chk->data, struct sctp_chunkhdr *);
11904 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11906 /* get to new offset for the param. */
11907 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11908 /* now how long will this param be? */
11909 len = sizeof(struct sctp_stream_reset_response_tsn);
11910 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11911 resp->ph.param_length = htons(len);
11912 resp->response_seq = htonl(resp_seq);
11913 resp->result = htonl(result);
11914 resp->senders_next_tsn = htonl(send_una);
11915 resp->receivers_next_tsn = htonl(recv_next);
11917 /* now fix the chunk length */
11918 ch->chunk_length = htons(len + old_len);
11919 chk->book_size = len + old_len;
11920 chk->send_size = SCTP_SIZE32(chk->book_size);
11921 chk->book_size_scale = 0;
11922 SCTP_BUF_LEN(chk->data) = chk->send_size;
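/*
 * Append an Add Outgoing Streams Request parameter announcing that we
 * want 'adding' additional outgoing streams, then fix up the chunk
 * length.
 */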
11927 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
11931 uint16_t len, old_len;
11932 struct sctp_chunkhdr *ch;
11933 struct sctp_stream_reset_add_strm *addstr;
11935 ch = mtod(chk->data, struct sctp_chunkhdr *);
11936 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11938 /* get to new offset for the param. */
11939 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11940 /* now how long will this param be? */
11941 len = sizeof(struct sctp_stream_reset_add_strm);
11944 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
11945 addstr->ph.param_length = htons(len);
11946 addstr->request_seq = htonl(seq);
11947 addstr->number_of_streams = htons(adding);
11948 addstr->reserved = 0;
11950 /* now fix the chunk length */
11951 ch->chunk_length = htons(len + old_len);
11952 chk->send_size = len + old_len;
11953 chk->book_size = SCTP_SIZE32(chk->send_size);
11954 chk->book_size_scale = 0;
11955 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
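/*
 * Append an Add Incoming Streams Request parameter asking the peer to
 * open 'adding' additional streams toward us, then fix up the chunk
 * length.
 */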
11960 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
11964 uint16_t len, old_len;
11965 struct sctp_chunkhdr *ch;
11966 struct sctp_stream_reset_add_strm *addstr;
11968 ch = mtod(chk->data, struct sctp_chunkhdr *);
11969 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11971 /* get to new offset for the param. */
11972 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11973 /* now how long will this param be? */
11974 len = sizeof(struct sctp_stream_reset_add_strm);
11976 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
11977 addstr->ph.param_length = htons(len);
11978 addstr->request_seq = htonl(seq);
11979 addstr->number_of_streams = htons(adding);
11980 addstr->reserved = 0;
11982 /* now fix the chunk length */
11983 ch->chunk_length = htons(len + old_len);
11984 chk->send_size = len + old_len;
11985 chk->book_size = SCTP_SIZE32(chk->send_size);
11986 chk->book_size_scale = 0;
11987 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
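/*
 * Fire off a deferred outgoing stream reset: if no other reset is
 * outstanding, build a RE-CONFIG chunk holding a single Outgoing SSN
 * Reset Request, queue it, also queue a SACK if one is pending, and
 * start the STRRESET timer.
 */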
11992 sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
11994 struct sctp_association *asoc;
11995 struct sctp_tmit_chunk *chk;
11996 struct sctp_chunkhdr *ch;
11999 asoc = &stcb->asoc;
12000 asoc->trigger_reset = 0;
12001 if (asoc->stream_reset_outstanding) {
12004 sctp_alloc_a_chunk(stcb, chk);
12006 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12009 chk->copy_by_ref = 0;
12010 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12011 chk->rec.chunk_id.can_take_data = 0;
12013 chk->asoc = &stcb->asoc;
12014 chk->book_size = sizeof(struct sctp_chunkhdr);
12015 chk->send_size = SCTP_SIZE32(chk->book_size);
12016 chk->book_size_scale = 0;
12017 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12018 if (chk->data == NULL) {
12019 sctp_free_a_chunk(stcb, chk, so_locked);
12020 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12023 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12025 /* setup chunk parameters */
12026 chk->sent = SCTP_DATAGRAM_UNSENT;
12027 chk->snd_count = 0;
12028 if (stcb->asoc.alternate) {
12029 chk->whoTo = stcb->asoc.alternate;
12031 chk->whoTo = stcb->asoc.primary_destination;
12033 ch = mtod(chk->data, struct sctp_chunkhdr *);
12034 ch->chunk_type = SCTP_STREAM_RESET;
12035 ch->chunk_flags = 0;
12036 ch->chunk_length = htons(chk->book_size);
12037 atomic_add_int(&chk->whoTo->ref_count, 1);
12038 SCTP_BUF_LEN(chk->data) = chk->send_size;
12039 seq = stcb->asoc.str_reset_seq_out;
12040 if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
12042 asoc->stream_reset_outstanding++;
12044 m_freem(chk->data);
12046 sctp_free_a_chunk(stcb, chk, so_locked);
12049 asoc->str_reset = chk;
12050 /* insert the chunk for sending */
12051 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12054 asoc->ctrl_queue_cnt++;
12056 if (stcb->asoc.send_sack) {
12057 sctp_send_sack(stcb, so_locked);
12059 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
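/*
 * Build a RE-CONFIG chunk combining whatever the caller requested: an
 * outgoing SSN reset, an incoming SSN reset, an SSN/TSN reset and/or
 * requests to add incoming/outgoing streams. When outgoing streams
 * are added the local strmout array is grown (and its queues migrated)
 * first. Only one reset operation may be outstanding at a time; the
 * chunk is queued and the STRRESET timer started.
 */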
12064 sctp_send_str_reset_req(struct sctp_tcb *stcb,
12065 uint16_t number_entries, uint16_t *list,
12066 uint8_t send_in_req,
12067 uint8_t send_tsn_req,
12068 uint8_t add_stream,
12070 uint16_t adding_i, uint8_t peer_asked)
12072 struct sctp_association *asoc;
12073 struct sctp_tmit_chunk *chk;
12074 struct sctp_chunkhdr *ch;
12075 int can_send_out_req = 0;
12078 asoc = &stcb->asoc;
12079 if (asoc->stream_reset_outstanding) {
12081 * Already one pending, must get ACK back to clear the flag.
12083 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
12086 if ((send_in_req == 0) && (send_tsn_req == 0) &&
12087 (add_stream == 0)) {
12088 /* nothing to do */
12089 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12092 if (send_tsn_req && send_in_req) {
12093 /* error, can't do that */
12094 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12096 } else if (send_in_req) {
12097 can_send_out_req = 1;
12099 if (number_entries > (MCLBYTES -
12100 SCTP_MIN_OVERHEAD -
12101 sizeof(struct sctp_chunkhdr) -
12102 sizeof(struct sctp_stream_reset_out_request)) /
12103 sizeof(uint16_t)) {
12104 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12107 sctp_alloc_a_chunk(stcb, chk);
12109 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12112 chk->copy_by_ref = 0;
12113 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
12114 chk->rec.chunk_id.can_take_data = 0;
12116 chk->asoc = &stcb->asoc;
12117 chk->book_size = sizeof(struct sctp_chunkhdr);
12118 chk->send_size = SCTP_SIZE32(chk->book_size);
12119 chk->book_size_scale = 0;
12121 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
12122 if (chk->data == NULL) {
12123 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
12124 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12127 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
12129 /* setup chunk parameters */
12130 chk->sent = SCTP_DATAGRAM_UNSENT;
12131 chk->snd_count = 0;
12132 if (stcb->asoc.alternate) {
12133 chk->whoTo = stcb->asoc.alternate;
12135 chk->whoTo = stcb->asoc.primary_destination;
12137 atomic_add_int(&chk->whoTo->ref_count, 1);
12138 ch = mtod(chk->data, struct sctp_chunkhdr *);
12139 ch->chunk_type = SCTP_STREAM_RESET;
12140 ch->chunk_flags = 0;
12141 ch->chunk_length = htons(chk->book_size);
12142 SCTP_BUF_LEN(chk->data) = chk->send_size;
12144 seq = stcb->asoc.str_reset_seq_out;
12145 if (can_send_out_req) {
12148 ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
12151 asoc->stream_reset_outstanding++;
12154 if ((add_stream & 1) &&
12155 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
12156 /* Need to allocate more */
12157 struct sctp_stream_out *oldstream;
12158 struct sctp_stream_queue_pending *sp, *nsp;
12160 #if defined(SCTP_DETAILED_STR_STATS)
12164 oldstream = stcb->asoc.strmout;
12165 /* get some more */
12166 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
12167 (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
12169 if (stcb->asoc.strmout == NULL) {
12172 stcb->asoc.strmout = oldstream;
12173 /* Turn off the bit */
12174 x = add_stream & 0xfe;
12179 * Ok now we proceed with copying the old out stuff and
12180 * initializing the new stuff.
12182 SCTP_TCB_SEND_LOCK(stcb);
12183 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
12184 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
12185 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12186 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
12187 stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
12188 stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
12189 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
12190 stcb->asoc.strmout[i].sid = i;
12191 stcb->asoc.strmout[i].state = oldstream[i].state;
12192 /* FIX ME FIX ME */
12194 * This should be a SS_COPY operation FIX ME STREAM
12197 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
12198 /* now anything on those queues? */
12199 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
12200 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
12201 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
12205 /* now the new streams */
12206 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
12207 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
12208 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
12209 stcb->asoc.strmout[i].chunks_on_queues = 0;
12210 #if defined(SCTP_DETAILED_STR_STATS)
12211 for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
12212 stcb->asoc.strmout[i].abandoned_sent[j] = 0;
12213 stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
12216 stcb->asoc.strmout[i].abandoned_sent[0] = 0;
12217 stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
12219 stcb->asoc.strmout[i].next_mid_ordered = 0;
12220 stcb->asoc.strmout[i].next_mid_unordered = 0;
12221 stcb->asoc.strmout[i].sid = i;
12222 stcb->asoc.strmout[i].last_msg_incomplete = 0;
12223 stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
12224 stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
12226 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
12227 SCTP_FREE(oldstream, SCTP_M_STRMO);
12228 SCTP_TCB_SEND_UNLOCK(stcb);
12231 if ((add_stream & 1) && (adding_o > 0)) {
12232 asoc->strm_pending_add_size = adding_o;
12233 asoc->peer_req_out = peer_asked;
12234 sctp_add_an_out_stream(chk, seq, adding_o);
12236 asoc->stream_reset_outstanding++;
12238 if ((add_stream & 2) && (adding_i > 0)) {
12239 sctp_add_an_in_stream(chk, seq, adding_i);
12241 asoc->stream_reset_outstanding++;
12244 sctp_add_stream_reset_in(chk, number_entries, list, seq);
12246 asoc->stream_reset_outstanding++;
12248 if (send_tsn_req) {
12249 sctp_add_stream_reset_tsn(chk, seq);
12250 asoc->stream_reset_outstanding++;
12252 asoc->str_reset = chk;
12253 /* insert the chunk for sending */
12254 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
12257 asoc->ctrl_queue_cnt++;
12258 if (stcb->asoc.send_sack) {
12259 sctp_send_sack(stcb, SCTP_SO_LOCKED);
12261 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
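/*
 * Send an ABORT in response to a packet for which no association
 * exists. Never answer an ABORT with an ABORT.
 */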
12266 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
12267 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12268 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12269 uint32_t vrf_id, uint16_t port)
12271 /* Don't respond to an ABORT with an ABORT. */
12272 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
12274 sctp_m_freem(cause);
12277 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
12278 mflowtype, mflowid, fibnum,
12284 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
12285 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
12286 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
12287 uint32_t vrf_id, uint16_t port)
12289 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
12290 mflowtype, mflowid, fibnum,
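/*
 * User-data copy helpers: sctp_copy_resume() and sctp_copy_one() pull
 * up to the requested number of bytes out of the uio into an mbuf
 * chain via m_uiotombuf(), handing the chain, its length and its last
 * mbuf back to the caller.
 */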
12295 static struct mbuf *
12296 sctp_copy_resume(struct uio *uio,
12298 int user_marks_eor,
12301 struct mbuf **new_tail)
12305 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12306 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12308 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12311 *sndout = m_length(m, NULL);
12312 *new_tail = m_last(m);
12318 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12322 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12324 if (sp->data == NULL) {
12325 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
12328 sp->tail_mbuf = m_last(sp->data);
12334 static struct sctp_stream_queue_pending *
12335 sctp_copy_it_in(struct sctp_tcb *stcb,
12336 struct sctp_association *asoc,
12337 struct sctp_sndrcvinfo *srcv,
12339 struct sctp_nets *net,
12341 int user_marks_eor,
12345 * This routine must be very careful in its work. Protocol
12346 * processing is up and running, so care must be taken to spl...()
12347 * when you need to do something that may affect the stcb/asoc. The
12348 * sb is locked, however. When data is copied, the protocol processing
12349 * should be enabled since this is a slower operation...
12351 struct sctp_stream_queue_pending *sp = NULL;
12355 /* Now can we send this? */
12356 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12357 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12358 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12359 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12360 /* got data while shutting down */
12361 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12362 *error = ECONNRESET;
12365 sctp_alloc_a_strmoq(stcb, sp);
12367 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12372 sp->sender_all_done = 0;
12373 sp->sinfo_flags = srcv->sinfo_flags;
12374 sp->timetolive = srcv->sinfo_timetolive;
12375 sp->ppid = srcv->sinfo_ppid;
12376 sp->context = srcv->sinfo_context;
12378 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12380 sp->sid = srcv->sinfo_stream;
12381 sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
12382 if ((sp->length == (uint32_t)uio->uio_resid) &&
12383 ((user_marks_eor == 0) ||
12384 (srcv->sinfo_flags & SCTP_EOF) ||
12385 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12386 sp->msg_is_complete = 1;
12388 sp->msg_is_complete = 0;
12390 sp->sender_all_done = 0;
12391 sp->some_taken = 0;
12392 sp->put_last_out = 0;
12393 resv_in_first = SCTP_DATA_CHUNK_OVERHEAD(stcb);
12394 sp->data = sp->tail_mbuf = NULL;
12395 if (sp->length == 0) {
12398 if (srcv->sinfo_keynumber_valid) {
12399 sp->auth_keyid = srcv->sinfo_keynumber;
12401 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12403 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12404 sctp_auth_key_acquire(stcb, sp->auth_keyid);
12405 sp->holds_key_ref = 1;
12407 *error = sctp_copy_one(sp, uio, resv_in_first);
12410 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
12413 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12415 atomic_add_int(&sp->net->ref_count, 1);
12419 sctp_set_prsctp_policy(sp);
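/*
 * Socket-layer entry point for sending. Extract any SCTP_SNDRCV
 * ancillary data from 'control', convert a v4-mapped IPv6 destination
 * back to plain IPv4 when both families are compiled in, and hand
 * everything to sctp_lower_sosend().
 */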
12427 sctp_sosend(struct socket *so,
12428 struct sockaddr *addr,
12431 struct mbuf *control,
12436 int error, use_sndinfo = 0;
12437 struct sctp_sndrcvinfo sndrcvninfo;
12438 struct sockaddr *addr_to_use;
12439 #if defined(INET) && defined(INET6)
12440 struct sockaddr_in sin;
12444 /* process cmsg snd/rcv info (maybe an assoc-id) */
12445 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
12446 sizeof(sndrcvninfo))) {
12451 addr_to_use = addr;
12452 #if defined(INET) && defined(INET6)
12453 if ((addr) && (addr->sa_family == AF_INET6)) {
12454 struct sockaddr_in6 *sin6;
12456 sin6 = (struct sockaddr_in6 *)addr;
12457 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12458 in6_sin6_2_sin(&sin, sin6);
12459 addr_to_use = (struct sockaddr *)&sin;
12463 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12466 use_sndinfo ? &sndrcvninfo : NULL
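/*
 * The real send path: validate the destination address and sinfo
 * flags, locate the association (or create one for an implicit send),
 * then copy the user data onto the stream queues while honouring the
 * socket's send buffer limits and blocking/non-blocking mode.
 */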
12474 sctp_lower_sosend(struct socket *so,
12475 struct sockaddr *addr,
12477 struct mbuf *i_pak,
12478 struct mbuf *control,
12480 struct sctp_sndrcvinfo *srcv
12485 unsigned int sndlen = 0, max_len;
12487 struct mbuf *top = NULL;
12488 int queue_only = 0, queue_only_for_init = 0;
12489 int free_cnt_applied = 0;
12491 int now_filled = 0;
12492 unsigned int inqueue_bytes = 0;
12493 struct sctp_block_entry be;
12494 struct sctp_inpcb *inp;
12495 struct sctp_tcb *stcb = NULL;
12496 struct timeval now;
12497 struct sctp_nets *net;
12498 struct sctp_association *asoc;
12499 struct sctp_inpcb *t_inp;
12500 int user_marks_eor;
12501 int create_lock_applied = 0;
12502 int nagle_applies = 0;
12503 int some_on_control = 0;
12504 int got_all_of_the_send = 0;
12505 int hold_tcblock = 0;
12506 int non_blocking = 0;
12507 uint32_t local_add_more, local_soresv = 0;
12509 uint16_t sinfo_flags;
12510 sctp_assoc_t sinfo_assoc_id;
12517 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12519 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12522 SCTP_RELEASE_PKT(i_pak);
12526 if ((uio == NULL) && (i_pak == NULL)) {
12527 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12530 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12531 atomic_add_int(&inp->total_sends, 1);
12533 if (uio->uio_resid < 0) {
12534 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12537 sndlen = (unsigned int)uio->uio_resid;
12539 top = SCTP_HEADER_TO_CHAIN(i_pak);
12540 sndlen = SCTP_HEADER_LEN(i_pak);
12542 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12545 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12546 (inp->sctp_socket->so_qlimit)) {
12547 /* The listener can NOT send */
12548 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12553 * Pre-screen address; if one is given, the sin-len
12554 * must be set correctly!
12557 union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
12559 switch (raddr->sa.sa_family) {
12562 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
12563 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12567 port = raddr->sin.sin_port;
12572 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
12573 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12577 port = raddr->sin6.sin6_port;
12581 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12582 error = EAFNOSUPPORT;
12589 sinfo_flags = srcv->sinfo_flags;
12590 sinfo_assoc_id = srcv->sinfo_assoc_id;
12591 if (INVALID_SINFO_FLAG(sinfo_flags) ||
12592 PR_SCTP_INVALID_POLICY(sinfo_flags)) {
12593 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12597 if (srcv->sinfo_flags)
12598 SCTP_STAT_INCR(sctps_sends_with_flags);
12600 sinfo_flags = inp->def_send.sinfo_flags;
12601 sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
12603 if (sinfo_flags & SCTP_SENDALL) {
12604 /* it's a sendall */
12605 error = sctp_sendall(inp, uio, top, srcv);
12609 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
12610 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12614 /* now we must find the assoc */
12615 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12616 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12617 SCTP_INP_RLOCK(inp);
12618 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12620 SCTP_TCB_LOCK(stcb);
12623 SCTP_INP_RUNLOCK(inp);
12624 } else if (sinfo_assoc_id) {
12625 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
12626 if (stcb != NULL) {
12631 * Since we did not use findep we must
12632 * increment it, and if we don't find a tcb
12635 SCTP_INP_WLOCK(inp);
12636 SCTP_INP_INCR_REF(inp);
12637 SCTP_INP_WUNLOCK(inp);
12638 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12639 if (stcb == NULL) {
12640 SCTP_INP_WLOCK(inp);
12641 SCTP_INP_DECR_REF(inp);
12642 SCTP_INP_WUNLOCK(inp);
12647 if ((stcb == NULL) && (addr)) {
12648 /* Possible implicit send? */
12649 SCTP_ASOC_CREATE_LOCK(inp);
12650 create_lock_applied = 1;
12651 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12652 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12653 /* Should I really unlock? */
12654 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12659 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12660 (addr->sa_family == AF_INET6)) {
12661 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12665 SCTP_INP_WLOCK(inp);
12666 SCTP_INP_INCR_REF(inp);
12667 SCTP_INP_WUNLOCK(inp);
12668 /* With the lock applied, look again */
12669 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12670 if ((stcb == NULL) && (control != NULL) && (port > 0)) {
12671 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
12673 if (stcb == NULL) {
12674 SCTP_INP_WLOCK(inp);
12675 SCTP_INP_DECR_REF(inp);
12676 SCTP_INP_WUNLOCK(inp);
12683 if (t_inp != inp) {
12684 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12689 if (stcb == NULL) {
12690 if (addr == NULL) {
12691 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12695 /* We must go ahead and start the INIT process */
12698 if ((sinfo_flags & SCTP_ABORT) ||
12699 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
12701 * User asks to abort a non-existent assoc,
12702 * or EOF a non-existent assoc with no data
12704 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12708 /* get an asoc/stcb struct */
12709 vrf_id = inp->def_vrf_id;
12711 if (create_lock_applied == 0) {
12712 panic("Error, should hold create lock and I don't?");
12715 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12716 inp->sctp_ep.pre_open_stream_count,
12719 if (stcb == NULL) {
12720 /* Error is setup for us in the call */
12723 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
12724 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
12726 * Set the connected flag so we can queue data.
12729 soisconnecting(so);
12732 if (create_lock_applied) {
12733 SCTP_ASOC_CREATE_UNLOCK(inp);
12734 create_lock_applied = 0;
12736 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12739 * Turn on queue only flag to prevent data from being sent.
12743 asoc = &stcb->asoc;
12744 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12745 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12747 /* initialize authentication params for the assoc */
12748 sctp_initialize_auth_params(inp, stcb);
12751 if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
12752 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
12753 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
12759 /* out with the INIT */
12760 queue_only_for_init = 1;
12762 * we may want to dig in after this call and adjust the MTU
12763 * value. It defaulted to 1500 (constant) but the ro
12764 * structure may now have an update and thus we may need to
12765 * change it BEFORE we append the message.
12769 asoc = &stcb->asoc;
12771 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
12772 if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
12774 net = sctp_findnet(stcb, addr);
12777 if ((net == NULL) ||
12778 ((port != 0) && (port != stcb->rport))) {
12779 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12784 if (stcb->asoc.alternate) {
12785 net = stcb->asoc.alternate;
12787 net = stcb->asoc.primary_destination;
12790 atomic_add_int(&stcb->total_sends, 1);
12791 /* Keep the stcb from being freed under our feet */
12792 atomic_add_int(&asoc->refcnt, 1);
12793 free_cnt_applied = 1;
12795 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12796 if (sndlen > asoc->smallest_mtu) {
12797 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12802 if (SCTP_SO_IS_NBIO(so)
12803 || (flags & MSG_NBIO)
12807 /* would we block? */
12808 if (non_blocking) {
12811 if (hold_tcblock == 0) {
12812 SCTP_TCB_LOCK(stcb);
12815 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
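/*
 * inqueue_bytes: user payload already queued. total_output_queue_size
 * appears to include the per-chunk data-chunk overhead, so that
 * overhead (SCTP_DATA_CHUNK_OVERHEAD per queued chunk) is subtracted
 * before comparing against the socket send-buffer limit.
 */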
12816 if (user_marks_eor == 0) {
12821 if ((SCTP_SB_LIMIT_SND(so) < (amount + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12822 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12823 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12824 if (sndlen > SCTP_SB_LIMIT_SND(so))
12827 error = EWOULDBLOCK;
12830 stcb->asoc.sb_send_resv += sndlen;
12831 SCTP_TCB_UNLOCK(stcb);
12834 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12836 local_soresv = sndlen;
12837 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12838 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12839 error = ECONNRESET;
12842 if (create_lock_applied) {
12843 SCTP_ASOC_CREATE_UNLOCK(inp);
12844 create_lock_applied = 0;
12846 /* Is the stream no. valid? */
12847 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12848 /* Invalid stream number */
12849 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12853 if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
12854 (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
12856 * Can't queue any data while stream reset is underway.
12858 if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
12863 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
12866 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12867 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12870 /* we are now done with all control */
12872 sctp_m_freem(control);
12875 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12876 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12877 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12878 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12879 if (srcv->sinfo_flags & SCTP_ABORT) {
12882 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12883 error = ECONNRESET;
12887 /* Ok, we will attempt a msgsnd :> */
12889 p->td_ru.ru_msgsnd++;
12891 /* Are we aborting? */
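/*
 * A user-initiated abort: the SCTP_ABORT flag tears the association
 * down immediately, and any data supplied with the call is carried as
 * a "User-Initiated Abort" error cause (built just below). Assumed
 * userland sketch, not from this file:
 *
 *     struct sctp_sndrcvinfo s;
 *     memset(&s, 0, sizeof(s));
 *     s.sinfo_flags = SCTP_ABORT;
 *     sctp_send(fd, reason, strlen(reason), &s, 0);
 */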
12892 if (srcv->sinfo_flags & SCTP_ABORT) {
12894 int tot_demand, tot_out = 0, max_out;
12896 SCTP_STAT_INCR(sctps_sends_with_abort);
12897 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12898 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12899 /* It has to be up before we abort */
12900 /* how big is the user initiated abort? */
12901 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12905 if (hold_tcblock) {
12906 SCTP_TCB_UNLOCK(stcb);
12910 struct mbuf *cntm = NULL;
12912 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
12914 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
12915 tot_out += SCTP_BUF_LEN(cntm);
12919 /* Must fit in an MTU */
12921 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12922 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12924 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12928 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
12931 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12935 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12936 max_out -= sizeof(struct sctp_abort_msg);
12937 if (tot_out > max_out) {
12941 struct sctp_paramhdr *ph;
12943 /* now move forward the data pointer */
12944 ph = mtod(mm, struct sctp_paramhdr *);
12945 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12946 ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
12948 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12950 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12953 * Even if we can't copy in the user's data we
12954 * still abort; we just don't get to
12955 * send the user's abort reason.
12962 SCTP_BUF_NEXT(mm) = top;
12966 if (hold_tcblock == 0) {
12967 SCTP_TCB_LOCK(stcb);
12969 atomic_add_int(&stcb->asoc.refcnt, -1);
12970 free_cnt_applied = 0;
12971 /* release this lock, otherwise we hang on ourselves */
12972 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
12973 /* now relock the stcb so everything is sane */
12977 * In this case top is already chained to mm; avoid a double
12978 * free, since we free top below if top != NULL and the driver
12979 * would free it again after sending the packet out.
12986 /* Calculate the maximum we can send */
12987 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
12988 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12989 if (non_blocking) {
12990 /* we already checked for non-blocking above. */
12993 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12998 if (hold_tcblock) {
12999 SCTP_TCB_UNLOCK(stcb);
13002 if (asoc->strmout == NULL) {
13003 /* huh? software error */
13004 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
13008 /* Unless explicit EOR mode is on, we must make a send fit in one call. */
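/*
 * Explicit EOR mode lets an application hand over one logical message
 * in several send calls, marking the final piece with SCTP_EOR.
 * Assumed userland sketch (not from this file):
 *
 *     int on = 1;
 *     setsockopt(fd, IPPROTO_SCTP, SCTP_EXPLICIT_EOR, &on, sizeof(on));
 *     struct sctp_sndrcvinfo s;
 *     memset(&s, 0, sizeof(s));
 *     sctp_send(fd, part1, len1, &s, 0);      -- more pieces follow
 *     s.sinfo_flags = SCTP_EOR;
 *     sctp_send(fd, part2, len2, &s, 0);      -- end of record
 */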
13009 if ((user_marks_eor == 0) &&
13010 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
13011 /* It will NEVER fit */
13012 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
13016 if ((uio == NULL) && user_marks_eor) {
13018 * We do not support eeor mode for
13019 * sending with mbuf chains (like sendfile).
13021 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13025 if (user_marks_eor) {
13026 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
13029 * For non-eeor the whole message must fit in
13030 * the socket send buffer.
13032 local_add_more = sndlen;
13035 if (non_blocking) {
13036 goto skip_preblock;
13038 if (((max_len <= local_add_more) &&
13039 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
13041 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13042 /* No room right now! */
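/*
 * Blocking path: register a blocking entry on the tcb so an incoming
 * SACK's wakeup can find this sender, then sleep in sbwait() on the
 * send buffer. The loop re-checks both the space limit and the
 * queued-chunk limit after every wakeup.
 */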
13043 SOCKBUF_LOCK(&so->so_snd);
13044 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13045 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
13046 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
13047 SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
13048 (unsigned int)SCTP_SB_LIMIT_SND(so),
13051 stcb->asoc.stream_queue_cnt,
13052 stcb->asoc.chunks_on_out_queue,
13053 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
13054 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13055 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
13058 stcb->block_entry = &be;
13059 error = sbwait(&so->so_snd);
13060 stcb->block_entry = NULL;
13061 if (error || so->so_error || be.error) {
13064 error = so->so_error;
13069 SOCKBUF_UNLOCK(&so->so_snd);
13072 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13073 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13074 asoc, stcb->asoc.total_output_queue_size);
13076 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13077 SOCKBUF_UNLOCK(&so->so_snd);
13080 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13082 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
13083 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13087 SOCKBUF_UNLOCK(&so->so_snd);
13090 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13094 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
13095 * case. NOTE: uio will be NULL when a top mbuf chain is passed.
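/*
 * Two entry modes converge here: a uio from the socket layer (a user
 * buffer, copied in piecewise via sctp_copy_it_in()/sctp_copy_resume()
 * below) or a pre-built mbuf chain in "top" (handed whole to
 * sctp_msg_append() further down).
 */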
13098 if (srcv->sinfo_flags & SCTP_EOF) {
13099 got_all_of_the_send = 1;
13102 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13108 struct sctp_stream_queue_pending *sp;
13109 struct sctp_stream_out *strm;
13112 SCTP_TCB_SEND_LOCK(stcb);
13113 if ((asoc->stream_locked) &&
13114 (asoc->stream_locked_on != srcv->sinfo_stream)) {
13115 SCTP_TCB_SEND_UNLOCK(stcb);
13116 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
13120 SCTP_TCB_SEND_UNLOCK(stcb);
13122 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
13123 if (strm->last_msg_incomplete == 0) {
13125 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
13129 SCTP_TCB_SEND_LOCK(stcb);
13130 if (sp->msg_is_complete) {
13131 strm->last_msg_incomplete = 0;
13132 asoc->stream_locked = 0;
13135 * Just got locked to this guy in case of an
13138 strm->last_msg_incomplete = 1;
13139 if (stcb->asoc.idata_supported == 0) {
13140 asoc->stream_locked = 1;
13141 asoc->stream_locked_on = srcv->sinfo_stream;
13143 sp->sender_all_done = 0;
13145 sctp_snd_sb_alloc(stcb, sp->length);
13146 atomic_add_int(&asoc->stream_queue_cnt, 1);
13147 if (srcv->sinfo_flags & SCTP_UNORDERED) {
13148 SCTP_STAT_INCR(sctps_sends_with_unord);
13150 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
13151 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
13152 SCTP_TCB_SEND_UNLOCK(stcb);
13154 SCTP_TCB_SEND_LOCK(stcb);
13155 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
13156 SCTP_TCB_SEND_UNLOCK(stcb);
13158 /* ???? Huh ??? last msg is gone */
13160 panic("Warning: Last msg marked incomplete, yet nothing left?");
13162 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
13163 strm->last_msg_incomplete = 0;
13169 while (uio->uio_resid > 0) {
13170 /* How much room do we have? */
13171 struct mbuf *new_tail, *mm;
13173 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13174 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
13175 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13179 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
13180 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
13181 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
13184 if (hold_tcblock) {
13185 SCTP_TCB_UNLOCK(stcb);
13188 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
13189 if ((mm == NULL) || error) {
13195 /* Update the mbuf and count */
13196 SCTP_TCB_SEND_LOCK(stcb);
13197 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13199 * we need to get out. Peer probably aborted.
13203 if (stcb->asoc.state & SCTP_STATE_WAS_ABORTED) { /* asoc state flag, not the PCB flag */
13204 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
13205 error = ECONNRESET;
13207 SCTP_TCB_SEND_UNLOCK(stcb);
13210 if (sp->tail_mbuf) {
13211 /* tack it to the end */
13212 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
13213 sp->tail_mbuf = new_tail;
13215 /* A stolen mbuf */
13217 sp->tail_mbuf = new_tail;
13219 sctp_snd_sb_alloc(stcb, sndout);
13220 atomic_add_int(&sp->length, sndout);
13222 if (srcv->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
13223 sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
13225 /* Did we reach EOR? */
13226 if ((uio->uio_resid == 0) &&
13227 ((user_marks_eor == 0) ||
13228 (srcv->sinfo_flags & SCTP_EOF) ||
13229 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
13230 sp->msg_is_complete = 1;
13232 sp->msg_is_complete = 0;
13234 SCTP_TCB_SEND_UNLOCK(stcb);
13236 if (uio->uio_resid == 0) {
13241 if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
13243 * This is ugly but we must assure locking
13246 if (hold_tcblock == 0) {
13247 SCTP_TCB_LOCK(stcb);
13250 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
13251 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13252 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes)
13253 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
13259 SCTP_TCB_UNLOCK(stcb);
13262 /* wait for space now */
13263 if (non_blocking) {
13264 /* Non-blocking I/O: do not wait for space, just bail out here. */
13267 /* What about the INIT, send it maybe */
13268 if (queue_only_for_init) {
13269 if (hold_tcblock == 0) {
13270 SCTP_TCB_LOCK(stcb);
13273 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13274 /* a collision took us forward? */
13277 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13278 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
13282 if ((net->flight_size > net->cwnd) &&
13283 (asoc->sctp_cmt_on_off == 0)) {
13284 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13286 } else if (asoc->ifp_had_enobuf) {
13287 SCTP_STAT_INCR(sctps_ifnomemqueued);
13288 if (net->flight_size > (2 * net->mtu)) {
13291 asoc->ifp_had_enobuf = 0;
13293 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
13294 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13295 (stcb->asoc.total_flight > 0) &&
13296 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13297 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13300 * Ok, Nagle is set on and we have data outstanding.
13301 * Don't send anything and let SACKs drive out the
13302 * data unless we have a "full" segment to send.
13304 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13305 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13307 SCTP_STAT_INCR(sctps_naglequeued);
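/*
 * This mirrors TCP's Nagle algorithm: sub-MTU amounts are held back
 * while data is still in flight. Latency-sensitive applications can
 * opt out; assumed userland sketch (not from this file):
 *
 *     int on = 1;
 *     setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
 */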
13310 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13311 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13312 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13314 SCTP_STAT_INCR(sctps_naglesent);
13317 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13319 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13320 nagle_applies, un_sent);
13321 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13322 stcb->asoc.total_flight,
13323 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13325 if (queue_only_for_init)
13326 queue_only_for_init = 0;
13327 if ((queue_only == 0) && (nagle_applies == 0)) {
13329 * need to start chunk output
13330 * before blocking. Note that if
13331 * a lock is already applied, then
13332 * the input via the net is happening
13333 * and I don't need to start output :-D
13335 if (hold_tcblock == 0) {
13336 if (SCTP_TCB_TRYLOCK(stcb)) {
13338 sctp_chunk_output(inp,
13340 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13343 sctp_chunk_output(inp,
13345 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13347 if (hold_tcblock == 1) {
13348 SCTP_TCB_UNLOCK(stcb);
13352 SOCKBUF_LOCK(&so->so_snd);
13354 * This is a bit strange, but I think it will
13355 * work. The total_output_queue_size is locked and
13356 * protected by the TCB_LOCK, which we just released.
13357 * There is a race that can occur between releasing it
13358 * above, and me getting the socket lock, where sacks
13359 * come in but we have not put the SB_WAIT on the
13360 * so_snd buffer to get the wakeup. After the LOCK
13361 * is applied the sack_processing will also need to
13362 * LOCK the so->so_snd to do the actual sowwakeup(). So
13363 * once we have the socket buffer lock if we recheck the
13364 * size we KNOW we will get to sleep safely with the
13365 * wakeup flag in place.
13367 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * SCTP_DATA_CHUNK_OVERHEAD(stcb));
13368 if (SCTP_SB_LIMIT_SND(so) <= (inqueue_bytes +
13369 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
13370 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13371 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
13372 asoc, (size_t)uio->uio_resid);
13375 stcb->block_entry = &be;
13376 error = sbwait(&so->so_snd);
13377 stcb->block_entry = NULL;
13379 if (error || so->so_error || be.error) {
13382 error = so->so_error;
13387 SOCKBUF_UNLOCK(&so->so_snd);
13390 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13391 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
13392 asoc, stcb->asoc.total_output_queue_size);
13395 SOCKBUF_UNLOCK(&so->so_snd);
13396 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13400 SCTP_TCB_SEND_LOCK(stcb);
13401 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
13402 SCTP_TCB_SEND_UNLOCK(stcb);
13406 if (sp->msg_is_complete == 0) {
13407 strm->last_msg_incomplete = 1;
13408 if (stcb->asoc.idata_supported == 0) {
13409 asoc->stream_locked = 1;
13410 asoc->stream_locked_on = srcv->sinfo_stream;
13413 sp->sender_all_done = 1;
13414 strm->last_msg_incomplete = 0;
13415 asoc->stream_locked = 0;
13418 SCTP_PRINTF("Huh no sp TSNH?\n");
13419 strm->last_msg_incomplete = 0;
13420 asoc->stream_locked = 0;
13422 SCTP_TCB_SEND_UNLOCK(stcb);
13423 if (uio->uio_resid == 0) {
13424 got_all_of_the_send = 1;
13427 /* We send in a 0, since we do NOT have any locks */
13428 error = sctp_msg_append(stcb, net, top, srcv, 0);
13430 if (srcv->sinfo_flags & SCTP_EOF) {
13432 * This should only happen for Panda for the mbuf
13433 * send case, which does NOT yet support EEOR mode.
13434 * Thus, we can just set this flag to do the proper EOF handling.
13437 got_all_of_the_send = 1;
13445 if ((srcv->sinfo_flags & SCTP_EOF) &&
13446 (got_all_of_the_send == 1)) {
13447 SCTP_STAT_INCR(sctps_sends_with_eof);
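/*
 * SCTP_EOF asks for a graceful shutdown of this association once all
 * queued data has been delivered; the code below either sends SHUTDOWN
 * right away (nothing pending) or sets SHUTDOWN_PENDING. Assumed
 * userland sketch (not from this file):
 *
 *     struct sctp_sndrcvinfo s;
 *     memset(&s, 0, sizeof(s));
 *     s.sinfo_flags = SCTP_EOF;
 *     sctp_send(fd, NULL, 0, &s, 0);
 */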
13449 if (hold_tcblock == 0) {
13450 SCTP_TCB_LOCK(stcb);
13453 if (TAILQ_EMPTY(&asoc->send_queue) &&
13454 TAILQ_EMPTY(&asoc->sent_queue) &&
13455 sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
13456 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13459 /* there is nothing queued to send, so I'm done... */
13460 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13461 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13462 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13463 struct sctp_nets *netp;
13465 /* only send SHUTDOWN the first time through */
13466 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
13467 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
13469 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
13470 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
13471 sctp_stop_timers_for_shutdown(stcb);
13472 if (stcb->asoc.alternate) {
13473 netp = stcb->asoc.alternate;
13475 netp = stcb->asoc.primary_destination;
13477 sctp_send_shutdown(stcb, netp);
13478 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
13480 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13481 asoc->primary_destination);
13485 * we still got (or just got) data to send, so set SHUTDOWN_PENDING.
13489 * XXX sockets draft says that SCTP_EOF should be
13490 * sent with no data. Currently, we will allow user
13491 * data to be sent first and then move to SHUTDOWN-PENDING.
13494 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
13495 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
13496 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
13497 if (hold_tcblock == 0) {
13498 SCTP_TCB_LOCK(stcb);
13501 if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc)) {
13502 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
13504 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
13505 if (TAILQ_EMPTY(&asoc->send_queue) &&
13506 TAILQ_EMPTY(&asoc->sent_queue) &&
13507 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
13508 struct mbuf *op_err;
13509 char msg[SCTP_DIAG_INFO_LEN];
13512 if (free_cnt_applied) {
13513 atomic_add_int(&stcb->asoc.refcnt, -1);
13514 free_cnt_applied = 0;
13516 snprintf(msg, sizeof(msg),
13517 "%s:%d at %s", __FILE__, __LINE__, __func__);
13518 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
13520 sctp_abort_an_association(stcb->sctp_ep, stcb,
13521 op_err, SCTP_SO_LOCKED);
13523 * now relock the stcb so everything is sane.
13530 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
13531 asoc->primary_destination);
13532 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
13537 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
13538 some_on_control = 1;
13540 if (queue_only_for_init) {
13541 if (hold_tcblock == 0) {
13542 SCTP_TCB_LOCK(stcb);
13545 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13546 /* a collision took us forward? */
13549 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13550 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
13554 if ((net->flight_size > net->cwnd) &&
13555 (stcb->asoc.sctp_cmt_on_off == 0)) {
13556 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
13558 } else if (asoc->ifp_had_enobuf) {
13559 SCTP_STAT_INCR(sctps_ifnomemqueued);
13560 if (net->flight_size > (2 * net->mtu)) {
13563 asoc->ifp_had_enobuf = 0;
13565 un_sent = stcb->asoc.total_output_queue_size - stcb->asoc.total_flight;
13566 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
13567 (stcb->asoc.total_flight > 0) &&
13568 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
13569 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
13571 * Ok, Nagle is set on and we have data outstanding.
13572 * Don't send anything and let SACKs drive out the
13573 * data unless we have a "full" segment to send.
13575 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13576 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13578 SCTP_STAT_INCR(sctps_naglequeued);
13581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13582 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13583 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13585 SCTP_STAT_INCR(sctps_naglesent);
13588 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
13589 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
13590 nagle_applies, un_sent);
13591 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
13592 stcb->asoc.total_flight,
13593 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
13595 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13596 /* we can attempt to send too. */
13597 if (hold_tcblock == 0) {
13599 * If there is activity recv'ing sacks, there is no need to send here.
13602 if (SCTP_TCB_TRYLOCK(stcb)) {
13603 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13607 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13609 } else if ((queue_only == 0) &&
13610 (stcb->asoc.peers_rwnd == 0) &&
13611 (stcb->asoc.total_flight == 0)) {
13612 /* We get to have a probe outstanding */
13613 if (hold_tcblock == 0) {
13615 SCTP_TCB_LOCK(stcb);
13617 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13618 } else if (some_on_control) {
13619 int num_out, reason, frag_point;
13621 /* Here we do control only */
13622 if (hold_tcblock == 0) {
13624 SCTP_TCB_LOCK(stcb);
13626 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
13627 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13628 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13630 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
13631 queue_only, stcb->asoc.peers_rwnd, un_sent,
13632 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13633 stcb->asoc.total_output_queue_size, error);
13638 if (local_soresv && stcb) {
13639 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13641 if (create_lock_applied) {
13642 SCTP_ASOC_CREATE_UNLOCK(inp);
13644 if ((stcb) && hold_tcblock) {
13645 SCTP_TCB_UNLOCK(stcb);
13647 if (stcb && free_cnt_applied) {
13648 atomic_add_int(&stcb->asoc.refcnt, -1);
13652 if (mtx_owned(&stcb->tcb_mtx)) {
13653 panic("Leaving with tcb mtx owned?");
13655 if (mtx_owned(&stcb->tcb_send_mtx)) {
13656 panic("Leaving with tcb send mtx owned?");
13664 sctp_m_freem(control);
13671 * generate an AUTHentication chunk, if required
13674 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13675 struct sctp_auth_chunk **auth_ret, uint32_t *offset,
13676 struct sctp_tcb *stcb, uint8_t chunk)
13678 struct mbuf *m_auth;
13679 struct sctp_auth_chunk *auth;
13683 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13687 if (stcb->asoc.auth_supported == 0) {
13690 /* does the requested chunk require auth? */
13691 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13694 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
13695 if (m_auth == NULL) {
13699 /* reserve some space if this will be the first mbuf */
13701 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13702 /* fill in the AUTH chunk details */
13703 auth = mtod(m_auth, struct sctp_auth_chunk *);
13704 memset(auth, 0, sizeof(*auth));
13705 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13706 auth->ch.chunk_flags = 0;
13707 chunk_len = sizeof(*auth) +
13708 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
13709 auth->ch.chunk_length = htons(chunk_len);
13710 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13711 /* key id and hmac digest will be computed and filled in upon send */
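/*
 * Per RFC 4895 the AUTH chunk must precede the first chunk it covers;
 * "*offset" (computed below) records where in the outgoing chain the
 * AUTH chunk lands so the send path can locate it and write the HMAC
 * once the packet is complete.
 */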
13713 /* save the offset where the auth was inserted into the chain */
13715 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
13716 *offset += SCTP_BUF_LEN(cn);
13719 /* update length and return pointer to the auth chunk */
13720 SCTP_BUF_LEN(m_auth) = chunk_len;
13721 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13722 if (auth_ret != NULL)
13730 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
13732 struct nd_prefix *pfx = NULL;
13733 struct nd_pfxrouter *pfxrtr = NULL;
13734 struct sockaddr_in6 gw6;
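/*
 * Goal: verify that the chosen IPv6 source address belongs to an
 * on-link prefix whose advertising router is the route's next hop,
 * i.e. the source is actually appropriate for the gateway this route
 * will use (the ND prefix and prefix-router lists are walked below).
 */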
13736 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13739 /* get prefix entry of address */
13741 LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
13742 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13744 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13745 &src6->sin6_addr, &pfx->ndpr_mask))
13748 /* no prefix entry in the prefix list */
13751 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13752 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13755 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13756 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13758 /* search installed gateway from prefix entry */
13759 LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
13760 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13761 gw6.sin6_family = AF_INET6;
13762 gw6.sin6_len = sizeof(struct sockaddr_in6);
13763 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13764 sizeof(struct in6_addr));
13765 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13766 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13767 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13768 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13769 if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
13771 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13777 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
13783 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
13786 struct sockaddr_in *sin, *mask;
13787 struct ifaddr *ifa;
13788 struct in_addr srcnetaddr, gwnetaddr;
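/*
 * Goal: mask both the candidate source address and the route's gateway
 * with the interface netmask; if the two network numbers match, the
 * gateway is on the source's subnet and the source is a sensible
 * choice for this next hop.
 */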
13790 if (ro == NULL || ro->ro_rt == NULL ||
13791 sifa->address.sa.sa_family != AF_INET) {
13794 ifa = (struct ifaddr *)sifa->ifa;
13795 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13796 sin = &sifa->address.sin;
13797 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13798 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13799 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13800 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13802 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13803 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13804 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13805 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13806 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13807 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {