2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
31 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_var.h>
39 #include <netinet/sctp_sysctl.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctputil.h>
43 #include <netinet/sctp_output.h>
44 #include <netinet/sctp_uio.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_auth.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_asconf.h>
49 #include <netinet/sctp_indata.h>
50 #include <netinet/sctp_bsd_addr.h>
51 #include <netinet/sctp_input.h>
52 #include <netinet/udp.h>
53 #include <machine/in_cksum.h>
57 #define SCTP_MAX_GAPS_INARRAY 4
59 uint8_t right_edge; /* mergable on the right edge */
60 uint8_t left_edge; /* mergable on the left edge */
63 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
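/*
 * Each sack_array entry below corresponds to one possible byte value of the
 * association's received-TSN mapping array.  right_edge is set when bit 0 of
 * the byte is set and left_edge when bit 7 is, so runs can be merged with the
 * neighbouring bytes; the third value is the number of gap-ack blocks the
 * byte contributes and the fourth is always zero in this table.  The gaps[]
 * member holds the start/end offsets of those blocks within the byte.
 */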
66 struct sack_track sack_array[256] = {
67 {0, 0, 0, 0, /* 0x00 */
74 {1, 0, 1, 0, /* 0x01 */
81 {0, 0, 1, 0, /* 0x02 */
88 {1, 0, 1, 0, /* 0x03 */
95 {0, 0, 1, 0, /* 0x04 */
102 {1, 0, 2, 0, /* 0x05 */
109 {0, 0, 1, 0, /* 0x06 */
116 {1, 0, 1, 0, /* 0x07 */
123 {0, 0, 1, 0, /* 0x08 */
130 {1, 0, 2, 0, /* 0x09 */
137 {0, 0, 2, 0, /* 0x0a */
144 {1, 0, 2, 0, /* 0x0b */
151 {0, 0, 1, 0, /* 0x0c */
158 {1, 0, 2, 0, /* 0x0d */
165 {0, 0, 1, 0, /* 0x0e */
172 {1, 0, 1, 0, /* 0x0f */
179 {0, 0, 1, 0, /* 0x10 */
186 {1, 0, 2, 0, /* 0x11 */
193 {0, 0, 2, 0, /* 0x12 */
200 {1, 0, 2, 0, /* 0x13 */
207 {0, 0, 2, 0, /* 0x14 */
214 {1, 0, 3, 0, /* 0x15 */
221 {0, 0, 2, 0, /* 0x16 */
228 {1, 0, 2, 0, /* 0x17 */
235 {0, 0, 1, 0, /* 0x18 */
242 {1, 0, 2, 0, /* 0x19 */
249 {0, 0, 2, 0, /* 0x1a */
256 {1, 0, 2, 0, /* 0x1b */
263 {0, 0, 1, 0, /* 0x1c */
270 {1, 0, 2, 0, /* 0x1d */
277 {0, 0, 1, 0, /* 0x1e */
284 {1, 0, 1, 0, /* 0x1f */
291 {0, 0, 1, 0, /* 0x20 */
298 {1, 0, 2, 0, /* 0x21 */
305 {0, 0, 2, 0, /* 0x22 */
312 {1, 0, 2, 0, /* 0x23 */
319 {0, 0, 2, 0, /* 0x24 */
326 {1, 0, 3, 0, /* 0x25 */
333 {0, 0, 2, 0, /* 0x26 */
340 {1, 0, 2, 0, /* 0x27 */
347 {0, 0, 2, 0, /* 0x28 */
354 {1, 0, 3, 0, /* 0x29 */
361 {0, 0, 3, 0, /* 0x2a */
368 {1, 0, 3, 0, /* 0x2b */
375 {0, 0, 2, 0, /* 0x2c */
382 {1, 0, 3, 0, /* 0x2d */
389 {0, 0, 2, 0, /* 0x2e */
396 {1, 0, 2, 0, /* 0x2f */
403 {0, 0, 1, 0, /* 0x30 */
410 {1, 0, 2, 0, /* 0x31 */
417 {0, 0, 2, 0, /* 0x32 */
424 {1, 0, 2, 0, /* 0x33 */
431 {0, 0, 2, 0, /* 0x34 */
438 {1, 0, 3, 0, /* 0x35 */
445 {0, 0, 2, 0, /* 0x36 */
452 {1, 0, 2, 0, /* 0x37 */
459 {0, 0, 1, 0, /* 0x38 */
466 {1, 0, 2, 0, /* 0x39 */
473 {0, 0, 2, 0, /* 0x3a */
480 {1, 0, 2, 0, /* 0x3b */
487 {0, 0, 1, 0, /* 0x3c */
494 {1, 0, 2, 0, /* 0x3d */
501 {0, 0, 1, 0, /* 0x3e */
508 {1, 0, 1, 0, /* 0x3f */
515 {0, 0, 1, 0, /* 0x40 */
522 {1, 0, 2, 0, /* 0x41 */
529 {0, 0, 2, 0, /* 0x42 */
536 {1, 0, 2, 0, /* 0x43 */
543 {0, 0, 2, 0, /* 0x44 */
550 {1, 0, 3, 0, /* 0x45 */
557 {0, 0, 2, 0, /* 0x46 */
564 {1, 0, 2, 0, /* 0x47 */
571 {0, 0, 2, 0, /* 0x48 */
578 {1, 0, 3, 0, /* 0x49 */
585 {0, 0, 3, 0, /* 0x4a */
592 {1, 0, 3, 0, /* 0x4b */
599 {0, 0, 2, 0, /* 0x4c */
606 {1, 0, 3, 0, /* 0x4d */
613 {0, 0, 2, 0, /* 0x4e */
620 {1, 0, 2, 0, /* 0x4f */
627 {0, 0, 2, 0, /* 0x50 */
634 {1, 0, 3, 0, /* 0x51 */
641 {0, 0, 3, 0, /* 0x52 */
648 {1, 0, 3, 0, /* 0x53 */
655 {0, 0, 3, 0, /* 0x54 */
662 {1, 0, 4, 0, /* 0x55 */
669 {0, 0, 3, 0, /* 0x56 */
676 {1, 0, 3, 0, /* 0x57 */
683 {0, 0, 2, 0, /* 0x58 */
690 {1, 0, 3, 0, /* 0x59 */
697 {0, 0, 3, 0, /* 0x5a */
704 {1, 0, 3, 0, /* 0x5b */
711 {0, 0, 2, 0, /* 0x5c */
718 {1, 0, 3, 0, /* 0x5d */
725 {0, 0, 2, 0, /* 0x5e */
732 {1, 0, 2, 0, /* 0x5f */
739 {0, 0, 1, 0, /* 0x60 */
746 {1, 0, 2, 0, /* 0x61 */
753 {0, 0, 2, 0, /* 0x62 */
760 {1, 0, 2, 0, /* 0x63 */
767 {0, 0, 2, 0, /* 0x64 */
774 {1, 0, 3, 0, /* 0x65 */
781 {0, 0, 2, 0, /* 0x66 */
788 {1, 0, 2, 0, /* 0x67 */
795 {0, 0, 2, 0, /* 0x68 */
802 {1, 0, 3, 0, /* 0x69 */
809 {0, 0, 3, 0, /* 0x6a */
816 {1, 0, 3, 0, /* 0x6b */
823 {0, 0, 2, 0, /* 0x6c */
830 {1, 0, 3, 0, /* 0x6d */
837 {0, 0, 2, 0, /* 0x6e */
844 {1, 0, 2, 0, /* 0x6f */
851 {0, 0, 1, 0, /* 0x70 */
858 {1, 0, 2, 0, /* 0x71 */
865 {0, 0, 2, 0, /* 0x72 */
872 {1, 0, 2, 0, /* 0x73 */
879 {0, 0, 2, 0, /* 0x74 */
886 {1, 0, 3, 0, /* 0x75 */
893 {0, 0, 2, 0, /* 0x76 */
900 {1, 0, 2, 0, /* 0x77 */
907 {0, 0, 1, 0, /* 0x78 */
914 {1, 0, 2, 0, /* 0x79 */
921 {0, 0, 2, 0, /* 0x7a */
928 {1, 0, 2, 0, /* 0x7b */
935 {0, 0, 1, 0, /* 0x7c */
942 {1, 0, 2, 0, /* 0x7d */
949 {0, 0, 1, 0, /* 0x7e */
956 {1, 0, 1, 0, /* 0x7f */
963 {0, 1, 1, 0, /* 0x80 */
970 {1, 1, 2, 0, /* 0x81 */
977 {0, 1, 2, 0, /* 0x82 */
984 {1, 1, 2, 0, /* 0x83 */
991 {0, 1, 2, 0, /* 0x84 */
998 {1, 1, 3, 0, /* 0x85 */
1005 {0, 1, 2, 0, /* 0x86 */
1012 {1, 1, 2, 0, /* 0x87 */
1019 {0, 1, 2, 0, /* 0x88 */
1026 {1, 1, 3, 0, /* 0x89 */
1033 {0, 1, 3, 0, /* 0x8a */
1040 {1, 1, 3, 0, /* 0x8b */
1047 {0, 1, 2, 0, /* 0x8c */
1054 {1, 1, 3, 0, /* 0x8d */
1061 {0, 1, 2, 0, /* 0x8e */
1068 {1, 1, 2, 0, /* 0x8f */
1075 {0, 1, 2, 0, /* 0x90 */
1082 {1, 1, 3, 0, /* 0x91 */
1089 {0, 1, 3, 0, /* 0x92 */
1096 {1, 1, 3, 0, /* 0x93 */
1103 {0, 1, 3, 0, /* 0x94 */
1110 {1, 1, 4, 0, /* 0x95 */
1117 {0, 1, 3, 0, /* 0x96 */
1124 {1, 1, 3, 0, /* 0x97 */
1131 {0, 1, 2, 0, /* 0x98 */
1138 {1, 1, 3, 0, /* 0x99 */
1145 {0, 1, 3, 0, /* 0x9a */
1152 {1, 1, 3, 0, /* 0x9b */
1159 {0, 1, 2, 0, /* 0x9c */
1166 {1, 1, 3, 0, /* 0x9d */
1173 {0, 1, 2, 0, /* 0x9e */
1180 {1, 1, 2, 0, /* 0x9f */
1187 {0, 1, 2, 0, /* 0xa0 */
1194 {1, 1, 3, 0, /* 0xa1 */
1201 {0, 1, 3, 0, /* 0xa2 */
1208 {1, 1, 3, 0, /* 0xa3 */
1215 {0, 1, 3, 0, /* 0xa4 */
1222 {1, 1, 4, 0, /* 0xa5 */
1229 {0, 1, 3, 0, /* 0xa6 */
1236 {1, 1, 3, 0, /* 0xa7 */
1243 {0, 1, 3, 0, /* 0xa8 */
1250 {1, 1, 4, 0, /* 0xa9 */
1257 {0, 1, 4, 0, /* 0xaa */
1264 {1, 1, 4, 0, /* 0xab */
1271 {0, 1, 3, 0, /* 0xac */
1278 {1, 1, 4, 0, /* 0xad */
1285 {0, 1, 3, 0, /* 0xae */
1292 {1, 1, 3, 0, /* 0xaf */
1299 {0, 1, 2, 0, /* 0xb0 */
1306 {1, 1, 3, 0, /* 0xb1 */
1313 {0, 1, 3, 0, /* 0xb2 */
1320 {1, 1, 3, 0, /* 0xb3 */
1327 {0, 1, 3, 0, /* 0xb4 */
1334 {1, 1, 4, 0, /* 0xb5 */
1341 {0, 1, 3, 0, /* 0xb6 */
1348 {1, 1, 3, 0, /* 0xb7 */
1355 {0, 1, 2, 0, /* 0xb8 */
1362 {1, 1, 3, 0, /* 0xb9 */
1369 {0, 1, 3, 0, /* 0xba */
1376 {1, 1, 3, 0, /* 0xbb */
1383 {0, 1, 2, 0, /* 0xbc */
1390 {1, 1, 3, 0, /* 0xbd */
1397 {0, 1, 2, 0, /* 0xbe */
1404 {1, 1, 2, 0, /* 0xbf */
1411 {0, 1, 1, 0, /* 0xc0 */
1418 {1, 1, 2, 0, /* 0xc1 */
1425 {0, 1, 2, 0, /* 0xc2 */
1432 {1, 1, 2, 0, /* 0xc3 */
1439 {0, 1, 2, 0, /* 0xc4 */
1446 {1, 1, 3, 0, /* 0xc5 */
1453 {0, 1, 2, 0, /* 0xc6 */
1460 {1, 1, 2, 0, /* 0xc7 */
1467 {0, 1, 2, 0, /* 0xc8 */
1474 {1, 1, 3, 0, /* 0xc9 */
1481 {0, 1, 3, 0, /* 0xca */
1488 {1, 1, 3, 0, /* 0xcb */
1495 {0, 1, 2, 0, /* 0xcc */
1502 {1, 1, 3, 0, /* 0xcd */
1509 {0, 1, 2, 0, /* 0xce */
1516 {1, 1, 2, 0, /* 0xcf */
1523 {0, 1, 2, 0, /* 0xd0 */
1530 {1, 1, 3, 0, /* 0xd1 */
1537 {0, 1, 3, 0, /* 0xd2 */
1544 {1, 1, 3, 0, /* 0xd3 */
1551 {0, 1, 3, 0, /* 0xd4 */
1558 {1, 1, 4, 0, /* 0xd5 */
1565 {0, 1, 3, 0, /* 0xd6 */
1572 {1, 1, 3, 0, /* 0xd7 */
1579 {0, 1, 2, 0, /* 0xd8 */
1586 {1, 1, 3, 0, /* 0xd9 */
1593 {0, 1, 3, 0, /* 0xda */
1600 {1, 1, 3, 0, /* 0xdb */
1607 {0, 1, 2, 0, /* 0xdc */
1614 {1, 1, 3, 0, /* 0xdd */
1621 {0, 1, 2, 0, /* 0xde */
1628 {1, 1, 2, 0, /* 0xdf */
1635 {0, 1, 1, 0, /* 0xe0 */
1642 {1, 1, 2, 0, /* 0xe1 */
1649 {0, 1, 2, 0, /* 0xe2 */
1656 {1, 1, 2, 0, /* 0xe3 */
1663 {0, 1, 2, 0, /* 0xe4 */
1670 {1, 1, 3, 0, /* 0xe5 */
1677 {0, 1, 2, 0, /* 0xe6 */
1684 {1, 1, 2, 0, /* 0xe7 */
1691 {0, 1, 2, 0, /* 0xe8 */
1698 {1, 1, 3, 0, /* 0xe9 */
1705 {0, 1, 3, 0, /* 0xea */
1712 {1, 1, 3, 0, /* 0xeb */
1719 {0, 1, 2, 0, /* 0xec */
1726 {1, 1, 3, 0, /* 0xed */
1733 {0, 1, 2, 0, /* 0xee */
1740 {1, 1, 2, 0, /* 0xef */
1747 {0, 1, 1, 0, /* 0xf0 */
1754 {1, 1, 2, 0, /* 0xf1 */
1761 {0, 1, 2, 0, /* 0xf2 */
1768 {1, 1, 2, 0, /* 0xf3 */
1775 {0, 1, 2, 0, /* 0xf4 */
1782 {1, 1, 3, 0, /* 0xf5 */
1789 {0, 1, 2, 0, /* 0xf6 */
1796 {1, 1, 2, 0, /* 0xf7 */
1803 {0, 1, 1, 0, /* 0xf8 */
1810 {1, 1, 2, 0, /* 0xf9 */
1817 {0, 1, 2, 0, /* 0xfa */
1824 {1, 1, 2, 0, /* 0xfb */
1831 {0, 1, 1, 0, /* 0xfc */
1838 {1, 1, 2, 0, /* 0xfd */
1845 {0, 1, 1, 0, /* 0xfe */
1852 {1, 1, 1, 0, /* 0xff */
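/*
 * Reading the table: the entry for mapping-array byte 0x55 (bits 0, 2, 4 and
 * 6 set, i.e. four isolated received TSNs) is {1, 0, 4, 0} -- four gap-ack
 * blocks, mergeable with the preceding byte because bit 0 is set.  Byte 0xaa
 * (bits 1, 3, 5 and 7) likewise yields four blocks but merges on the other
 * edge: {0, 1, 4, 0}.
 */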
1863 sctp_is_address_in_scope(struct sctp_ifa *ifa,
1864 int ipv4_addr_legal,
1865 int ipv6_addr_legal,
1867 int ipv4_local_scope,
1872 if ((loopback_scope == 0) &&
1873 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1875 * skip loopback if not in scope
1879 switch (ifa->address.sa.sa_family) {
1881 if (ipv4_addr_legal) {
1882 struct sockaddr_in *sin;
1884 sin = (struct sockaddr_in *)&ifa->address.sin;
1885 if (sin->sin_addr.s_addr == 0) {
1886 /* not in scope, unspecified */
1889 if ((ipv4_local_scope == 0) &&
1890 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1891 /* private address not in scope */
1900 if (ipv6_addr_legal) {
1901 struct sockaddr_in6 *sin6;
1904 * Must update the flags, bummer, which means any
1905 * IFA locks must now be applied HERE <->
1908 sctp_gather_internal_ifa_flags(ifa);
1910 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1913 /* ok to use deprecated addresses? */
1914 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1915 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1916 /* skip unspecified addresses */
1919 if ( /* (local_scope == 0) && */
1920 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1923 if ((site_scope == 0) &&
1924 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
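/*
 * Append the given local address to the mbuf chain as an INIT/INIT-ACK
 * address parameter: an 8-byte IPv4-address TLV or a 20-byte IPv6-address
 * TLV, grabbing a fresh mbuf when the current one has no trailing space.
 */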
1938 static struct mbuf *
1939 sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
1941 struct sctp_paramhdr *parmh;
1945 if (ifa->address.sa.sa_family == AF_INET) {
1946 len = sizeof(struct sctp_ipv4addr_param);
1947 } else if (ifa->address.sa.sa_family == AF_INET6) {
1948 len = sizeof(struct sctp_ipv6addr_param);
1953 if (M_TRAILINGSPACE(m) >= len) {
1954 /* easy case: we just drop it on the end */
1955 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1958 /* Need more space */
1960 while (SCTP_BUF_NEXT(mret) != NULL) {
1961 mret = SCTP_BUF_NEXT(mret);
1963 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
1964 if (SCTP_BUF_NEXT(mret) == NULL) {
1965 /* We are hosed, can't add more addresses */
1968 mret = SCTP_BUF_NEXT(mret);
1969 parmh = mtod(mret, struct sctp_paramhdr *);
1971 /* now add the parameter */
1972 switch (ifa->address.sa.sa_family) {
1975 struct sctp_ipv4addr_param *ipv4p;
1976 struct sockaddr_in *sin;
1978 sin = (struct sockaddr_in *)&ifa->address.sin;
1979 ipv4p = (struct sctp_ipv4addr_param *)parmh;
1980 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
1981 parmh->param_length = htons(len);
1982 ipv4p->addr = sin->sin_addr.s_addr;
1983 SCTP_BUF_LEN(mret) += len;
1989 struct sctp_ipv6addr_param *ipv6p;
1990 struct sockaddr_in6 *sin6;
1992 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1993 ipv6p = (struct sctp_ipv6addr_param *)parmh;
1994 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
1995 parmh->param_length = htons(len);
1996 memcpy(ipv6p->addr, &sin6->sin6_addr,
1997 sizeof(ipv6p->addr));
1998 /* clear embedded scope in the address */
1999 in6_clearscope((struct in6_addr *)ipv6p->addr);
2000 SCTP_BUF_LEN(mret) += len;
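/*
 * Walk the usable local addresses and append each one that passes the scope
 * checks to the INIT/INIT-ACK chain.  For a bound-all endpoint this is a
 * two-pass walk over the VRF's interface list (count first, then add); for a
 * bound-specific endpoint only the endpoint's own address list is considered,
 * and a single bound address is not listed at all so that the source address
 * of our packets dictates the address a (possibly NAT'd) peer uses for us.
 */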
2012 sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
2013 struct mbuf *m_at, int cnt_inits_to)
2015 struct sctp_vrf *vrf = NULL;
2016 int cnt, limit_out = 0, total_count;
2019 vrf_id = inp->def_vrf_id;
2020 SCTP_IPI_ADDR_RLOCK();
2021 vrf = sctp_find_vrf(vrf_id);
2023 SCTP_IPI_ADDR_RUNLOCK();
2026 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2027 struct sctp_ifa *sctp_ifap;
2028 struct sctp_ifn *sctp_ifnp;
2031 if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
2033 cnt = SCTP_ADDRESS_LIMIT;
2036 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2037 if ((scope->loopback_scope == 0) &&
2038 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2040 * Skip loopback devices if loopback_scope
2045 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2046 if (sctp_is_address_in_scope(sctp_ifap,
2047 scope->ipv4_addr_legal,
2048 scope->ipv6_addr_legal,
2049 scope->loopback_scope,
2050 scope->ipv4_local_scope,
2052 scope->site_scope, 1) == 0) {
2056 if (cnt > SCTP_ADDRESS_LIMIT) {
2060 if (cnt > SCTP_ADDRESS_LIMIT) {
2067 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
2069 if ((scope->loopback_scope == 0) &&
2070 SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
2072 * Skip loopback devices if
2073 * loopback_scope not set
2077 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
2078 if (sctp_is_address_in_scope(sctp_ifap,
2079 scope->ipv4_addr_legal,
2080 scope->ipv6_addr_legal,
2081 scope->loopback_scope,
2082 scope->ipv4_local_scope,
2084 scope->site_scope, 0) == 0) {
2087 m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
2098 if (total_count > SCTP_ADDRESS_LIMIT) {
2099 /* No more addresses */
2107 struct sctp_laddr *laddr;
2110 /* First, how many ? */
2111 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2112 if (laddr->ifa == NULL) {
2115 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2117 * Address being deleted by the system, don't
2121 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2123 * Address being deleted on this ep; don't
2128 if (sctp_is_address_in_scope(laddr->ifa,
2129 scope->ipv4_addr_legal,
2130 scope->ipv6_addr_legal,
2131 scope->loopback_scope,
2132 scope->ipv4_local_scope,
2134 scope->site_scope, 1) == 0) {
2139 if (cnt > SCTP_ADDRESS_LIMIT) {
2143 * To get through a NAT we only list addresses if we have
2144 * more than one. That way if you just bind a single address
2145 * we let the source of the init dictate our address.
2148 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2150 if (laddr->ifa == NULL) {
2153 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
2156 if (sctp_is_address_in_scope(laddr->ifa,
2157 scope->ipv4_addr_legal,
2158 scope->ipv6_addr_legal,
2159 scope->loopback_scope,
2160 scope->ipv4_local_scope,
2162 scope->site_scope, 0) == 0) {
2165 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
2167 if (cnt >= SCTP_ADDRESS_LIMIT) {
2173 SCTP_IPI_ADDR_RUNLOCK();
2177 static struct sctp_ifa *
2178 sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
2179 uint8_t dest_is_loop,
2180 uint8_t dest_is_priv,
2183 uint8_t dest_is_global = 0;
2185 /* dest_is_priv is true if destination is a private address */
2186 /* dest_is_loop is true if destination is a loopback address */
2189 * Here we determine if it's a preferred address. A preferred address
2190 * means it is the same scope or higher scope than the destination.
2191 * L = loopback, P = private, G = global
 * -----------------------------------------
 *   src   |  dest  | result
 * -----------------------------------------
 *    L    |   L    | yes
 *    P    |   L    | yes-v4 no-v6
 *    G    |   L    | yes-v4 no-v6
 *    L    |   P    | no
 *    P    |   P    | yes
 *    G    |   P    | no
 *    L    |   G    | no
 *    P    |   G    | no
 *    G    |   G    | yes
 * -----------------------------------------
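/*
 * The NO:4 .. NO:7 checks below implement the "no" rows of this table:
 * loopback->private, global->private, loopback->global and private->global
 * source/destination combinations are all rejected.
 */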
2205 if (ifa->address.sa.sa_family != fam) {
2206 /* forget mis-matched family */
2209 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2212 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
2213 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
2214 /* Ok the address may be ok */
2215 if (fam == AF_INET6) {
2216 /* ok to use deprecated addresses? no, let's not! */
2217 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2218 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
2221 if (ifa->src_is_priv && !ifa->src_is_loop) {
2223 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
2227 if (ifa->src_is_glob) {
2229 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
2235 * Now that we know what is what, implement our table. This could in
2236 * theory be done slicker (it used to be), but this is
2237 * straightforward and easier to validate :-)
2239 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
2240 ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
2241 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
2242 dest_is_loop, dest_is_priv, dest_is_global);
2244 if ((ifa->src_is_loop) && (dest_is_priv)) {
2245 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
2248 if ((ifa->src_is_glob) && (dest_is_priv)) {
2249 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
2252 if ((ifa->src_is_loop) && (dest_is_global)) {
2253 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
2256 if ((ifa->src_is_priv) && (dest_is_global)) {
2257 SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
2260 SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
2261 /* it's a preferred address */
2265 static struct sctp_ifa *
2266 sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2267 uint8_t dest_is_loop,
2268 uint8_t dest_is_priv,
2271 uint8_t dest_is_global = 0;
2275 * Here we determine if it's an acceptable address. An acceptable
2276 * address means it is the same scope or higher scope, but we can
2277 * allow for NAT, which means it's ok to have a global dest and a
2280 * L = loopback, P = private, G = global
 * -----------------------------------------
 *   src   |  dest  | result
 * -----------------------------------------
 *    L    |   L    | yes
 *    P    |   L    | yes-v4 no-v6
 *    G    |   L    | yes
 *    L    |   P    | no
 *    P    |   P    | yes
 *    G    |   P    | yes - May not work
 *    L    |   G    | no
 *    P    |   G    | yes - May not work
 *    G    |   G    | yes
 * -----------------------------------------
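/*
 * Compared with the "preferred" table above, the extra allowances here are
 * global->loopback for IPv6 and the two NAT cases (global source for a
 * private destination, private source for a global destination).
 */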
2295 if (ifa->address.sa.sa_family != fam) {
2296 /* forget non-matching family */
2299 /* Ok the address may be ok */
2300 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2303 if (fam == AF_INET6) {
2304 /* ok to use deprecated addresses? */
2305 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2308 if (ifa->src_is_priv) {
2309 /* Special case, linklocal to loop */
2315 * Now that we know what is what, implement our table. This could in
2316 * theory be done slicker (it used to be), but this is
2317 * straightforward and easier to validate :-)
2319 if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
2322 if ((ifa->src_is_loop == 1) && (dest_is_global)) {
2325 /* it's an acceptable address */
2330 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2332 struct sctp_laddr *laddr;
2335 /* There are no restrictions, no TCB :-) */
2338 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2339 if (laddr->ifa == NULL) {
2340 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2344 if (laddr->ifa == ifa) {
2345 /* Yes it is on the list */
2354 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2356 struct sctp_laddr *laddr;
2360 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2361 if (laddr->ifa == NULL) {
2362 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
2366 if ((laddr->ifa == ifa) && laddr->action == 0)
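/*
 * Source selection for a bound-specific endpoint with no association: first
 * try a preferred address that is both bound to the endpoint and configured
 * on the interface we will emit on; failing that, rotate through the
 * endpoint's bound addresses via inp->next_addr_touse, looking first for a
 * preferred address and then for a merely acceptable one.
 */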
2375 static struct sctp_ifa *
2376 sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
2379 int non_asoc_addr_ok,
2380 uint8_t dest_is_priv,
2381 uint8_t dest_is_loop,
2384 struct sctp_laddr *laddr, *starting_point;
2387 struct sctp_ifn *sctp_ifn;
2388 struct sctp_ifa *sctp_ifa, *sifa;
2389 struct sctp_vrf *vrf;
2392 vrf = sctp_find_vrf(vrf_id);
2396 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2397 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2398 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2400 * First question: is the ifn we will emit on in our list? If so, we
2401 * want such an address. Note that we first looked for a preferred
2405 /* is a preferred one on the interface we route out? */
2406 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2407 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2408 (non_asoc_addr_ok == 0))
2410 sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
2415 if (sctp_is_addr_in_ep(inp, sifa)) {
2416 atomic_add_int(&sifa->refcount, 1);
2422 * ok, now we need to find one on the list of the addresses. We
2423 * can't get one on the emitting interface so let's find first a
2424 * preferred one. If not that, an acceptable one; otherwise... we
2427 starting_point = inp->next_addr_touse;
2429 if (inp->next_addr_touse == NULL) {
2430 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2433 for (laddr = inp->next_addr_touse; laddr;
2434 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2435 if (laddr->ifa == NULL) {
2436 /* address has been removed */
2439 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2440 /* address is being deleted */
2443 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
2447 atomic_add_int(&sifa->refcount, 1);
2450 if (resettotop == 0) {
2451 inp->next_addr_touse = NULL;
2454 inp->next_addr_touse = starting_point;
2457 if (inp->next_addr_touse == NULL) {
2458 inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
2461 /* ok, what about an acceptable address in the inp */
2462 for (laddr = inp->next_addr_touse; laddr;
2463 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2464 if (laddr->ifa == NULL) {
2465 /* address has been removed */
2468 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2469 /* address is being deleted */
2472 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2476 atomic_add_int(&sifa->refcount, 1);
2479 if (resettotop == 0) {
2480 inp->next_addr_touse = NULL;
2481 goto once_again_too;
2484 * no address bound can be a source for the destination we are in
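/*
 * Same idea as above, but for an existing association: addresses on the
 * association's restricted list are skipped (unless non_asoc_addr_ok lets a
 * pending one through), and the rotation point is kept per association in
 * stcb->asoc.last_used_address.
 */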
2492 static struct sctp_ifa *
2493 sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
2494 struct sctp_tcb *stcb,
2495 struct sctp_nets *net,
2498 uint8_t dest_is_priv,
2499 uint8_t dest_is_loop,
2500 int non_asoc_addr_ok,
2503 struct sctp_laddr *laddr, *starting_point;
2505 struct sctp_ifn *sctp_ifn;
2506 struct sctp_ifa *sctp_ifa, *sifa;
2507 uint8_t start_at_beginning = 0;
2508 struct sctp_vrf *vrf;
2512 * First question: is the ifn we will emit on in our list? If so, we
2515 vrf = sctp_find_vrf(vrf_id);
2519 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2520 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2521 sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2524 * First question: is the ifn we will emit on in our list? If so,
2525 * we want that one. First we look for a preferred. Second, we go
2526 * for an acceptable.
2529 /* first try for a preferred address on the ep */
2530 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2531 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2533 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2534 sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2537 if (((non_asoc_addr_ok == 0) &&
2538 (sctp_is_addr_restricted(stcb, sifa))) ||
2539 (non_asoc_addr_ok &&
2540 (sctp_is_addr_restricted(stcb, sifa)) &&
2541 (!sctp_is_addr_pending(stcb, sifa)))) {
2542 /* on the no-no list */
2545 atomic_add_int(&sifa->refcount, 1);
2549 /* next try for an acceptable address on the ep */
2550 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2551 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2553 if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
2554 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2557 if (((non_asoc_addr_ok == 0) &&
2558 (sctp_is_addr_restricted(stcb, sifa))) ||
2559 (non_asoc_addr_ok &&
2560 (sctp_is_addr_restricted(stcb, sifa)) &&
2561 (!sctp_is_addr_pending(stcb, sifa)))) {
2562 /* on the no-no list */
2565 atomic_add_int(&sifa->refcount, 1);
2572 * If we can't find one like that then we must look at all addresses
2573 * bound, picking first a preferable one and then an acceptable one.
2575 starting_point = stcb->asoc.last_used_address;
2577 if (stcb->asoc.last_used_address == NULL) {
2578 start_at_beginning = 1;
2579 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2581 /* search beginning with the last used address */
2582 for (laddr = stcb->asoc.last_used_address; laddr;
2583 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2584 if (laddr->ifa == NULL) {
2585 /* address has been removed */
2588 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2589 /* address is being deleted */
2592 sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
2595 if (((non_asoc_addr_ok == 0) &&
2596 (sctp_is_addr_restricted(stcb, sifa))) ||
2597 (non_asoc_addr_ok &&
2598 (sctp_is_addr_restricted(stcb, sifa)) &&
2599 (!sctp_is_addr_pending(stcb, sifa)))) {
2600 /* on the no-no list */
2603 stcb->asoc.last_used_address = laddr;
2604 atomic_add_int(&sifa->refcount, 1);
2607 if (start_at_beginning == 0) {
2608 stcb->asoc.last_used_address = NULL;
2609 goto sctp_from_the_top;
2611 /* now try for any higher scope than the destination */
2612 stcb->asoc.last_used_address = starting_point;
2613 start_at_beginning = 0;
2615 if (stcb->asoc.last_used_address == NULL) {
2616 start_at_beginning = 1;
2617 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
2619 /* search beginning with the last used address */
2620 for (laddr = stcb->asoc.last_used_address; laddr;
2621 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
2622 if (laddr->ifa == NULL) {
2623 /* address has been removed */
2626 if (laddr->action == SCTP_DEL_IP_ADDRESS) {
2627 /* address is being deleted */
2630 sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
2634 if (((non_asoc_addr_ok == 0) &&
2635 (sctp_is_addr_restricted(stcb, sifa))) ||
2636 (non_asoc_addr_ok &&
2637 (sctp_is_addr_restricted(stcb, sifa)) &&
2638 (!sctp_is_addr_pending(stcb, sifa)))) {
2639 /* on the no-no list */
2642 stcb->asoc.last_used_address = laddr;
2643 atomic_add_int(&sifa->refcount, 1);
2646 if (start_at_beginning == 0) {
2647 stcb->asoc.last_used_address = NULL;
2648 goto sctp_from_the_top2;
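/*
 * Bound-all helper: walk the interface's address list and return the
 * addr_wanted'th eligible preferred address, skipping deferred, restricted,
 * wrongly scoped link-local and (in the mobility case) topologically stale
 * addresses along the way.
 */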
2653 static struct sctp_ifa *
2654 sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2655 struct sctp_tcb *stcb,
2656 int non_asoc_addr_ok,
2657 uint8_t dest_is_loop,
2658 uint8_t dest_is_priv,
2664 struct sctp_ifa *ifa, *sifa;
2665 int num_eligible_addr = 0;
2668 struct sockaddr_in6 sin6, lsa6;
2670 if (fam == AF_INET6) {
2671 memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
2672 (void)sa6_recoverscope(&sin6);
2675 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2676 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2677 (non_asoc_addr_ok == 0))
2679 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2684 if (fam == AF_INET6 &&
2686 sifa->src_is_loop && sifa->src_is_priv) {
2688 * don't allow fe80::1 to be a src on loop ::1, we
2689 * don't list it to the peer so we will get an
2694 if (fam == AF_INET6 &&
2695 IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
2696 IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
2698 * link-local <-> link-local must belong to the same
2701 memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
2702 (void)sa6_recoverscope(&lsa6);
2703 if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
2710 * Check if the IPv6 address matches to next-hop. In the
2711 * mobile case, old IPv6 address may be not deleted from the
2712 * interface. Then, the interface has previous and new
2713 * addresses. We should use one corresponding to the
2714 * next-hop. (by micchie)
2717 if (stcb && fam == AF_INET6 &&
2718 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2719 if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
2725 /* Avoid topologically incorrect IPv4 address */
2726 if (stcb && fam == AF_INET &&
2727 sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
2728 if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
2733 if (((non_asoc_addr_ok == 0) &&
2734 (sctp_is_addr_restricted(stcb, sifa))) ||
2735 (non_asoc_addr_ok &&
2736 (sctp_is_addr_restricted(stcb, sifa)) &&
2737 (!sctp_is_addr_pending(stcb, sifa)))) {
2739 * It is restricted for some reason..
2740 * probably not yet added.
2745 if (num_eligible_addr >= addr_wanted) {
2748 num_eligible_addr++;
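/*
 * Companion to the selector above: apply the basic eligibility tests
 * (deferred, preferred, restricted) and just count how many preferred
 * addresses the interface offers, so the caller can rotate cur_addr_num
 * over them.
 */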
2755 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
2756 struct sctp_tcb *stcb,
2757 int non_asoc_addr_ok,
2758 uint8_t dest_is_loop,
2759 uint8_t dest_is_priv,
2762 struct sctp_ifa *ifa, *sifa;
2763 int num_eligible_addr = 0;
2765 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2766 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2767 (non_asoc_addr_ok == 0)) {
2770 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
2776 if (((non_asoc_addr_ok == 0) &&
2777 (sctp_is_addr_restricted(stcb, sifa))) ||
2778 (non_asoc_addr_ok &&
2779 (sctp_is_addr_restricted(stcb, sifa)) &&
2780 (!sctp_is_addr_pending(stcb, sifa)))) {
2782 * It is restricted for some reason..
2783 * probably not yet added.
2788 num_eligible_addr++;
2790 return (num_eligible_addr);
2793 static struct sctp_ifa *
2794 sctp_choose_boundall(struct sctp_inpcb *inp,
2795 struct sctp_tcb *stcb,
2796 struct sctp_nets *net,
2799 uint8_t dest_is_priv,
2800 uint8_t dest_is_loop,
2801 int non_asoc_addr_ok,
2804 int cur_addr_num = 0, num_preferred = 0;
2806 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2807 struct sctp_ifa *sctp_ifa, *sifa;
2809 struct sctp_vrf *vrf;
2812 * For boundall we can use any address in the association.
2813 * If non_asoc_addr_ok is set we can use any address (at least in
2814 * theory). So we look for preferred addresses first. If we find one,
2815 * we use it. Otherwise we next try to get an address on the
2816 * interface, which we should be able to do (unless non_asoc_addr_ok
2817 * is false and we are routed out that way). In these cases where we
2818 * can't use the address of the interface we go through all the
2819 * ifn's looking for an address we can use and fill that in. Punting
2820 * means we send back address 0, which will probably cause problems
2821 * actually since then IP will fill in the address of the route ifn,
2822 * which means we probably already rejected it.. i.e. here comes an
2825 vrf = sctp_find_vrf(vrf_id);
2829 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2830 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2831 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
2832 if (sctp_ifn == NULL) {
2833 /* ?? We don't have this guy ?? */
2834 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
2835 goto bound_all_plan_b;
2837 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
2838 ifn_index, sctp_ifn->ifn_name);
2841 cur_addr_num = net->indx_of_eligible_next_to_use;
2843 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
2848 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
2849 num_preferred, sctp_ifn->ifn_name);
2850 if (num_preferred == 0) {
2852 * no eligible addresses, we must use some other interface
2853 * address if we can find one.
2855 goto bound_all_plan_b;
2858 * Ok, we have num_eligible_addr set with how many we can use; this
2859 * may vary from call to call due to addresses being deprecated
2862 if (cur_addr_num >= num_preferred) {
2866 * select the nth address from the list (where cur_addr_num is the
2867 * nth) and 0 is the first one, 1 is the second one etc...
2869 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
2871 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2872 dest_is_priv, cur_addr_num, fam, ro);
2874 /* if sctp_ifa is NULL, something changed; fall to plan b. */
2876 atomic_add_int(&sctp_ifa->refcount, 1);
2878 /* save off where the next one we will want */
2879 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
2884 * plan_b: Look at all interfaces and find a preferred address. If
2885 * no preferred is found, fall through to plan_c.
2888 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
2889 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
2890 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
2891 sctp_ifn->ifn_name);
2892 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
2893 /* wrong base scope */
2894 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
2897 if ((sctp_ifn == looked_at) && looked_at) {
2898 /* already looked at this guy */
2899 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
2902 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
2903 dest_is_loop, dest_is_priv, fam);
2904 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2905 "Found ifn:%p %d preferred source addresses\n",
2906 ifn, num_preferred);
2907 if (num_preferred == 0) {
2908 /* None on this interface. */
2909 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
2912 SCTPDBG(SCTP_DEBUG_OUTPUT2,
2913 "num preferred:%d on interface:%p cur_addr_num:%d\n",
2914 num_preferred, sctp_ifn, cur_addr_num);
2917 * Ok, we have num_eligible_addr set with how many we can
2918 * use; this may vary from call to call due to addresses
2919 * being deprecated etc..
2921 if (cur_addr_num >= num_preferred) {
2924 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2925 dest_is_priv, cur_addr_num, fam, ro);
2929 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
2930 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
2932 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
2933 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
2934 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
2935 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
2937 atomic_add_int(&sifa->refcount, 1);
2942 /* plan_c: do we have an acceptable address on the emit interface */
2943 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
2944 if (emit_ifn == NULL) {
2947 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
2948 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2949 (non_asoc_addr_ok == 0))
2951 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
2956 if (((non_asoc_addr_ok == 0) &&
2957 (sctp_is_addr_restricted(stcb, sifa))) ||
2958 (non_asoc_addr_ok &&
2959 (sctp_is_addr_restricted(stcb, sifa)) &&
2960 (!sctp_is_addr_pending(stcb, sifa)))) {
2962 * It is restricted for some reason..
2963 * probably not yet added.
2968 atomic_add_int(&sifa->refcount, 1);
2973 * plan_d: We are in trouble. No preferred address on the emit
2974 * interface. And not even a preferred address on all interfaces. Go
2975 * out and see if we can find an acceptable address somewhere
2976 * amongst all interfaces.
2978 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D\n");
2979 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
2980 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
2981 /* wrong base scope */
2984 if ((sctp_ifn == looked_at) && looked_at)
2985 /* already looked at this guy */
2988 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2989 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
2990 (non_asoc_addr_ok == 0))
2992 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
2998 if (((non_asoc_addr_ok == 0) &&
2999 (sctp_is_addr_restricted(stcb, sifa))) ||
3000 (non_asoc_addr_ok &&
3001 (sctp_is_addr_restricted(stcb, sifa)) &&
3002 (!sctp_is_addr_pending(stcb, sifa)))) {
3004 * It is restricted for some
3005 * reason.. probably not yet added.
3010 atomic_add_int(&sifa->refcount, 1);
3015 * Ok we can find NO address to source from that is not on our
3016 * restricted list and non_asoc_address is NOT ok, or it is on our
3017 * restricted list. We can't source to it :-(
3024 /* tcb may be NULL */
3026 sctp_source_address_selection(struct sctp_inpcb *inp,
3027 struct sctp_tcb *stcb,
3029 struct sctp_nets *net,
3030 int non_asoc_addr_ok, uint32_t vrf_id)
3032 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
3035 struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
3038 struct sctp_ifa *answer;
3039 uint8_t dest_is_priv, dest_is_loop;
 * Rules:
 * - Find the route if needed, cache if I can.
 * - Look at the interface address in the route; is it in the bound
 *   list? If so we have the best source.
 * - If not we must rotate amongst the
3050 * Do we need to pay attention to scope? We can have a private address
3051 * or a global address we are sourcing or sending to. So if we draw
3053 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3055 *------------------------------------------
3056 * source * dest * result
3057 * -----------------------------------------
3058 * <a> Private * Global * NAT
3059 * -----------------------------------------
3060 * <b> Private * Private * No problem
3061 * -----------------------------------------
3062 * <c> Global * Private * Huh, How will this work?
3063 * -----------------------------------------
3064 * <d> Global * Global * No Problem
3065 *------------------------------------------
3066 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3068 *------------------------------------------
3069 * source * dest * result
3070 * -----------------------------------------
3071 * <a> Linklocal * Global *
3072 * -----------------------------------------
3073 * <b> Linklocal * Linklocal * No problem
3074 * -----------------------------------------
3075 * <c> Global * Linklocal * Huh, How will this work?
3076 * -----------------------------------------
3077 * <d> Global * Global * No Problem
3078 *------------------------------------------
3079 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
3081 * And then we add to that what happens if there are multiple addresses
3082 * assigned to an interface. Remember the ifa on an ifn is a linked
3083 * list of addresses. So one interface can have more than one IP
3084 * address. What happens if we have both a private and a global
3085 * address? Do we then use context of destination to sort out which
3086 * one is best? And what about NATs? Sending P->G may get you a NAT
3087 * translation, or should you select the G that's on the interface in
3092 * - count the number of addresses on the interface.
3093 * - if it is one, no problem except case <c>.
3094 * For <a> we will assume a NAT out there.
3095 * - if there are more than one, then we need to worry about scope P
3096 * or G. We should prefer G -> G and P -> P if possible.
3097 * Then as a secondary fall back to mixed types G->P being a last
3099 * - The above all works for bound all, but bound specific we need to
3100 * use the same concept but instead only consider the bound
3101 * addresses. If the bound set is NOT assigned to the interface then
3102 * we must use rotation amongst the bound addresses..
3104 if (ro->ro_rt == NULL) {
3106 * Need a route to cache.
3108 SCTP_RTALLOC(ro, vrf_id);
3110 if (ro->ro_rt == NULL) {
3113 fam = to->sin_family;
3114 dest_is_priv = dest_is_loop = 0;
3115 /* Setup our scopes for the destination */
3118 /* Scope based on outbound address */
3119 if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
3122 /* mark it as local */
3123 net->addr_is_local = 1;
3125 } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
3131 /* Scope based on outbound address */
3132 if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
3133 SCTP_ROUTE_IS_REAL_LOOP(ro)) {
3135 * If the address is a loopback address, which
3136 * consists of "::1" OR "fe80::1%lo0", we are
3137 * loopback scope. But we don't use dest_is_priv
3138 * (link local addresses).
3142 /* mark it as local */
3143 net->addr_is_local = 1;
3145 } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
3151 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
3152 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)to);
3153 SCTP_IPI_ADDR_RLOCK();
3154 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3158 answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
3159 dest_is_priv, dest_is_loop,
3160 non_asoc_addr_ok, fam);
3161 SCTP_IPI_ADDR_RUNLOCK();
3168 answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro,
3169 vrf_id, dest_is_priv,
3171 non_asoc_addr_ok, fam);
3173 answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
3178 SCTP_IPI_ADDR_RUNLOCK();
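/*
 * Scan the control mbuf chain for an IPPROTO_SCTP cmsg of type c_type and,
 * if its payload is at least cpsize bytes long, copy the payload out into
 * data.
 */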
3183 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
3188 tlen = SCTP_BUF_LEN(control);
3191 * Independent of how many mbufs, find the c_type inside the control
3192 * structure and copy out the data.
3195 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3196 /* not enough room for one more; we are done. */
3199 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3200 if (((int)cmh.cmsg_len + at) > tlen) {
3202 * this is really messed up since there is not enough
3203 * data here to cover the cmsg header. We are done.
3207 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3208 (c_type == cmh.cmsg_type)) {
3209 /* found the one we want, copy it out */
3210 at += CMSG_ALIGN(sizeof(struct cmsghdr));
3211 if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
3213 * space of cmsg_len after header not big
3218 m_copydata(control, at, cpsize, data);
3221 at += CMSG_ALIGN(cmh.cmsg_len);
3222 if (cmh.cmsg_len == 0) {
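/*
 * Build the STATE-COOKIE parameter for an INIT-ACK: a parameter header plus
 * the sctp_state_cookie, followed by a copy of the peer's INIT and of our
 * INIT-ACK, with room for the HMAC signature appended at the end.  The
 * parameter length is only filled in once the total chain length (cookie_sz)
 * is known.
 */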
3231 static struct mbuf *
3232 sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
3233 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
3235 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3236 struct sctp_state_cookie *stc;
3237 struct sctp_paramhdr *ph;
3243 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3244 sizeof(struct sctp_paramhdr)), 0,
3245 M_DONTWAIT, 1, MT_DATA);
3249 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
3250 if (copy_init == NULL) {
3254 #ifdef SCTP_MBUF_LOGGING
3255 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3260 if (SCTP_BUF_IS_EXTENDED(mat)) {
3261 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3263 mat = SCTP_BUF_NEXT(mat);
3267 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3269 if (copy_initack == NULL) {
3271 sctp_m_freem(copy_init);
3274 #ifdef SCTP_MBUF_LOGGING
3275 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
3280 if (SCTP_BUF_IS_EXTENDED(mat)) {
3281 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
3283 mat = SCTP_BUF_NEXT(mat);
3287 /* easy case: we just drop it on the end */
3288 ph = mtod(mret, struct sctp_paramhdr *);
3289 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3290 sizeof(struct sctp_paramhdr);
3291 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3292 sizeof(struct sctp_paramhdr));
3293 ph->param_type = htons(SCTP_STATE_COOKIE);
3294 ph->param_length = 0; /* fill in at the end */
3295 /* Fill in the stc cookie data */
3296 memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
3298 /* tack the INIT and then the INIT-ACK onto the chain */
3301 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3302 cookie_sz += SCTP_BUF_LEN(m_at);
3303 if (SCTP_BUF_NEXT(m_at) == NULL) {
3304 SCTP_BUF_NEXT(m_at) = copy_init;
3309 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3310 cookie_sz += SCTP_BUF_LEN(m_at);
3311 if (SCTP_BUF_NEXT(m_at) == NULL) {
3312 SCTP_BUF_NEXT(m_at) = copy_initack;
3317 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3318 cookie_sz += SCTP_BUF_LEN(m_at);
3319 if (SCTP_BUF_NEXT(m_at) == NULL) {
3323 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
3325 /* no space, so free the entire chain */
3329 SCTP_BUF_LEN(sig) = 0;
3330 SCTP_BUF_NEXT(m_at) = sig;
3332 foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
3333 memset(foo, 0, SCTP_SIGNATURE_SIZE);
3335 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3336 cookie_sz += SCTP_SIGNATURE_SIZE;
3337 ph->param_length = htons(cookie_sz);
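/*
 * Pick the ECT codepoint for an outgoing DATA chunk.  Unless ECN and the ECN
 * nonce are enabled and supported by the peer this is always ECT0; otherwise
 * a bit is drawn from the association's random pool (hb_random_values) to
 * alternate pseudo-randomly between ECT0 and ECT1, and the chosen nonce bit
 * is recorded in the chunk.
 */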
3343 sctp_get_ect(struct sctp_tcb *stcb,
3344 struct sctp_tmit_chunk *chk)
3346 uint8_t this_random;
3349 if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 0)
3352 if (SCTP_BASE_SYSCTL(sctp_ecn_nonce) == 0)
3353 /* no nonce, always return ECT0 */
3354 return (SCTP_ECT0_BIT);
3356 if (stcb->asoc.peer_supports_ecn_nonce == 0) {
3357 /* Peer does NOT support it, so we send ECT0 only */
3358 return (SCTP_ECT0_BIT);
3361 return (SCTP_ECT0_BIT);
3363 if ((stcb->asoc.hb_random_idx > 3) ||
3364 ((stcb->asoc.hb_random_idx == 3) &&
3365 (stcb->asoc.hb_ect_randombit > 7))) {
3369 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
3370 memcpy(stcb->asoc.hb_random_values, &rndval,
3371 sizeof(stcb->asoc.hb_random_values));
3372 this_random = stcb->asoc.hb_random_values[0];
3373 stcb->asoc.hb_random_idx = 0;
3374 stcb->asoc.hb_ect_randombit = 0;
3376 if (stcb->asoc.hb_ect_randombit > 7) {
3377 stcb->asoc.hb_ect_randombit = 0;
3378 stcb->asoc.hb_random_idx++;
3379 if (stcb->asoc.hb_random_idx > 3) {
3383 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
3385 if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
3387 /* ECN Nonce stuff */
3388 chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
3389 stcb->asoc.hb_ect_randombit++;
3390 return (SCTP_ECT1_BIT);
3392 stcb->asoc.hb_ect_randombit++;
3393 return (SCTP_ECT0_BIT);
3398 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3399 struct sctp_tcb *stcb, /* may be NULL */
3400 struct sctp_nets *net,
3401 struct sockaddr *to,
3403 uint32_t auth_offset,
3404 struct sctp_auth_chunk *auth,
3405 int nofragment_flag,
3407 struct sctp_tmit_chunk *chk,
3411 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3414 union sctp_sockstore *over_addr
3416 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
 * header WITH an SCTPHDR but no IP header, endpoint inp and sa
 * structure:
 * - fill in the HMAC digest of any AUTH chunk in the packet.
 * - calculate and fill in the SCTP checksum.
 * - prepend an IP address header.
 * - if boundall use INADDR_ANY.
 * - if boundspecific do source address selection.
 * - set fragmentation option for IPv4.
 * - On return from IP output, check/adjust mtu size of output
 *   interface and smallest_mtu size as well.
3428 /* Will need ifdefs around this */
3431 struct sctphdr *sctphdr;
3436 sctp_route_t *ro = NULL;
3437 struct udphdr *udp = NULL;
3439 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3440 struct socket *so = NULL;
3444 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3445 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
3450 vrf_id = stcb->asoc.vrf_id;
3452 vrf_id = inp->def_vrf_id;
3455 /* fill in the HMAC digest for any AUTH chunk in the packet */
3456 if ((auth != NULL) && (stcb != NULL)) {
3457 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb);
3459 /* Calculate the csum and fill in the length of the packet */
3460 sctphdr = mtod(m, struct sctphdr *);
3461 if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
3463 (to->sa_family == AF_INET) &&
3464 (stcb->asoc.loopback_scope)) {
3465 sctphdr->checksum = 0;
3467 * This can probably now be taken out since my audit shows
3468 * no more bad pktlen's coming in. But we will wait a while
3471 packet_length = sctp_calculate_len(m);
3473 sctphdr->checksum = 0;
3474 csum = sctp_calculate_sum(m, &packet_length, 0);
3475 sctphdr->checksum = csum;
3478 if (to->sa_family == AF_INET) {
3479 struct ip *ip = NULL;
3480 sctp_route_t iproute;
3484 newm = sctp_get_mbuf_for_msg(sizeof(struct ip) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
3486 newm = sctp_get_mbuf_for_msg(sizeof(struct ip), 1, M_DONTWAIT, 1, MT_DATA);
3490 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3494 SCTP_ALIGN_TO_END(newm, sizeof(struct ip) + sizeof(struct udphdr));
3495 SCTP_BUF_LEN(newm) = sizeof(struct ip) + sizeof(struct udphdr);
3496 packet_length += sizeof(struct ip) + sizeof(struct udphdr);
3498 SCTP_ALIGN_TO_END(newm, sizeof(struct ip));
3499 SCTP_BUF_LEN(newm) = sizeof(struct ip);
3500 packet_length += sizeof(struct ip);
3502 SCTP_BUF_NEXT(newm) = m;
3504 ip = mtod(m, struct ip *);
3505 ip->ip_v = IPVERSION;
3506 ip->ip_hl = (sizeof(struct ip) >> 2);
3508 tos_value = net->tos_flowlabel & 0x000000ff;
3510 tos_value = inp->ip_inp.inp.inp_ip_tos;
3512 if ((nofragment_flag) && (port == 0)) {
3513 #if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
3516 ip->ip_off = htons(IP_DF);
3521 /* FreeBSD has a function for ip_id's */
3522 ip->ip_id = ip_newid();
3524 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
3525 ip->ip_len = packet_length;
3527 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
3529 ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk));
3532 ip->ip_tos = (u_char)(tos_value & 0xfc);
3535 /* no association at all */
3536 ip->ip_tos = (tos_value & 0xfc);
3539 ip->ip_p = IPPROTO_UDP;
3541 ip->ip_p = IPPROTO_SCTP;
3546 memset(&iproute, 0, sizeof(iproute));
3547 memcpy(&ro->ro_dst, to, to->sa_len);
3549 ro = (sctp_route_t *) & net->ro;
3551 /* Now the address selection part */
3552 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
3554 /* call the routine to select the src address */
3555 if (net && out_of_asoc_ok == 0) {
3556 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
3557 sctp_free_ifa(net->ro._s_addr);
3558 net->ro._s_addr = NULL;
3559 net->src_addr_selected = 0;
3565 if (net->src_addr_selected == 0) {
3566 /* Cache the source address */
3567 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
3570 net->src_addr_selected = 1;
3572 if (net->ro._s_addr == NULL) {
3573 /* No route to host */
3574 net->src_addr_selected = 0;
3577 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
3579 if (over_addr == NULL) {
3580 struct sctp_ifa *_lsrc;
3582 _lsrc = sctp_source_address_selection(inp, stcb, ro,
3586 if (_lsrc == NULL) {
3589 ip->ip_src = _lsrc->address.sin.sin_addr;
3590 sctp_free_ifa(_lsrc);
3592 ip->ip_src = over_addr->sin.sin_addr;
3593 SCTP_RTALLOC((&ro->ro_rt), vrf_id);
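/*
 * UDP encapsulation: when a tunneling port is in use the SCTP packet is
 * wrapped in a UDP header whose source port is the sysctl'd tunneling port
 * and whose destination port is the one passed in; uh_ulen covers everything
 * after the IP header.
 */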
3597 udp = (struct udphdr *)(ip + 1);
3598 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
3599 udp->uh_dport = port;
3600 udp->uh_ulen = htons(packet_length - sizeof(struct ip));
3601 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
3604 * If source address selection fails and we find no route
3605 * then the ip_output should fail as well with a
3606 * NO_ROUTE_TO_HOST type error. We probably should catch
3607 * that somewhere and abort the association right away
3608 * (assuming this is an INIT being sent).
3610 if ((ro->ro_rt == NULL)) {
3612 * src addr selection failed to find a route (or
3613 * valid source addr), so we can't get there from
3617 SCTPDBG(SCTP_DEBUG_OUTPUT1,
3618 "%s: dropped packet - no valid source addr\n",
3621 SCTPDBG(SCTP_DEBUG_OUTPUT1,
3622 "Destination was ");
3623 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1,
3624 &net->ro._l_addr.sa);
3625 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3626 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3627 SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
3628 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3630 SCTP_FAILED_THRESHOLD,
3633 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3634 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
3636 * JRS 5/14/07 - If a
3638 * unreachable, the PF bit
3639 * is turned off. This
3640 * allows an unambiguous use
3642 * destinations that are
3643 * reachable but potentially
3645 * destination is set to the
3646 * unreachable state, also
3647 * set the destination to
3651 * Add debug message here if
3652 * destination is not in PF
3656 * Stop any running T3
3659 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
3660 net->dest_state &= ~SCTP_ADDR_PF;
3661 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
3667 if (net == stcb->asoc.primary_destination) {
3668 /* need a new primary */
3669 struct sctp_nets *alt;
3671 alt = sctp_find_alternate_net(stcb, net, 0);
3673 if (sctp_set_primary_addr(stcb,
3674 (struct sockaddr *)NULL,
3676 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
3677 if (net->ro._s_addr) {
3678 sctp_free_ifa(net->ro._s_addr);
3679 net->ro._s_addr = NULL;
3681 net->src_addr_selected = 0;
3687 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
3689 return (EHOSTUNREACH);
3691 if (ro != &iproute) {
3692 memcpy(&iproute, ro, sizeof(*ro));
3694 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
3695 (uint32_t) (ntohl(ip->ip_src.s_addr)));
3696 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
3697 (uint32_t) (ntohl(ip->ip_dst.s_addr)));
3698 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
3701 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
3702 /* failed to prepend data, give up */
3703 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3707 #ifdef SCTP_PACKET_LOGGING
3708 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
3709 sctp_packet_log(m, packet_length);
3711 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
3713 SCTP_ENABLE_UDP_CSUM(o_pak);
3715 /* send it out. table id is taken from stcb */
3716 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3717 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
3718 so = SCTP_INP_SO(inp);
3719 SCTP_SOCKET_UNLOCK(so, 0);
3722 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
3723 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3724 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
3725 atomic_add_int(&stcb->asoc.refcnt, 1);
3726 SCTP_TCB_UNLOCK(stcb);
3727 SCTP_SOCKET_LOCK(so, 0);
3728 SCTP_TCB_LOCK(stcb);
3729 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3732 SCTP_STAT_INCR(sctps_sendpackets);
3733 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
3735 SCTP_STAT_INCR(sctps_senderrors);
3737 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
3739 /* free temporary routes */
3745 /* PMTU check versus smallest asoc MTU goes here */
3746 if ((ro->ro_rt != NULL) &&
3747 (net->ro._s_addr)) {
3750 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
3752 (stcb->asoc.smallest_mtu > mtu)) {
3753 #ifdef SCTP_PRINT_FOR_B_AND_M
3754 SCTP_PRINTF("sctp_mtu_size_reset called after ip_output mtu-change:%d\n",
3757 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
3760 } else if (ro->ro_rt == NULL) {
3761 /* route was freed */
3762 if (net->ro._s_addr &&
3763 net->src_addr_selected) {
3764 sctp_free_ifa(net->ro._s_addr);
3765 net->ro._s_addr = NULL;
3767 net->src_addr_selected = 0;
3773 else if (to->sa_family == AF_INET6) {
3775 struct ip6_hdr *ip6h;
3776 struct route_in6 ip6route;
3779 uint16_t flowBottom;
3780 u_char tosBottom, tosTop;
3781 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
3783 struct sockaddr_in6 lsa6_storage;
3785 u_short prev_port = 0;
3788 flowlabel = net->tos_flowlabel;
3790 flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
3794 newm = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
3796 newm = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA);
3800 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3804 SCTP_ALIGN_TO_END(newm, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
3805 SCTP_BUF_LEN(newm) = sizeof(struct ip6_hdr) + sizeof(struct udphdr);
3806 packet_length += sizeof(struct ip6_hdr) + sizeof(struct udphdr);
3808 SCTP_ALIGN_TO_END(newm, sizeof(struct ip6_hdr));
3809 SCTP_BUF_LEN(newm) = sizeof(struct ip6_hdr);
3810 packet_length += sizeof(struct ip6_hdr);
3812 SCTP_BUF_NEXT(newm) = m;
3815 ip6h = mtod(m, struct ip6_hdr *);
3817 * We assume here that inp_flow is in host byte order within
3820 flowBottom = flowlabel & 0x0000ffff;
3821 flowTop = ((flowlabel & 0x000f0000) >> 16);
3822 tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
3823 /* protect *sin6 from overwrite */
3824 sin6 = (struct sockaddr_in6 *)to;
3828 /* KAME hack: embed scopeid */
3829 if (sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone)) != 0) {
3830 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3834 memset(&ip6route, 0, sizeof(ip6route));
3835 ro = (sctp_route_t *) & ip6route;
3836 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
3838 ro = (sctp_route_t *) & net->ro;
3841 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
3843 tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
3846 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
3849 /* we could get no asoc if it is an O-O-T-B (out-of-the-blue) packet */
3850 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
3852 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
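/*
 * For reference: the 32-bit ip6_flow word built above is laid out as
 * version(4) | traffic class(8) | flow label(20).  tosTop supplies the
 * version nibble plus the upper traffic-class nibble, (tosBottom |
 * flowTop) supplies the lower traffic-class nibble (where the ECT bits
 * live) plus the top 4 flow-label bits, and flowBottom supplies the
 * low 16 flow-label bits; e.g. a flowlabel of 0x12345 splits into
 * flowTop = 0x1 and flowBottom = 0x2345.
 */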
3854 ip6h->ip6_nxt = IPPROTO_UDP;
3856 ip6h->ip6_nxt = IPPROTO_SCTP;
3858 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
3859 ip6h->ip6_dst = sin6->sin6_addr;
3862 * Add SRC address selection here: we can only reuse the KAME
3863 * src-addr-sel to a limited degree, since we can try their
3864 * selection but it may not be bound.
3866 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
3867 lsa6_tmp.sin6_family = AF_INET6;
3868 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
3870 if (net && out_of_asoc_ok == 0) {
3871 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
3872 sctp_free_ifa(net->ro._s_addr);
3873 net->ro._s_addr = NULL;
3874 net->src_addr_selected = 0;
3880 if (net->src_addr_selected == 0) {
3881 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
3882 /* KAME hack: embed scopeid */
3883 if (sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone)) != 0) {
3884 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3887 /* Cache the source address */
3888 net->ro._s_addr = sctp_source_address_selection(inp,
3894 (void)sa6_recoverscope(sin6);
3895 net->src_addr_selected = 1;
3897 if (net->ro._s_addr == NULL) {
3898 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
3899 net->src_addr_selected = 0;
3902 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
3904 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
3905 /* KAME hack: embed scopeid */
3906 if (sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone)) != 0) {
3907 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3910 if (over_addr == NULL) {
3911 struct sctp_ifa *_lsrc;
3913 _lsrc = sctp_source_address_selection(inp, stcb, ro,
3917 if (_lsrc == NULL) {
3920 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
3921 sctp_free_ifa(_lsrc);
3923 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
3924 SCTP_RTALLOC((&ro->ro_rt), vrf_id);
3926 (void)sa6_recoverscope(sin6);
3928 lsa6->sin6_port = inp->sctp_lport;
3930 if (ro->ro_rt == NULL) {
3932 * src addr selection failed to find a route (or
3933 * valid source addr), so we can't get there from here.
3939 * XXX: sa6 may not have a valid sin6_scope_id in the
3940 * non-SCOPEDROUTING case.
3942 bzero(&lsa6_storage, sizeof(lsa6_storage));
3943 lsa6_storage.sin6_family = AF_INET6;
3944 lsa6_storage.sin6_len = sizeof(lsa6_storage);
3945 lsa6_storage.sin6_addr = lsa6->sin6_addr;
3946 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
3947 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
3952 lsa6_storage.sin6_addr = lsa6->sin6_addr;
3953 lsa6_storage.sin6_port = inp->sctp_lport;
3954 lsa6 = &lsa6_storage;
3955 ip6h->ip6_src = lsa6->sin6_addr;
3958 udp = (struct udphdr *)(ip6h + 1);
3959 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
3960 udp->uh_dport = port;
3961 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
3965 * We set the hop limit now since there is a good chance
3966 * that our ro pointer is now filled
3968 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
3969 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3972 /* Copy to be sure something bad is not happening */
3973 sin6->sin6_addr = ip6h->ip6_dst;
3974 lsa6->sin6_addr = ip6h->ip6_src;
3977 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
3978 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
3979 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
3980 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
3981 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
3983 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
3984 /* preserve the port and scope for link local send */
3985 prev_scope = sin6->sin6_scope_id;
3986 prev_port = sin6->sin6_port;
3988 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
3989 /* failed to prepend data, give up */
3991 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3994 #ifdef SCTP_PACKET_LOGGING
3995 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
3996 sctp_packet_log(m, packet_length);
3998 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4000 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4001 udp->uh_sum = 0xffff;
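/*
 * A computed UDP checksum of 0 must be sent as 0xffff: in the UDP
 * header a literal zero means "no checksum", which is not permitted
 * for UDP over IPv6, so the all-ones form is substituted.
 */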
4004 /* send it out. table id is taken from stcb */
4005 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4006 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4007 so = SCTP_INP_SO(inp);
4008 SCTP_SOCKET_UNLOCK(so, 0);
4011 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4012 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4013 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4014 atomic_add_int(&stcb->asoc.refcnt, 1);
4015 SCTP_TCB_UNLOCK(stcb);
4016 SCTP_SOCKET_LOCK(so, 0);
4017 SCTP_TCB_LOCK(stcb);
4018 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4022 /* for link local this must be done */
4023 sin6->sin6_scope_id = prev_scope;
4024 sin6->sin6_port = prev_port;
4026 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4027 SCTP_STAT_INCR(sctps_sendpackets);
4028 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4030 SCTP_STAT_INCR(sctps_senderrors);
4033 /* Now if we had a temp route free it */
4038 /* PMTU check versus smallest asoc MTU goes here */
4039 if (ro->ro_rt == NULL) {
4040 /* Route was freed */
4041 if (net->ro._s_addr &&
4042 net->src_addr_selected) {
4043 sctp_free_ifa(net->ro._s_addr);
4044 net->ro._s_addr = NULL;
4046 net->src_addr_selected = 0;
4048 if ((ro->ro_rt != NULL) &&
4049 (net->ro._s_addr)) {
4052 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4054 (stcb->asoc.smallest_mtu > mtu)) {
4055 #ifdef SCTP_PRINT_FOR_B_AND_M
4056 SCTP_PRINTF("sctp_mtu_size_reset called after ip6_output mtu-change:%d\n",
4059 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4063 if (ND_IFINFO(ifp)->linkmtu &&
4064 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4065 #ifdef SCTP_PRINT_FOR_B_AND_M
4066 SCTP_PRINTF("sctp_mtu_size_reset called via ifp ND_IFINFO() linkmtu:%d\n",
4067 ND_IFINFO(ifp)->linkmtu);
4069 sctp_mtu_size_reset(inp,
4071 ND_IFINFO(ifp)->linkmtu);
4079 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4080 ((struct sockaddr *)to)->sa_family);
4082 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4089 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4090 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4095 struct mbuf *m, *m_at, *mp_last;
4096 struct sctp_nets *net;
4097 struct sctp_init_msg *initm;
4098 struct sctp_supported_addr_param *sup_addr;
4099 struct sctp_adaptation_layer_indication *ali;
4100 struct sctp_ecn_supported_param *ecn;
4101 struct sctp_prsctp_supported_param *prsctp;
4102 struct sctp_ecn_nonce_supported_param *ecn_nonce;
4103 struct sctp_supported_chunk_types_param *pr_supported;
4104 int cnt_inits_to = 0;
4109 /* INITs always go to the primary (and usually the ONLY) address */
4111 net = stcb->asoc.primary_destination;
4113 net = TAILQ_FIRST(&stcb->asoc.nets);
4118 /* we confirm any address we send an INIT to */
4119 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4120 (void)sctp_set_primary_addr(stcb, NULL, net);
4122 /* we confirm any address we send an INIT to */
4123 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4125 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4127 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
4129 * special hook: if we are sending to a link-local address it will
4130 * not show up in our private address count.
4132 struct sockaddr_in6 *sin6l;
4134 sin6l = &net->ro._l_addr.sin6;
4135 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
4139 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4140 /* This case should not happen */
4141 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4144 /* start the INIT timer */
4145 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4147 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
4149 /* No memory, INIT timer will re-attempt. */
4150 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4153 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg);
4155 * assume peer supports asconf in order to be able to queue local
4156 * address changes while an INIT is in flight and before the assoc
4159 stcb->asoc.peer_supports_asconf = 1;
4160 /* Now lets put the SCTP header in place */
4161 initm = mtod(m, struct sctp_init_msg *);
4162 initm->sh.src_port = inp->sctp_lport;
4163 initm->sh.dest_port = stcb->rport;
4164 initm->sh.v_tag = 0;
4165 initm->sh.checksum = 0; /* calculate later */
4166 /* now the chunk header */
4167 initm->msg.ch.chunk_type = SCTP_INITIATION;
4168 initm->msg.ch.chunk_flags = 0;
4169 /* fill in later from mbuf we build */
4170 initm->msg.ch.chunk_length = 0;
4171 /* place in my tag */
4172 initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag);
4173 /* set up some of the credits. */
4174 initm->msg.init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(inp->sctp_socket),
4175 SCTP_MINIMAL_RWND));
4177 initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4178 initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4179 initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4180 /* now the address restriction */
4181 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm +
4183 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4185 /* we support 2 types: IPv6/IPv4 */
4186 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint16_t));
4187 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4188 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
4190 /* we support 1 type: IPv4 */
4191 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint8_t));
4192 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4193 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4195 SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);
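/*
 * Note: the mbuf length is advanced by the same (padded) amount in
 * both builds, presumably room for the parameter header plus two
 * 16-bit address-type slots; in the IPv4-only build the second slot
 * is the explicit zero pad written above and is not counted in the
 * declared param_length.
 */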
4197 /* adaptation layer indication parameter */
4198 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
4199 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4200 ali->ph.param_length = htons(sizeof(*ali));
4201 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4202 SCTP_BUF_LEN(m) += sizeof(*ali);
4203 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
4205 /* now any cookie time extensions */
4206 if (stcb->asoc.cookie_preserve_req) {
4207 struct sctp_cookie_perserve_param *cookie_preserve;
4209 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
4210 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4211 cookie_preserve->ph.param_length = htons(
4212 sizeof(*cookie_preserve));
4213 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4214 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
4215 ecn = (struct sctp_ecn_supported_param *)(
4216 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
4217 stcb->asoc.cookie_preserve_req = 0;
4220 if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
4221 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
4222 ecn->ph.param_length = htons(sizeof(*ecn));
4223 SCTP_BUF_LEN(m) += sizeof(*ecn);
4224 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
4227 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
4229 /* And now tell the peer we do pr-sctp */
4230 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
4231 prsctp->ph.param_length = htons(sizeof(*prsctp));
4232 SCTP_BUF_LEN(m) += sizeof(*prsctp);
4234 /* And now tell the peer we do all the extensions */
4235 pr_supported = (struct sctp_supported_chunk_types_param *)
4236 ((caddr_t)prsctp + sizeof(*prsctp));
4237 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4239 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4240 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4241 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4242 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4243 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4244 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
4245 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4246 p_len = sizeof(*pr_supported) + num_ext;
4247 pr_supported->ph.param_length = htons(p_len);
4248 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
4249 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
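/*
 * Worked example of the padding math above, assuming sizeof(*pr_supported)
 * is just the 4-byte parameter header: with AUTH disabled num_ext is 5,
 * so p_len = 4 + 5 = 9.  SCTP_SIZE32(9) rounds up to 12, the bzero()
 * clears the 3 pad bytes, param_length still advertises 9, and the mbuf
 * length grows by the padded 12.
 */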
4251 /* ECN nonce: And now tell the peer we support ECN nonce */
4252 if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
4253 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
4254 ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
4255 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
4256 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
4257 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
4259 /* add authentication parameters */
4260 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4261 struct sctp_auth_random *randp;
4262 struct sctp_auth_hmac_algo *hmacs;
4263 struct sctp_auth_chunk_list *chunks;
4265 /* attach RANDOM parameter, if available */
4266 if (stcb->asoc.authinfo.random != NULL) {
4267 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4268 p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
4269 #ifdef SCTP_AUTH_DRAFT_04
4270 randp->ph.param_type = htons(SCTP_RANDOM);
4271 randp->ph.param_length = htons(p_len);
4272 bcopy(stcb->asoc.authinfo.random->key,
4274 stcb->asoc.authinfo.random_len);
4276 /* random key already contains the header */
4277 bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
4279 /* zero out any padding required */
4280 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
4281 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4283 /* add HMAC_ALGO parameter */
4284 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4285 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
4286 (uint8_t *) hmacs->hmac_ids);
4288 p_len += sizeof(*hmacs);
4289 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4290 hmacs->ph.param_length = htons(p_len);
4291 /* zero out any padding required */
4292 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
4293 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4295 /* add CHUNKS parameter */
4296 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4297 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
4298 chunks->chunk_types);
4300 p_len += sizeof(*chunks);
4301 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4302 chunks->ph.param_length = htons(p_len);
4303 /* zero out any padding required */
4304 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
4305 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4309 /* now the addresses */
4311 struct sctp_scoping scp;
4314 * To optimize this we could put the scoping stuff into a
4315 * structure and remove the individual uint8's from the
4316 * assoc structure. Then we could just pass in the address
4317 * within the stcb... but for now this is a quick hack to get
4318 * the address stuff teased apart.
4320 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
4321 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
4322 scp.loopback_scope = stcb->asoc.loopback_scope;
4323 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
4324 scp.local_scope = stcb->asoc.local_scope;
4325 scp.site_scope = stcb->asoc.site_scope;
4327 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
4330 /* calculate the size and update pkt header and chunk header */
4332 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4333 if (SCTP_BUF_NEXT(m_at) == NULL)
4335 p_len += SCTP_BUF_LEN(m_at);
4337 initm->msg.ch.chunk_length = htons((p_len - sizeof(struct sctphdr)));
4339 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
4340 * here since the timer will drive a retransmission.
4343 /* I don't expect this to execute but we will be safe here */
4345 if ((padval) && (mp_last)) {
4347 * The compiler worries that mp_last may not be set even
4348 * though I think it is impossible :-> however we check
4349 * mp_last here just in case.
4351 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
4353 /* Houston we have a problem, no space */
4359 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4360 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4361 (struct sockaddr *)&net->ro._l_addr,
4362 m, 0, NULL, 0, 0, NULL, 0, net->port, so_locked, NULL);
4363 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4364 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4365 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4366 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4370 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4371 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp)
4374 * Given an mbuf containing an INIT or INIT-ACK with the param_offset
4375 * being equal to the beginning of the params, i.e. (iphlen +
4376 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4377 * end of the mbuf verifying that all parameters are known.
4379 * For unknown parameters build and return a mbuf with
4380 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4381 * processing this chunk stop, and set *abort_processing to 1.
4383 * By having param_offset be pre-set to where parameters begin it is
4384 * hoped that this routine may be reused in the future by new
4387 struct sctp_paramhdr *phdr, params;
4389 struct mbuf *mat, *op_err;
4390 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4391 int at, limit, pad_needed;
4392 uint16_t ptype, plen, padded_size;
4395 *abort_processing = 0;
4398 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4401 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4402 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4403 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4404 ptype = ntohs(phdr->param_type);
4405 plen = ntohs(phdr->param_length);
4406 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4407 /* malformed parameter */
4408 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4411 limit -= SCTP_SIZE32(plen);
4413 * All parameters for all chunks that we know/understand are
4414 * listed here. We process them in other places and take the
4415 * appropriate stop actions per the upper bits. However, this
4416 * is the generic routine processors can call to get back an
4417 * operr to either incorporate (init-ack) or send.
4419 padded_size = SCTP_SIZE32(plen);
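/*
 * plen is the length the sender declared for this parameter; on the
 * wire every parameter is padded to a 4-byte boundary, so e.g. a
 * declared length of 6 occupies SCTP_SIZE32(6) = 8 bytes, and the
 * walk advances by the padded size rather than by plen.
 */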
4421 /* Params with a variable size */
4422 case SCTP_HEARTBEAT_INFO:
4423 case SCTP_STATE_COOKIE:
4424 case SCTP_UNRECOG_PARAM:
4425 case SCTP_ERROR_CAUSE_IND:
4429 /* Params with a variable size within a range */
4430 case SCTP_CHUNK_LIST:
4431 case SCTP_SUPPORTED_CHUNK_EXT:
4432 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4433 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4438 case SCTP_SUPPORTED_ADDRTYPE:
4439 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4440 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4446 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4447 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4452 case SCTP_SET_PRIM_ADDR:
4453 case SCTP_DEL_IP_ADDRESS:
4454 case SCTP_ADD_IP_ADDRESS:
4455 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4456 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4457 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4462 /* Params with a fixed size */
4463 case SCTP_IPV4_ADDRESS:
4464 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4465 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4470 case SCTP_IPV6_ADDRESS:
4471 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4472 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
4477 case SCTP_COOKIE_PRESERVE:
4478 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
4479 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
4484 case SCTP_ECN_NONCE_SUPPORTED:
4485 case SCTP_PRSCTP_SUPPORTED:
4486 if (padded_size != sizeof(struct sctp_paramhdr)) {
4487 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecnnonce/prsctp %d\n", plen);
4492 case SCTP_ECN_CAPABLE:
4493 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
4494 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
4499 case SCTP_ULP_ADAPTATION:
4500 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
4501 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen);
4506 case SCTP_SUCCESS_REPORT:
4507 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
4508 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
4513 case SCTP_HOSTNAME_ADDRESS:
4515 /* We can NOT handle HOST NAME addresses!! */
4518 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
4519 *abort_processing = 1;
4520 if (op_err == NULL) {
4521 /* Ok need to try to get a mbuf */
4523 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4525 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4528 l_len += sizeof(struct sctp_paramhdr);
4529 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4531 SCTP_BUF_LEN(op_err) = 0;
4533 * pre-reserve space for ip
4534 * and sctp header and
4538 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4540 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4542 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4543 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4547 /* If we have space */
4548 struct sctp_paramhdr s;
4551 uint32_t cpthis = 0;
4553 pad_needed = 4 - (err_at % 4);
4554 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4555 err_at += pad_needed;
4557 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
4558 s.param_length = htons(sizeof(s) + plen);
4559 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4560 err_at += sizeof(s);
4561 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
4563 sctp_m_freem(op_err);
4565 * we are out of memory but
4566 * we still need to have a
4567 * look at what to do (the
4568 * system is in trouble
4573 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
4581 * we do not recognize the parameter; figure out what to do.
4584 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
4585 if ((ptype & 0x4000) == 0x4000) {
4586 /* Report bit is set?? */
4587 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
4588 if (op_err == NULL) {
4591 /* Ok need to try to get an mbuf */
4593 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4595 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4598 l_len += sizeof(struct sctp_paramhdr);
4599 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4601 SCTP_BUF_LEN(op_err) = 0;
4603 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4605 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4607 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4608 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4612 /* If we have space */
4613 struct sctp_paramhdr s;
4616 uint32_t cpthis = 0;
4618 pad_needed = 4 - (err_at % 4);
4619 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4620 err_at += pad_needed;
4622 s.param_type = htons(SCTP_UNRECOG_PARAM);
4623 s.param_length = htons(sizeof(s) + plen);
4624 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4625 err_at += sizeof(s);
4626 if (plen > sizeof(tempbuf)) {
4627 plen = sizeof(tempbuf);
4629 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
4631 sctp_m_freem(op_err);
4633 * we are out of memory but
4634 * we still need to have a
4635 * look at what to do (the
4636 * system is in trouble
4640 goto more_processing;
4642 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
4647 if ((ptype & 0x8000) == 0x0000) {
4648 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
4651 /* skip this chunk and continue processing */
4652 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
4653 at += SCTP_SIZE32(plen);
4658 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4662 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
4663 *abort_processing = 1;
4664 if ((op_err == NULL) && phdr) {
4668 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4670 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4672 l_len += (2 * sizeof(struct sctp_paramhdr));
4673 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4675 SCTP_BUF_LEN(op_err) = 0;
4677 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4679 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4681 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4682 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4685 if ((op_err) && phdr) {
4686 struct sctp_paramhdr s;
4689 uint32_t cpthis = 0;
4691 pad_needed = 4 - (err_at % 4);
4692 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4693 err_at += pad_needed;
4695 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4696 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
4697 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4698 err_at += sizeof(s);
4699 /* Only copy back the p-hdr that caused the issue */
4700 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
4706 sctp_are_there_new_addresses(struct sctp_association *asoc,
4707 struct mbuf *in_initpkt, int iphlen, int offset)
4710 * Given an INIT packet, look through the packet to verify that there
4711 * are NO new addresses. As we go through the parameters, add reports
4712 * of any un-understood parameters that require an error. Also we
4713 * must return (1) to drop the packet if we see an un-understood
4714 * parameter that tells us to drop the chunk.
4716 struct sockaddr_in sin4, *sa4;
4719 struct sockaddr_in6 sin6, *sa6;
4722 struct sockaddr *sa_touse;
4723 struct sockaddr *sa;
4724 struct sctp_paramhdr *phdr, params;
4728 struct ip6_hdr *ip6h;
4732 uint16_t ptype, plen;
4735 struct sctp_nets *net;
4737 memset(&sin4, 0, sizeof(sin4));
4739 memset(&sin6, 0, sizeof(sin6));
4741 sin4.sin_family = AF_INET;
4742 sin4.sin_len = sizeof(sin4);
4744 sin6.sin6_family = AF_INET6;
4745 sin6.sin6_len = sizeof(sin6);
4748 /* First what about the src address of the pkt ? */
4749 iph = mtod(in_initpkt, struct ip *);
4750 switch (iph->ip_v) {
4752 /* source addr is IPv4 */
4753 sin4.sin_addr = iph->ip_src;
4754 sa_touse = (struct sockaddr *)&sin4;
4757 case IPV6_VERSION >> 4:
4758 /* source addr is IPv6 */
4759 ip6h = mtod(in_initpkt, struct ip6_hdr *);
4760 sin6.sin6_addr = ip6h->ip6_src;
4761 sa_touse = (struct sockaddr *)&sin6;
4769 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4770 sa = (struct sockaddr *)&net->ro._l_addr;
4771 if (sa->sa_family == sa_touse->sa_family) {
4772 if (sa->sa_family == AF_INET) {
4773 sa4 = (struct sockaddr_in *)sa;
4774 if (sa4->sin_addr.s_addr ==
4775 sin4.sin_addr.s_addr) {
4781 if (sa->sa_family == AF_INET6) {
4782 sa6 = (struct sockaddr_in6 *)sa;
4783 if (SCTP6_ARE_ADDR_EQUAL(sa6,
4793 /* New address added! no need to look further. */
4796 /* Ok so far, let's munge through the rest of the packet */
4800 offset += sizeof(struct sctp_init_chunk);
4801 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
4803 ptype = ntohs(phdr->param_type);
4804 plen = ntohs(phdr->param_length);
4805 if (ptype == SCTP_IPV4_ADDRESS) {
4806 struct sctp_ipv4addr_param *p4, p4_buf;
4808 phdr = sctp_get_next_param(mat, offset,
4809 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
4810 if (plen != sizeof(struct sctp_ipv4addr_param) ||
4814 p4 = (struct sctp_ipv4addr_param *)phdr;
4815 sin4.sin_addr.s_addr = p4->addr;
4816 sa_touse = (struct sockaddr *)&sin4;
4817 } else if (ptype == SCTP_IPV6_ADDRESS) {
4818 struct sctp_ipv6addr_param *p6, p6_buf;
4820 phdr = sctp_get_next_param(mat, offset,
4821 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
4822 if (plen != sizeof(struct sctp_ipv6addr_param) ||
4826 p6 = (struct sctp_ipv6addr_param *)phdr;
4828 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
4831 sa_touse = (struct sockaddr *)&sin4;
4834 /* ok, sa_touse points to one to check */
4836 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4837 sa = (struct sockaddr *)&net->ro._l_addr;
4838 if (sa->sa_family != sa_touse->sa_family) {
4841 if (sa->sa_family == AF_INET) {
4842 sa4 = (struct sockaddr_in *)sa;
4843 if (sa4->sin_addr.s_addr ==
4844 sin4.sin_addr.s_addr) {
4850 if (sa->sa_family == AF_INET6) {
4851 sa6 = (struct sockaddr_in6 *)sa;
4852 if (SCTP6_ARE_ADDR_EQUAL(
4861 /* New addr added! no need to look further */
4865 offset += SCTP_SIZE32(plen);
4866 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
4872 * Given an MBUF chain that was sent into us containing an INIT, build an
4873 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
4874 * done a pullup to include the IPv6/v4 header, SCTP header and initial part
4875 * of the INIT message (i.e. the struct sctp_init_msg).
4878 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4879 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
4880 struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock)
4882 struct sctp_association *asoc;
4883 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
4884 struct sctp_init_msg *initackm_out;
4885 struct sctp_adaptation_layer_indication *ali;
4886 struct sctp_ecn_supported_param *ecn;
4887 struct sctp_prsctp_supported_param *prsctp;
4888 struct sctp_ecn_nonce_supported_param *ecn_nonce;
4889 struct sctp_supported_chunk_types_param *pr_supported;
4890 union sctp_sockstore store, store1, *over_addr;
4891 struct sockaddr_in *sin, *to_sin;
4894 struct sockaddr_in6 *sin6, *to_sin6;
4900 struct ip6_hdr *ip6;
4903 struct sockaddr *to;
4904 struct sctp_state_cookie stc;
4905 struct sctp_nets *net = NULL;
4906 uint8_t *signature = NULL;
4907 int cnt_inits_to = 0;
4908 uint16_t his_limit, i_want;
4909 int abort_flag, padval;
4919 if ((asoc != NULL) &&
4920 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
4921 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
4922 /* new addresses, out of here in non-cookie-wait states */
4925 * Send an ABORT; we don't add the new-address error clause,
4926 * though we even set the T bit and copy in the 0 tag. This
4927 * looks no different than if no listener was present.
4928 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port);
4932 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
4933 (offset + sizeof(struct sctp_init_chunk)),
4934 &abort_flag, (struct sctp_chunkhdr *)init_chk);
4937 sctp_send_abort(init_pkt, iphlen, sh,
4938 init_chk->init.initiate_tag, op_err, vrf_id, port);
4941 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
4943 /* No memory, INIT timer will re-attempt. */
4945 sctp_m_freem(op_err);
4948 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg);
4950 /* the time I built cookie */
4951 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
4953 /* populate any tie tags */
4955 /* unlock before tag selections */
4956 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
4957 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
4958 stc.cookie_life = asoc->cookie_life;
4959 net = asoc->primary_destination;
4961 stc.tie_tag_my_vtag = 0;
4962 stc.tie_tag_peer_vtag = 0;
4963 /* life I will award this cookie */
4964 stc.cookie_life = inp->sctp_ep.def_cookie_life;
4967 /* copy in the ports for later check */
4968 stc.myport = sh->dest_port;
4969 stc.peerport = sh->src_port;
4972 * If we wanted to honor cookie life extensions, we would add to
4973 * stc.cookie_life. For now we should NOT honor any extension.
4975 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
4976 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
4977 struct inpcb *in_inp;
4979 /* It's a V6 socket */
4980 in_inp = (struct inpcb *)inp;
4981 stc.ipv6_addr_legal = 1;
4982 /* Now look at the binding flag to see if V4 will be legal */
4983 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
4984 stc.ipv4_addr_legal = 1;
4986 /* V4 addresses are NOT legal on the association */
4987 stc.ipv4_addr_legal = 0;
4990 /* It's a V4 socket, not V6 */
4991 stc.ipv4_addr_legal = 1;
4992 stc.ipv6_addr_legal = 0;
4995 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5000 /* now for scope setup */
5001 memset((caddr_t)&store, 0, sizeof(store));
5002 memset((caddr_t)&store1, 0, sizeof(store1));
5004 to_sin = &store1.sin;
5007 to_sin6 = &store1.sin6;
5009 iph = mtod(init_pkt, struct ip *);
5010 /* establish the to_addr's */
5011 switch (iph->ip_v) {
5013 to_sin->sin_port = sh->dest_port;
5014 to_sin->sin_family = AF_INET;
5015 to_sin->sin_len = sizeof(struct sockaddr_in);
5016 to_sin->sin_addr = iph->ip_dst;
5019 case IPV6_VERSION >> 4:
5020 ip6 = mtod(init_pkt, struct ip6_hdr *);
5021 to_sin6->sin6_addr = ip6->ip6_dst;
5022 to_sin6->sin6_scope_id = 0;
5023 to_sin6->sin6_port = sh->dest_port;
5024 to_sin6->sin6_family = AF_INET6;
5025 to_sin6->sin6_len = sizeof(struct sockaddr_in6);
5034 to = (struct sockaddr *)&store;
5035 switch (iph->ip_v) {
5038 sin->sin_family = AF_INET;
5039 sin->sin_len = sizeof(struct sockaddr_in);
5040 sin->sin_port = sh->src_port;
5041 sin->sin_addr = iph->ip_src;
5042 /* lookup address */
5043 stc.address[0] = sin->sin_addr.s_addr;
5047 stc.addr_type = SCTP_IPV4_ADDRESS;
5048 /* local from address */
5049 stc.laddress[0] = to_sin->sin_addr.s_addr;
5050 stc.laddress[1] = 0;
5051 stc.laddress[2] = 0;
5052 stc.laddress[3] = 0;
5053 stc.laddr_type = SCTP_IPV4_ADDRESS;
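/*
 * The cookie's address/laddress arrays are sized to hold a full IPv6
 * address; for an IPv4 peer only the first 32-bit word is meaningful,
 * the rest are zeroed, and addr_type/laddr_type record which
 * interpretation applies.
 */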
5054 /* scope_id is only for v6 */
5056 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5057 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
5062 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5063 /* Must use the address in this case */
5064 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
5065 stc.loopback_scope = 1;
5068 stc.local_scope = 0;
5073 case IPV6_VERSION >> 4:
5075 ip6 = mtod(init_pkt, struct ip6_hdr *);
5076 sin6->sin6_family = AF_INET6;
5077 sin6->sin6_len = sizeof(struct sockaddr_in6);
5078 sin6->sin6_port = sh->src_port;
5079 sin6->sin6_addr = ip6->ip6_src;
5080 /* lookup address */
5081 memcpy(&stc.address, &sin6->sin6_addr,
5082 sizeof(struct in6_addr));
5083 sin6->sin6_scope_id = 0;
5084 stc.addr_type = SCTP_IPV6_ADDRESS;
5086 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
5088 * FIX ME: does this have scope from
5091 (void)sa6_recoverscope(sin6);
5092 stc.scope_id = sin6->sin6_scope_id;
5093 sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone));
5094 stc.loopback_scope = 1;
5095 stc.local_scope = 0;
5098 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
5100 * If the new destination is a
5101 * LINK_LOCAL address we must have
5102 * both site and local scope in common.
5103 * Don't set local scope though, since
5104 * we must depend on the source to be
5105 * added implicitly. We cannot assume
5106 * that just because we share one
5107 * link, all links are common.
5109 stc.local_scope = 0;
5113 * we start counting for the private
5114 * address stuff at 1, since the
5115 * link-local address we source from
5116 * won't show up in our scoped count.
5120 * pull out the scope_id from
5124 * FIX ME: does this have scope from
5127 (void)sa6_recoverscope(sin6);
5128 stc.scope_id = sin6->sin6_scope_id;
5129 sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone));
5130 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
5132 * If the new destination is
5133 * SITE_LOCAL then we must have site
5138 memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr));
5139 stc.laddr_type = SCTP_IPV6_ADDRESS;
5149 /* set the scope per the existing tcb */
5152 struct sctp_nets *lnet;
5156 stc.loopback_scope = asoc->loopback_scope;
5157 stc.ipv4_scope = asoc->ipv4_local_scope;
5158 stc.site_scope = asoc->site_scope;
5159 stc.local_scope = asoc->local_scope;
5161 /* Why do we not consider IPv4 LL addresses? */
5162 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5163 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5164 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5166 * if we have a LL address, start
5174 /* use the net pointer */
5175 to = (struct sockaddr *)&net->ro._l_addr;
5176 switch (to->sa_family) {
5178 sin = (struct sockaddr_in *)to;
5179 stc.address[0] = sin->sin_addr.s_addr;
5183 stc.addr_type = SCTP_IPV4_ADDRESS;
5184 if (net->src_addr_selected == 0) {
5186 * strange case here, the INIT should have
5187 * done the selection.
5189 net->ro._s_addr = sctp_source_address_selection(inp,
5190 stcb, (sctp_route_t *) & net->ro,
5192 if (net->ro._s_addr == NULL)
5195 net->src_addr_selected = 1;
5198 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5199 stc.laddress[1] = 0;
5200 stc.laddress[2] = 0;
5201 stc.laddress[3] = 0;
5202 stc.laddr_type = SCTP_IPV4_ADDRESS;
5206 sin6 = (struct sockaddr_in6 *)to;
5207 memcpy(&stc.address, &sin6->sin6_addr,
5208 sizeof(struct in6_addr));
5209 stc.addr_type = SCTP_IPV6_ADDRESS;
5210 if (net->src_addr_selected == 0) {
5212 * strange case here, the INIT should have
5213 * done the selection.
5215 net->ro._s_addr = sctp_source_address_selection(inp,
5216 stcb, (sctp_route_t *) & net->ro,
5218 if (net->ro._s_addr == NULL)
5221 net->src_addr_selected = 1;
5223 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5224 sizeof(struct in6_addr));
5225 stc.laddr_type = SCTP_IPV6_ADDRESS;
5230 /* Now lets put the SCTP header in place */
5231 initackm_out = mtod(m, struct sctp_init_msg *);
5232 initackm_out->sh.src_port = inp->sctp_lport;
5233 initackm_out->sh.dest_port = sh->src_port;
5234 initackm_out->sh.v_tag = init_chk->init.initiate_tag;
5235 /* Save it off for quick ref */
5236 stc.peers_vtag = init_chk->init.initiate_tag;
5237 initackm_out->sh.checksum = 0; /* calculate later */
5239 memcpy(stc.identification, SCTP_VERSION_STRING,
5240 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5241 /* now the chunk header */
5242 initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK;
5243 initackm_out->msg.ch.chunk_flags = 0;
5244 /* fill in later from mbuf we build */
5245 initackm_out->msg.ch.chunk_length = 0;
5246 /* place in my tag */
5247 if ((asoc != NULL) &&
5248 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5249 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5250 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5251 /* re-use the v-tags and init-seq here */
5252 initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag);
5253 initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number);
5255 uint32_t vtag, itsn;
5257 if (hold_inp_lock) {
5258 SCTP_INP_INCR_REF(inp);
5259 SCTP_INP_RUNLOCK(inp);
5262 atomic_add_int(&asoc->refcnt, 1);
5263 SCTP_TCB_UNLOCK(stcb);
5264 vtag = sctp_select_a_tag(inp, 1);
5265 initackm_out->msg.init.initiate_tag = htonl(vtag);
5266 /* get a TSN to use too */
5267 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5268 initackm_out->msg.init.initial_tsn = htonl(itsn);
5269 SCTP_TCB_LOCK(stcb);
5270 atomic_add_int(&asoc->refcnt, -1);
5272 vtag = sctp_select_a_tag(inp, 1);
5273 initackm_out->msg.init.initiate_tag = htonl(vtag);
5274 /* get a TSN to use too */
5275 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5277 if (hold_inp_lock) {
5278 SCTP_INP_RLOCK(inp);
5279 SCTP_INP_DECR_REF(inp);
5282 /* save away my tag into the cookie */
5283 stc.my_vtag = initackm_out->msg.init.initiate_tag;
5285 /* set up some of the credits. */
5286 so = inp->sctp_socket;
5288 /* memory problem */
5292 initackm_out->msg.init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5294 /* set what I want */
5295 his_limit = ntohs(init_chk->init.num_inbound_streams);
5296 /* choose what I want */
5298 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5299 i_want = asoc->streamoutcnt;
5301 i_want = inp->sctp_ep.pre_open_stream_count;
5304 i_want = inp->sctp_ep.pre_open_stream_count;
5306 if (his_limit < i_want) {
5307 /* I Want more :< */
5308 initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams;
5310 /* I can have what I want :> */
5311 initackm_out->msg.init.num_outbound_streams = htons(i_want);
5313 /* tell him his limit. */
5314 initackm_out->msg.init.num_inbound_streams =
5315 htons(inp->sctp_ep.max_open_streams_intome);
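/*
 * Stream negotiation in short: the outbound stream count we answer
 * with is min(his_limit, i_want).  E.g. if the peer's INIT advertised
 * 5 inbound streams and we would like 10 outbound, we can only ask
 * for 5; if the peer allows 20, we ask for our 10.  Our own inbound
 * limit is advertised independently just above.
 */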
5317 /* adaptation layer indication parameter */
5318 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initackm_out + sizeof(*initackm_out));
5319 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5320 ali->ph.param_length = htons(sizeof(*ali));
5321 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5322 SCTP_BUF_LEN(m) += sizeof(*ali);
5323 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5326 if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
5327 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5328 ecn->ph.param_length = htons(sizeof(*ecn));
5329 SCTP_BUF_LEN(m) += sizeof(*ecn);
5331 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5334 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5336 /* And now tell the peer we do pr-sctp */
5337 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5338 prsctp->ph.param_length = htons(sizeof(*prsctp));
5339 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5341 /* And now tell the peer we do all the extensions */
5342 pr_supported = (struct sctp_supported_chunk_types_param *)
5343 ((caddr_t)prsctp + sizeof(*prsctp));
5345 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5347 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5348 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5349 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5350 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5351 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5352 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5353 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5354 p_len = sizeof(*pr_supported) + num_ext;
5355 pr_supported->ph.param_length = htons(p_len);
5356 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5357 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5359 /* ECN nonce: And now tell the peer we support ECN nonce */
5360 if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
5361 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
5362 ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
5363 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
5364 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
5365 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
5367 /* add authentication parameters */
5368 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5369 struct sctp_auth_random *randp;
5370 struct sctp_auth_hmac_algo *hmacs;
5371 struct sctp_auth_chunk_list *chunks;
5372 uint16_t random_len;
5374 /* generate and add RANDOM parameter */
5375 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5376 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5377 randp->ph.param_type = htons(SCTP_RANDOM);
5378 p_len = sizeof(*randp) + random_len;
5379 randp->ph.param_length = htons(p_len);
5380 SCTP_READ_RANDOM(randp->random_data, random_len);
5381 /* zero out any padding required */
5382 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5383 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5385 /* add HMAC_ALGO parameter */
5386 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5387 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5388 (uint8_t *) hmacs->hmac_ids);
5390 p_len += sizeof(*hmacs);
5391 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5392 hmacs->ph.param_length = htons(p_len);
5393 /* zero out any padding required */
5394 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5395 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5397 /* add CHUNKS parameter */
5398 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5399 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5400 chunks->chunk_types);
5402 p_len += sizeof(*chunks);
5403 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5404 chunks->ph.param_length = htons(p_len);
5405 /* zero out any padding required */
5406 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5407 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5411 /* now the addresses */
5413 struct sctp_scoping scp;
5416 * To optimize this we could put the scoping stuff into a
5417 * structure and remove the individual uint8's from the stc
5418 * structure. Then we could just pass in the address within
5419 * the stc... but for now this is a quick hack to get the
5420 * address stuff teased apart.
5422 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5423 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5424 scp.loopback_scope = stc.loopback_scope;
5425 scp.ipv4_local_scope = stc.ipv4_scope;
5426 scp.local_scope = stc.local_scope;
5427 scp.site_scope = stc.site_scope;
5428 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
5431 /* tack on the operational error if present */
5439 llen += SCTP_BUF_LEN(ol);
5440 ol = SCTP_BUF_NEXT(ol);
5443 /* must add a pad to the param */
5444 uint32_t cpthis = 0;
5447 padlen = 4 - (llen % 4);
5448 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
5450 while (SCTP_BUF_NEXT(m_at) != NULL) {
5451 m_at = SCTP_BUF_NEXT(m_at);
5453 SCTP_BUF_NEXT(m_at) = op_err;
5454 while (SCTP_BUF_NEXT(m_at) != NULL) {
5455 m_at = SCTP_BUF_NEXT(m_at);
5458 /* pre-calculate the size and update pkt header and chunk header */
5460 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5461 p_len += SCTP_BUF_LEN(m_tmp);
5462 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5463 /* m_tmp should now point to last one */
5468 /* Now we must build a cookie */
5469 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m,
5470 sizeof(struct sctphdr), &stc, &signature);
5471 if (m_cookie == NULL) {
5472 /* memory problem */
5476 /* Now append the cookie to the end and update the space/size */
5477 SCTP_BUF_NEXT(m_tmp) = m_cookie;
5479 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5480 p_len += SCTP_BUF_LEN(m_tmp);
5481 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5482 /* m_tmp should now point to last one */
5488 * Place in the size, but we don't include the last pad (if any) in the chunk size.
5491 initackm_out->msg.ch.chunk_length = htons((p_len - sizeof(struct sctphdr)));
5494 * Time to sign the cookie; we don't sign over the cookie signature
5495 * itself, thus we treat it as a trailer.
5497 (void)sctp_hmac_m(SCTP_HMAC,
5498 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
5499 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
5500 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
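/*
 * Roughly: the HMAC above is keyed with the endpoint's current secret
 * and computed over the cookie parameter, skipping the leading
 * parameter header and excluding the SCTP_SIGNATURE_SIZE trailer; the
 * digest lands in 'signature', which sctp_add_cookie() pointed at the
 * signature field inside the cookie mbuf.  When the peer echoes the
 * cookie we can recompute and compare this digest to validate it.
 */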
5502 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
5503 * here since the timer will drive a retransmission.
5506 if ((padval) && (mp_last)) {
5507 /* see my previous comments on mp_last */
5510 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
5512 /* Houston we have a problem, no space */
5518 if (stc.loopback_scope) {
5519 over_addr = &store1;
5525 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
5526 NULL, 0, port, SCTP_SO_NOT_LOCKED, over_addr);
5527 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
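/*
 * The "wheel" below is the per-association list (asoc->out_wheel) of
 * stream-out structures that currently have data queued, kept ordered
 * by stream number.  A stream is considered "on the wheel" when its
 * next_spoke linkage is non-NULL, which is what the insert/remove
 * helpers below maintain.
 */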
5532 sctp_insert_on_wheel(struct sctp_tcb *stcb,
5533 struct sctp_association *asoc,
5534 struct sctp_stream_out *strq, int holds_lock)
5536 struct sctp_stream_out *stre, *strn;
5538 if (holds_lock == 0) {
5539 SCTP_TCB_SEND_LOCK(stcb);
5541 if ((strq->next_spoke.tqe_next) ||
5542 (strq->next_spoke.tqe_prev)) {
5543 /* already on wheel */
5546 stre = TAILQ_FIRST(&asoc->out_wheel);
5548 /* only one on wheel */
5549 TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke);
5552 for (; stre; stre = strn) {
5553 strn = TAILQ_NEXT(stre, next_spoke);
5554 if (stre->stream_no > strq->stream_no) {
5555 TAILQ_INSERT_BEFORE(stre, strq, next_spoke);
5557 } else if (stre->stream_no == strq->stream_no) {
5558 /* huh, should not happen */
5560 } else if (strn == NULL) {
5561 /* next one is null */
5562 TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq,
5567 if (holds_lock == 0) {
5568 SCTP_TCB_SEND_UNLOCK(stcb);
5573 sctp_remove_from_wheel(struct sctp_tcb *stcb,
5574 struct sctp_association *asoc,
5575 struct sctp_stream_out *strq)
5577 /* take off and then setup so we know it is not on the wheel */
5578 SCTP_TCB_SEND_LOCK(stcb);
5579 if (TAILQ_FIRST(&strq->outqueue)) {
5580 /* more was added */
5581 SCTP_TCB_SEND_UNLOCK(stcb);
5584 TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
5585 strq->next_spoke.tqe_next = NULL;
5586 strq->next_spoke.tqe_prev = NULL;
5587 SCTP_TCB_SEND_UNLOCK(stcb);
5591 sctp_prune_prsctp(struct sctp_tcb *stcb,
5592 struct sctp_association *asoc,
5593 struct sctp_sndrcvinfo *srcv,
5597 struct sctp_tmit_chunk *chk, *nchk;
5599 SCTP_TCB_LOCK_ASSERT(stcb);
5600 if ((asoc->peer_supports_prsctp) &&
5601 (asoc->sent_queue_cnt_removeable > 0)) {
5602 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
5604 * Look for chunks marked with the PR_SCTP flag AND
5605 * the buffer space flag. If the one being sent is
5606 * of equal or greater priority, then purge the old one
5607 * and free some space.
5609 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
5611 * This one is PR-SCTP AND buffer space
5614 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5616 * Lower numbers equate to higher
5617 * priority so if the one we are
5618 * looking at has a larger or equal
5619 * priority we want to drop the data
5620 * and NOT retransmit it.
5624 * We release the book_size
5625 * if the mbuf is here
5630 if (chk->sent > SCTP_DATAGRAM_UNSENT)
5631 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
5633 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
5634 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5636 &asoc->sent_queue, SCTP_SO_LOCKED);
5637 freed_spc += ret_spc;
5638 if (freed_spc >= dataout) {
5641 } /* if chunk was present */
5642 } /* if of sufficient priority */
5643 } /* if chunk has buffer-drop enabled */
5644 } /* tailqforeach */
5646 chk = TAILQ_FIRST(&asoc->send_queue);
5648 nchk = TAILQ_NEXT(chk, sctp_next);
5649 /* Here we must move to the sent queue and mark */
5650 if (PR_SCTP_TTL_ENABLED(chk->flags)) {
5651 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5654 * We release the book_size
5655 * if the mbuf is here
5659 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5660 SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
5661 &asoc->send_queue, SCTP_SO_LOCKED);
5663 freed_spc += ret_spc;
5664 if (freed_spc >= dataout) {
5667 } /* end if chk->data */
5668 } /* end if right class */
5669 } /* end if chk pr-sctp */
5671 } /* end while (chk) */
5672 } /* if enabled in asoc */
5676 sctp_get_frag_point(struct sctp_tcb *stcb,
5677 struct sctp_association *asoc)
5682 * For endpoints that have both v6 and v4 addresses we must reserve
5683 * room for the IPv6 header; for those that are only dealing with V4
5684 * we use a larger frag point.
5686 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5687 ovh = SCTP_MED_OVERHEAD;
5689 ovh = SCTP_MED_V4_OVERHEAD;
5692 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
5693 siz = asoc->smallest_mtu - ovh;
5695 siz = (stcb->asoc.sctp_frag_point - ovh);
5697 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
5699 /* A data chunk MUST fit in a cluster */
5700 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
5703 /* adjust for an AUTH chunk if DATA requires auth */
5704 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
5705 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
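/*
 * In other words (a sketch, since the overhead constants are defined
 * elsewhere): siz starts from the smaller of the association's
 * smallest path MTU and the configured sctp_frag_point, minus the
 * IP/SCTP overhead (ovh), minus the AUTH chunk length when DATA must
 * be authenticated, and is finally trimmed to a 4-byte boundary per
 * the comment below.
 */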
5708 /* make it an even word boundary please */
5715 sctp_set_prsctp_policy(struct sctp_tcb *stcb,
5716 struct sctp_stream_queue_pending *sp)
5719 if (stcb->asoc.peer_supports_prsctp) {
5721 * We assume that the user wants PR_SCTP_TTL if the user
5722 * provides a positive lifetime but does not specify any
5723 * PR_SCTP policy. This is a BAD assumption and causes
5724 * problems at least with the U-Vancovers MPI folks. I will
5725 * change this so that no policy means NO PR-SCTP.
5727 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
5728 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
5733 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
5734 case CHUNK_FLAGS_PR_SCTP_BUF:
5736 * Time to live is a priority stored in tv_sec when
5737 * doing the buffer drop thing.
5739 sp->ts.tv_sec = sp->timetolive;
5742 case CHUNK_FLAGS_PR_SCTP_TTL:
5746 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
5747 tv.tv_sec = sp->timetolive / 1000;
5748 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
5750 * TODO sctp_constants.h needs alternative
5751 * time macros when _KERNEL is undefined.
5753 timevaladd(&sp->ts, &tv);
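/*
 * Example of the conversion above: timetolive is in milliseconds, so a
 * value of 1500 yields tv.tv_sec = 1 and tv.tv_usec = 500000, which is
 * then added to the current time to give the absolute drop time in
 * sp->ts.
 */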
5756 case CHUNK_FLAGS_PR_SCTP_RTX:
5758 * Time to live is the number of retransmissions
5761 sp->ts.tv_sec = sp->timetolive;
5765 SCTPDBG(SCTP_DEBUG_USRREQ1,
5766 "Unknown PR_SCTP policy %u.\n",
5767 PR_SCTP_POLICY(sp->sinfo_flags));
5774 sctp_msg_append(struct sctp_tcb *stcb,
5775 struct sctp_nets *net,
5777 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
5779 int error = 0, holds_lock;
5781 struct sctp_stream_queue_pending *sp = NULL;
5782 struct sctp_stream_out *strm;
5785 * Given an mbuf chain, put it into the association send queue and
5786 * place it on the wheel
5788 holds_lock = hold_stcb_lock;
5789 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
5790 /* Invalid stream number */
5791 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
5795 if ((stcb->asoc.stream_locked) &&
5796 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
5797 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
5801 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
5802 /* Now can we send this? */
5803 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
5804 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5805 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
5806 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
5807 /* got data while shutting down */
5808 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
5812 sctp_alloc_a_strmoq(stcb, sp);
5814 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
5818 sp->sinfo_flags = srcv->sinfo_flags;
5819 sp->timetolive = srcv->sinfo_timetolive;
5820 sp->ppid = srcv->sinfo_ppid;
5821 sp->context = srcv->sinfo_context;
5823 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
5827 sp->net = stcb->asoc.primary_destination;
5830 atomic_add_int(&sp->net->ref_count, 1);
5831 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
5832 sp->stream = srcv->sinfo_stream;
5833 sp->msg_is_complete = 1;
5834 sp->sender_all_done = 1;
5837 sp->tail_mbuf = NULL;
5840 sctp_set_prsctp_policy(stcb, sp);
5842 * We could in theory (for sendall) pass the length in, but we would
5843 * still have to hunt through the chain since we need to set up the
5847 if (SCTP_BUF_NEXT(at) == NULL)
5849 sp->length += SCTP_BUF_LEN(at);
5850 at = SCTP_BUF_NEXT(at);
5852 SCTP_TCB_SEND_LOCK(stcb);
5853 sctp_snd_sb_alloc(stcb, sp->length);
5854 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
5855 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
5856 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
5857 sp->strseq = strm->next_sequence_sent;
5858 strm->next_sequence_sent++;
5860 if ((strm->next_spoke.tqe_next == NULL) &&
5861 (strm->next_spoke.tqe_prev == NULL)) {
5862 /* Not on wheel, insert */
5863 sctp_insert_on_wheel(stcb, &stcb->asoc, strm, 1);
5866 SCTP_TCB_SEND_UNLOCK(stcb);
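/*
 * sctp_copy_mbufchain(): append clonechain to outchain. If the caller
 * says we can take the mbufs we simply link the clone chain in; small
 * payloads (below the mbuf threshold sysctl) are copied into the
 * trailing space of the existing chain; otherwise the chain is
 * duplicated with SCTP_M_COPYM and tacked on to the end.
 */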
5875 static struct mbuf *
5876 sctp_copy_mbufchain(struct mbuf *clonechain,
5877 struct mbuf *outchain,
5878 struct mbuf **endofchain,
5881 uint8_t copy_by_ref)
5884 struct mbuf *appendchain;
5888 if (endofchain == NULL) {
5892 sctp_m_freem(outchain);
5895 if (can_take_mbuf) {
5896 appendchain = clonechain;
5899 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
5901 /* It's not in a cluster */
5902 if (*endofchain == NULL) {
5903 /* lets get a mbuf cluster */
5904 if (outchain == NULL) {
5905 /* This is the general case */
5907 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
5908 if (outchain == NULL) {
5911 SCTP_BUF_LEN(outchain) = 0;
5912 *endofchain = outchain;
5913 /* get the prepend space */
5914 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
5917 * We really should not get a NULL
5923 if (SCTP_BUF_NEXT(m) == NULL) {
5927 m = SCTP_BUF_NEXT(m);
5930 if (*endofchain == NULL) {
5932 * huh, TSNH XXX maybe we
5935 sctp_m_freem(outchain);
5939 /* get the new end of length */
5940 len = M_TRAILINGSPACE(*endofchain);
5942 /* how much is left at the end? */
5943 len = M_TRAILINGSPACE(*endofchain);
5945 /* Find the end of the data, for appending */
5946 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
5948 /* Now lets copy it out */
5949 if (len >= sizeofcpy) {
5950 /* It all fits, copy it in */
5951 m_copydata(clonechain, 0, sizeofcpy, cp);
5952 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
5954 /* fill up the end of the chain */
5956 m_copydata(clonechain, 0, len, cp);
5957 SCTP_BUF_LEN((*endofchain)) += len;
5958 /* now we need another one */
5961 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
5966 SCTP_BUF_NEXT((*endofchain)) = m;
5968 cp = mtod((*endofchain), caddr_t);
5969 m_copydata(clonechain, len, sizeofcpy, cp);
5970 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
5974 /* copy the old-fashioned way */
5975 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
5976 #ifdef SCTP_MBUF_LOGGING
5977 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5982 if (SCTP_BUF_IS_EXTENDED(mat)) {
5983 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
5985 mat = SCTP_BUF_NEXT(mat);
5991 if (appendchain == NULL) {
5994 sctp_m_freem(outchain);
5998 /* tack on to the end */
5999 if (*endofchain != NULL) {
6000 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6004 if (SCTP_BUF_NEXT(m) == NULL) {
6005 SCTP_BUF_NEXT(m) = appendchain;
6008 m = SCTP_BUF_NEXT(m);
6012 * save off the end and update the end-chain position
6016 if (SCTP_BUF_NEXT(m) == NULL) {
6020 m = SCTP_BUF_NEXT(m);
6024 /* save off the end and update the end-chain position */
6027 if (SCTP_BUF_NEXT(m) == NULL) {
6031 m = SCTP_BUF_NEXT(m);
6033 return (appendchain);
6038 sctp_med_chunk_output(struct sctp_inpcb *inp,
6039 struct sctp_tcb *stcb,
6040 struct sctp_association *asoc,
6043 int control_only, int *cwnd_full, int from_where,
6044 struct timeval *now, int *now_filled, int frag_point, int so_locked
6045 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6051 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6054 struct sctp_copy_all *ca;
6057 int added_control = 0;
6058 int un_sent, do_chunk_output = 1;
6059 struct sctp_association *asoc;
6061 ca = (struct sctp_copy_all *)ptr;
6062 if (ca->m == NULL) {
6065 if (ca->inp != inp) {
6069 if ((ca->m) && ca->sndlen) {
6070 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
6072 /* can't copy so we are done */
6076 #ifdef SCTP_MBUF_LOGGING
6077 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6082 if (SCTP_BUF_IS_EXTENDED(mat)) {
6083 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6085 mat = SCTP_BUF_NEXT(mat);
6092 SCTP_TCB_LOCK_ASSERT(stcb);
6093 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6094 /* Abort this assoc with m as the user defined reason */
6096 struct sctp_paramhdr *ph;
6098 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
6100 ph = mtod(m, struct sctp_paramhdr *);
6101 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6102 ph->param_length = htons(ca->sndlen);
6105 * We add one here to keep the assoc from
6106 * disappearing on us.
6108 atomic_add_int(&stcb->asoc.refcnt, 1);
6109 sctp_abort_an_association(inp, stcb,
6110 SCTP_RESPONSE_TO_USER_REQ,
6111 m, SCTP_SO_NOT_LOCKED);
6113 * sctp_abort_an_association calls sctp_free_asoc();
6114 * sctp_free_asoc will NOT free the association since we
6115 * incremented the refcnt. We do this to prevent the
6116 * association being freed and things getting tricky, since
6117 * we could end up (from free_asoc) calling inpcb_free,
6118 * which would take a recursive lock on the
6119 * iterator lock. As a consequence of that, the
6120 * stcb is returned to us unlocked: since
6121 * free_asoc returns with either no TCB or the TCB
6122 * unlocked, we must relock it here (it will be
6123 * unlocked again by the iterator timer).
6125 SCTP_TCB_LOCK(stcb);
6126 atomic_add_int(&stcb->asoc.refcnt, -1);
6127 goto no_chunk_output;
6131 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
6135 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6136 /* shutdown this assoc */
6139 cnt = sctp_is_there_unsent_data(stcb);
6141 if (TAILQ_EMPTY(&asoc->send_queue) &&
6142 TAILQ_EMPTY(&asoc->sent_queue) &&
6144 if (asoc->locked_on_sending) {
6148 * there is nothing queued to send, so I'm
6151 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6152 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6153 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6155 * only send SHUTDOWN the first time
6158 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
6159 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6160 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6162 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6163 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6164 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6165 asoc->primary_destination);
6166 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6167 asoc->primary_destination);
6169 do_chunk_output = 0;
6173 * we still got (or just got) data to send,
6174 * so set SHUTDOWN_PENDING
6177 * XXX sockets draft says that SCTP_EOF
6178 * should be sent with no data. Currently,
6179 * we will allow user data to be sent first
6180 * and move to SHUTDOWN-PENDING
6182 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6183 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6184 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6185 if (asoc->locked_on_sending) {
6187 * Locked to send out the
6190 struct sctp_stream_queue_pending *sp;
6192 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6194 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6195 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6198 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6199 if (TAILQ_EMPTY(&asoc->send_queue) &&
6200 TAILQ_EMPTY(&asoc->sent_queue) &&
6201 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6203 atomic_add_int(&stcb->asoc.refcnt, 1);
6204 sctp_abort_an_association(stcb->sctp_ep, stcb,
6205 SCTP_RESPONSE_TO_USER_REQ,
6206 NULL, SCTP_SO_NOT_LOCKED);
6207 atomic_add_int(&stcb->asoc.refcnt, -1);
6208 goto no_chunk_output;
6210 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6211 asoc->primary_destination);
6217 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6218 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6220 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6221 (stcb->asoc.total_flight > 0) &&
6222 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
6224 do_chunk_output = 0;
6226 if (do_chunk_output)
6227 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6228 else if (added_control) {
6229 int num_out = 0, reason = 0, cwnd_full = 0, now_filled = 0;
6233 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6234 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6235 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6246 sctp_sendall_completes(void *ptr, uint32_t val)
6248 struct sctp_copy_all *ca;
6250 ca = (struct sctp_copy_all *)ptr;
6252 * Do a notify here? Kacheong suggests that the notify be done at
6253 * the send time.. so you would push up a notification if any send
6254 * failed. Don't know if this is feasible since the only failures we
6255 * have are "memory" related, and if you cannot get an mbuf to send
6256 * the data you surely can't get an mbuf to send up to notify the
6257 * user you can't send the data :->
6260 /* now free everything */
6261 sctp_m_freem(ca->m);
6262 SCTP_FREE(ca, SCTP_M_COPYAL);
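/*
 * MC_ALIGN(): reserve leading space in a cluster mbuf so that len bytes
 * of data end up long-aligned at the end of the cluster.
 */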
6266 #define MC_ALIGN(m, len) do { \
6267 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
6272 static struct mbuf *
6273 sctp_copy_out_all(struct uio *uio, int len)
6275 struct mbuf *ret, *at;
6276 int left, willcpy, cancpy, error;
6278 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
6284 SCTP_BUF_LEN(ret) = 0;
6285 /* save space for the data chunk header */
6286 cancpy = M_TRAILINGSPACE(ret);
6287 willcpy = min(cancpy, left);
6290 /* Align data to the end */
6291 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6297 SCTP_BUF_LEN(at) = willcpy;
6298 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6301 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
6302 if (SCTP_BUF_NEXT(at) == NULL) {
6305 at = SCTP_BUF_NEXT(at);
6306 SCTP_BUF_LEN(at) = 0;
6307 cancpy = M_TRAILINGSPACE(at);
6308 willcpy = min(cancpy, left);
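/*
 * sctp_sendall(): copy the user data once into a sctp_copy_all context
 * and start an iterator that replays the send on every association of
 * this endpoint; sctp_sendall_iterator() does the per-association work
 * and sctp_sendall_completes() frees the context when done.
 */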
6315 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6316 struct sctp_sndrcvinfo *srcv)
6319 struct sctp_copy_all *ca;
6321 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6325 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6328 memset(ca, 0, sizeof(struct sctp_copy_all));
6331 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6333 * take off the sendall flag, it would be bad if we failed to do
6336 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6337 /* get length and mbuf chain */
6339 ca->sndlen = uio->uio_resid;
6340 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6341 if (ca->m == NULL) {
6342 SCTP_FREE(ca, SCTP_M_COPYAL);
6343 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6347 /* Gather the length of the send */
6353 ca->sndlen += SCTP_BUF_LEN(m);
6354 m = SCTP_BUF_NEXT(m);
6358 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6359 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6360 SCTP_ASOC_ANY_STATE,
6362 sctp_sendall_completes, inp, 1);
6364 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6365 SCTP_FREE(ca, SCTP_M_COPYAL);
6366 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
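/*
 * sctp_toss_old_cookies(): strip any queued COOKIE_ECHO chunks from the
 * control send queue and release their data and chunk resources.
 */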
6374 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6376 struct sctp_tmit_chunk *chk, *nchk;
6378 chk = TAILQ_FIRST(&asoc->control_send_queue);
6380 nchk = TAILQ_NEXT(chk, sctp_next);
6381 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6382 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6384 sctp_m_freem(chk->data);
6387 asoc->ctrl_queue_cnt--;
6388 sctp_free_a_chunk(stcb, chk);
6395 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6397 struct sctp_association *asoc;
6398 struct sctp_tmit_chunk *chk, *chk_tmp;
6399 struct sctp_asconf_chunk *acp;
6402 for (chk = TAILQ_FIRST(&asoc->asconf_send_queue); chk != NULL;
6405 chk_tmp = TAILQ_NEXT(chk, sctp_next);
6406 /* find SCTP_ASCONF chunk in queue */
6407 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6409 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6410 if (compare_with_wrap(ntohl(acp->serial_number), stcb->asoc.asconf_seq_out_acked, MAX_SEQ)) {
6415 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6417 sctp_m_freem(chk->data);
6420 asoc->ctrl_queue_cnt--;
6421 sctp_free_a_chunk(stcb, chk);
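/*
 * sctp_clean_up_datalist(): move the chunks just bundled into a packet
 * from the send_queue to the sent_queue (kept in TSN order), mark them
 * SENT, charge them to the flight size and the peer's rwnd, and flag
 * window probes.
 */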
6428 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6430 struct sctp_association *asoc,
6431 struct sctp_tmit_chunk **data_list,
6433 struct sctp_nets *net)
6436 struct sctp_tmit_chunk *tp1;
6438 for (i = 0; i < bundle_at; i++) {
6439 /* off of the send queue */
6442 * For any chunk other than 0 you zap the time; chunk 0 gets
6443 * zapped or set based on whether an RTO measurement is
6446 data_list[i]->do_rtt = 0;
6449 data_list[i]->sent_rcv_time = net->last_sent_time;
6450 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6451 TAILQ_REMOVE(&asoc->send_queue,
6454 /* on to the sent queue */
6455 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6456 if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq,
6457 data_list[i]->rec.data.TSN_seq, MAX_TSN))) {
6458 struct sctp_tmit_chunk *tpp;
6460 /* need to move back */
6462 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6464 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6468 if (compare_with_wrap(tp1->rec.data.TSN_seq,
6469 data_list[i]->rec.data.TSN_seq, MAX_TSN)) {
6472 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6474 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6479 /* This does not lower until the cum-ack passes it */
6480 asoc->sent_queue_cnt++;
6481 asoc->send_queue_cnt--;
6482 if ((asoc->peers_rwnd <= 0) &&
6483 (asoc->total_flight == 0) &&
6485 /* Mark the chunk as being a window probe */
6486 SCTP_STAT_INCR(sctps_windowprobed);
6488 #ifdef SCTP_AUDITING_ENABLED
6489 sctp_audit_log(0xC2, 3);
6491 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6492 data_list[i]->snd_count = 1;
6493 data_list[i]->rec.data.chunk_was_revoked = 0;
6494 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6495 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6496 data_list[i]->whoTo->flight_size,
6497 data_list[i]->book_size,
6498 (uintptr_t) data_list[i]->whoTo,
6499 data_list[i]->rec.data.TSN_seq);
6501 sctp_flight_size_increase(data_list[i]);
6502 sctp_total_flight_increase(stcb, data_list[i]);
6503 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6504 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6505 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6507 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6508 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6509 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6510 /* SWS sender side engages */
6511 asoc->peers_rwnd = 0;
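/*
 * sctp_clean_up_ctl(): sweep stray one-shot control chunks (SACK,
 * HEARTBEAT, SHUTDOWN, COOKIE-ACK, etc.) off the control send queue;
 * stream-reset chunks get special handling via the str_reset pointer.
 */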
6517 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
6519 struct sctp_tmit_chunk *chk, *nchk;
6521 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
6523 nchk = TAILQ_NEXT(chk, sctp_next);
6524 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6525 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6526 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6527 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6528 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6529 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6530 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6531 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
6532 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
6533 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
6534 /* Stray chunks must be cleaned up */
6536 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6538 sctp_m_freem(chk->data);
6541 asoc->ctrl_queue_cnt--;
6542 sctp_free_a_chunk(stcb, chk);
6543 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
6544 /* special handling, we must look into the param */
6545 if (chk != asoc->str_reset) {
6546 goto clean_up_anyway;
6554 sctp_can_we_split_this(struct sctp_tcb *stcb,
6556 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
6559 * Make a decision on whether I should split a msg into multiple parts.
6560 * This is only asked of incomplete messages.
6564 * If we are doing EEOR we need to always send it if it's the
6565 * entire thing, since it might be all the guy is putting in
6568 if (goal_mtu >= length) {
6570 * If we have data outstanding,
6571 * we get another chance when the sack
6572 * arrives to transmit - wait for more data
6574 if (stcb->asoc.total_flight == 0) {
6576 * If nothing is in flight, we zero the
6584 /* You can fill the rest */
6589 * For those strange folk that make the send buffer
6590 * smaller than our fragmentation point, we can't
6591 * get a full msg in so we have to allow splitting.
6593 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
6596 if ((length <= goal_mtu) ||
6597 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
6598 /* Sub-optimal residual, don't split in non-eeor mode. */
6602 * If we reach here, length is larger than the goal_mtu. Do we wish
6603 * to split it for the sake of packing packets together?
6605 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
6606 /* It's ok to split it */
6607 return (min(goal_mtu, frag_point));
6609 /* Nope, can't split */
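/*
 * sctp_move_to_outqueue(): carve up to goal_mtu (bounded by frag_point)
 * bytes off the head message of the given stream, build a DATA chunk
 * around it (stealing the mbufs when we take the whole message, copying
 * otherwise), assign a TSN and queue the chunk on the association's
 * send_queue.
 */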
6615 sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
6616 struct sctp_stream_out *strq,
6618 uint32_t frag_point,
6624 /* Move from the stream to the send_queue keeping track of the total */
6625 struct sctp_association *asoc;
6626 struct sctp_stream_queue_pending *sp;
6627 struct sctp_tmit_chunk *chk;
6628 struct sctp_data_chunk *dchkh;
6629 uint32_t to_move, length;
6630 uint8_t rcv_flags = 0;
6632 uint8_t send_lock_up = 0;
6634 SCTP_TCB_LOCK_ASSERT(stcb);
6637 /* sa_ignore FREED_MEMORY */
6638 sp = TAILQ_FIRST(&strq->outqueue);
6641 if (send_lock_up == 0) {
6642 SCTP_TCB_SEND_LOCK(stcb);
6645 sp = TAILQ_FIRST(&strq->outqueue);
6649 if (strq->last_msg_incomplete) {
6650 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
6652 strq->last_msg_incomplete);
6653 strq->last_msg_incomplete = 0;
6657 SCTP_TCB_SEND_UNLOCK(stcb);
6662 if ((sp->msg_is_complete) && (sp->length == 0)) {
6663 if (sp->sender_all_done) {
6665 * We are doing deferred cleanup. Last time through
6666 * when we took all the data the sender_all_done was
6669 if (sp->put_last_out == 0) {
6670 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
6671 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
6672 sp->sender_all_done,
6674 sp->msg_is_complete,
6678 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
6679 SCTP_TCB_SEND_LOCK(stcb);
6682 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
6683 TAILQ_REMOVE(&strq->outqueue, sp, next);
6684 sctp_free_remote_addr(sp->net);
6686 sctp_m_freem(sp->data);
6689 sctp_free_a_strmoq(stcb, sp);
6691 /* we can't be locked to it */
6693 stcb->asoc.locked_on_sending = NULL;
6695 SCTP_TCB_SEND_UNLOCK(stcb);
6698 /* back to get the next msg */
6702 * sender just finished this but still holds a
6711 /* is there some to get */
6712 if (sp->length == 0) {
6720 some_taken = sp->some_taken;
6721 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6722 sp->msg_is_complete = 1;
6725 length = sp->length;
6726 if (sp->msg_is_complete) {
6727 /* The message is complete */
6728 to_move = min(length, frag_point);
6729 if (to_move == length) {
6730 /* All of it fits in the MTU */
6731 if (sp->some_taken) {
6732 rcv_flags |= SCTP_DATA_LAST_FRAG;
6733 sp->put_last_out = 1;
6735 rcv_flags |= SCTP_DATA_NOT_FRAG;
6736 sp->put_last_out = 1;
6739 /* Not all of it fits, we fragment */
6740 if (sp->some_taken == 0) {
6741 rcv_flags |= SCTP_DATA_FIRST_FRAG;
6746 to_move = sctp_can_we_split_this(stcb, length, goal_mtu,
6747 frag_point, eeor_mode);
6750 * We use a snapshot of length in case it
6751 * is expanding during the compare.
6756 if (to_move >= llen) {
6758 if (send_lock_up == 0) {
6760 * We are taking all of an incomplete msg
6761 * thus we need a send lock.
6763 SCTP_TCB_SEND_LOCK(stcb);
6765 if (sp->msg_is_complete) {
6767 * the sender finished the
6774 if (sp->some_taken == 0) {
6775 rcv_flags |= SCTP_DATA_FIRST_FRAG;
6779 /* Nothing to take. */
6780 if (sp->some_taken) {
6789 /* If we reach here, we can copy out a chunk */
6790 sctp_alloc_a_chunk(stcb, chk);
6792 /* No chunk memory */
6798 * Set up for unordered if needed by looking at the user-sent info
6801 if (sp->sinfo_flags & SCTP_UNORDERED) {
6802 rcv_flags |= SCTP_DATA_UNORDERED;
6804 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) {
6805 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
6807 /* clear out the chunk before setting up */
6808 memset(chk, 0, sizeof(*chk));
6809 chk->rec.data.rcv_flags = rcv_flags;
6811 if (to_move >= length) {
6812 /* we think we can steal the whole thing */
6813 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
6814 SCTP_TCB_SEND_LOCK(stcb);
6817 if (to_move < sp->length) {
6818 /* bail, it changed */
6821 chk->data = sp->data;
6822 chk->last_mbuf = sp->tail_mbuf;
6823 /* register the stealing */
6824 sp->data = sp->tail_mbuf = NULL;
6829 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
6830 chk->last_mbuf = NULL;
6831 if (chk->data == NULL) {
6832 sp->some_taken = some_taken;
6833 sctp_free_a_chunk(stcb, chk);
6838 #ifdef SCTP_MBUF_LOGGING
6839 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6844 if (SCTP_BUF_IS_EXTENDED(mat)) {
6845 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6847 mat = SCTP_BUF_NEXT(mat);
6851 /* Pull off the data */
6852 m_adj(sp->data, to_move);
6853 /* Now lets work our way down and compact it */
6855 while (m && (SCTP_BUF_LEN(m) == 0)) {
6856 sp->data = SCTP_BUF_NEXT(m);
6857 SCTP_BUF_NEXT(m) = NULL;
6858 if (sp->tail_mbuf == m) {
6860 * Freeing tail? TSNH since
6861 * we supposedly were taking less
6862 * than the sp->length.
6865 panic("Huh, freing tail? - TSNH");
6867 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
6868 sp->tail_mbuf = sp->data = NULL;
6877 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
6878 chk->copy_by_ref = 1;
6880 chk->copy_by_ref = 0;
6883 * get last_mbuf and counts of mb usage. This is ugly but hopefully
6884 * it's only one mbuf.
6886 if (chk->last_mbuf == NULL) {
6887 chk->last_mbuf = chk->data;
6888 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
6889 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
6892 if (to_move > length) {
6893 /*- This should not happen either
6894 * since we always lower to_move to the size
6895 * of sp->length if it's larger.
6898 panic("Huh, how can to_move be larger?");
6900 SCTP_PRINTF("Huh, how can to_move be larger?\n");
6904 atomic_subtract_int(&sp->length, to_move);
6906 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
6907 /* Not enough room for a chunk header, get some */
6910 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
6913 * we're in trouble here. _PREPEND below will free
6914 * all the data if there is no leading space, so we
6915 * must put the data back and restore.
6917 if (send_lock_up == 0) {
6918 SCTP_TCB_SEND_LOCK(stcb);
6921 if (chk->data == NULL) {
6922 /* unsteal the data */
6923 sp->data = chk->data;
6924 sp->tail_mbuf = chk->last_mbuf;
6928 /* reassemble the data */
6930 sp->data = chk->data;
6931 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
6933 sp->some_taken = some_taken;
6934 atomic_add_int(&sp->length, to_move);
6937 sctp_free_a_chunk(stcb, chk);
6941 SCTP_BUF_LEN(m) = 0;
6942 SCTP_BUF_NEXT(m) = chk->data;
6944 M_ALIGN(chk->data, 4);
6947 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
6948 if (chk->data == NULL) {
6949 /* HELP, TSNH since we assured it would not above? */
6951 panic("prepend failes HELP?");
6953 SCTP_PRINTF("prepend fails HELP?\n");
6954 sctp_free_a_chunk(stcb, chk);
6960 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
6961 chk->book_size = chk->send_size = (to_move +
6962 sizeof(struct sctp_data_chunk));
6963 chk->book_size_scale = 0;
6964 chk->sent = SCTP_DATAGRAM_UNSENT;
6967 chk->asoc = &stcb->asoc;
6968 chk->pad_inplace = 0;
6969 chk->no_fr_allowed = 0;
6970 chk->rec.data.stream_seq = sp->strseq;
6971 chk->rec.data.stream_number = sp->stream;
6972 chk->rec.data.payloadtype = sp->ppid;
6973 chk->rec.data.context = sp->context;
6974 chk->rec.data.doing_fast_retransmit = 0;
6975 chk->rec.data.ect_nonce = 0; /* ECN Nonce */
6977 chk->rec.data.timetodrop = sp->ts;
6978 chk->flags = sp->act_flags;
6979 chk->addr_over = sp->addr_over;
6982 atomic_add_int(&chk->whoTo->ref_count, 1);
6984 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
6985 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
6986 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
6987 (uintptr_t) stcb, sp->length,
6988 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
6989 chk->rec.data.TSN_seq);
6991 dchkh = mtod(chk->data, struct sctp_data_chunk *);
6993 * Put the rest of the things in place now. Size was done earlier in
6994 * previous loop prior to padding.
6997 #ifdef SCTP_ASOCLOG_OF_TSNS
6998 SCTP_TCB_LOCK_ASSERT(stcb);
6999 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7000 asoc->tsn_out_at = 0;
7001 asoc->tsn_out_wrapped = 1;
7003 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7004 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7005 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7006 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7007 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7008 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7009 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7010 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7014 dchkh->ch.chunk_type = SCTP_DATA;
7015 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7016 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7017 dchkh->dp.stream_id = htons(strq->stream_no);
7018 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7019 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7020 dchkh->ch.chunk_length = htons(chk->send_size);
7021 /* Now advance the chk->send_size by the actual pad needed. */
7022 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7027 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7028 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7029 chk->pad_inplace = 1;
7031 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7032 /* pad added an mbuf */
7033 chk->last_mbuf = lm;
7035 chk->send_size += pads;
7037 /* We only re-set the policy if it is on */
7038 if (sp->pr_sctp_on) {
7039 sctp_set_prsctp_policy(stcb, sp);
7040 asoc->pr_sctp_cnt++;
7041 chk->pr_sctp_on = 1;
7043 chk->pr_sctp_on = 0;
7045 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7046 /* All done pull and kill the message */
7047 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7048 if (sp->put_last_out == 0) {
7049 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7050 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7051 sp->sender_all_done,
7053 sp->msg_is_complete,
7057 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7058 SCTP_TCB_SEND_LOCK(stcb);
7061 TAILQ_REMOVE(&strq->outqueue, sp, next);
7062 sctp_free_remote_addr(sp->net);
7064 sctp_m_freem(sp->data);
7067 sctp_free_a_strmoq(stcb, sp);
7069 /* we can't be locked to it */
7071 stcb->asoc.locked_on_sending = NULL;
7073 /* more to go, we are locked */
7076 asoc->chunks_on_out_queue++;
7077 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7078 asoc->send_queue_cnt++;
7081 SCTP_TCB_SEND_UNLOCK(stcb);
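/*
 * sctp_select_a_stream(): simple round-robin pick of the next stream on
 * the out-wheel, wrapping back to the head of the wheel when the last
 * stream used was the final spoke.
 */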
7088 static struct sctp_stream_out *
7089 sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc)
7091 struct sctp_stream_out *strq;
7093 /* Find the next stream to use */
7094 if (asoc->last_out_stream == NULL) {
7095 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
7096 if (asoc->last_out_stream == NULL) {
7097 /* huh nothing on the wheel, TSNH */
7102 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
7105 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
7107 /* Save off the last stream */
7108 asoc->last_out_stream = strq;
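/*
 * sctp_fill_outqueue(): pull roughly one MTU's worth of data (less the
 * overhead and the DATA chunk header) off the stream wheel for this
 * net, staying on a stream we are locked to, and stop once the goal is
 * met, the streams run dry, or the mover asks us to bail.
 */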
7115 sctp_fill_outqueue(struct sctp_tcb *stcb,
7116 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now)
7118 struct sctp_association *asoc;
7119 struct sctp_stream_out *strq, *strqn, *strqt;
7120 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7122 struct sctp_stream_queue_pending *sp;
7124 SCTP_TCB_LOCK_ASSERT(stcb);
7127 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
7128 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7130 /* ?? not sure what else to do */
7131 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7134 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7136 /* Need an allowance for the data chunk header too */
7137 goal_mtu -= sizeof(struct sctp_data_chunk);
7139 /* must make even word boundary */
7140 goal_mtu &= 0xfffffffc;
7141 if (asoc->locked_on_sending) {
7142 /* We are stuck on one stream until the message completes. */
7143 strqn = strq = asoc->locked_on_sending;
7146 strqn = strq = sctp_select_a_stream(stcb, asoc);
7150 while ((goal_mtu > 0) && strq) {
7151 sp = TAILQ_FIRST(&strq->outqueue);
7153 * If CMT is off, we must validate that the stream in
7154 * question has the first item pointed towards our network
7155 * destination requested by the caller. Note that if we
7156 * turn out to be locked to a stream (assigning TSNs), then
7157 * we must stop, since we cannot look for another stream
7158 * with data to send to that destination). In CMT's case, by
7159 * skipping this check, we will send one data packet towards
7160 * the requested net.
7165 if ((sp->net != net) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
7166 /* none for this network */
7170 strq = sctp_select_a_stream(stcb, asoc);
7174 if (strqn == strq) {
7175 /* I have circled */
7183 moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked,
7184 &giveup, eeor_mode, &bail);
7185 asoc->last_out_stream = strq;
7187 asoc->locked_on_sending = strq;
7188 if ((moved_how_much == 0) || (giveup) || bail)
7189 /* no more to move for now */
7192 asoc->locked_on_sending = NULL;
7193 strqt = sctp_select_a_stream(stcb, asoc);
7194 if (TAILQ_FIRST(&strq->outqueue) == NULL) {
7195 if (strq == strqn) {
7196 /* Must move start to next one */
7197 strqn = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
7198 if (strqn == NULL) {
7199 strqn = TAILQ_FIRST(&asoc->out_wheel);
7200 if (strqn == NULL) {
7205 sctp_remove_from_wheel(stcb, asoc, strq);
7207 if ((giveup) || bail) {
7215 total_moved += moved_how_much;
7216 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7217 goal_mtu &= 0xfffffffc;
7222 if (total_moved == 0) {
7223 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) &&
7224 (net == stcb->asoc.primary_destination)) {
7225 /* ran dry for the primary network (net) */
7226 SCTP_STAT_INCR(sctps_primary_randry);
7227 } else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7228 /* ran dry with CMT on */
7229 SCTP_STAT_INCR(sctps_cmt_randry);
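/*
 * sctp_fix_ecn_echo(): mark any queued ECN-ECHO chunks as unsent so
 * they are picked up again on the next output pass.
 */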
7235 sctp_fix_ecn_echo(struct sctp_association *asoc)
7237 struct sctp_tmit_chunk *chk;
7239 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7240 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7241 chk->sent = SCTP_DATAGRAM_UNSENT;
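/*
 * sctp_move_to_an_alt(): re-home chunks queued to the given destination
 * onto an alternate, reachable net chosen by sctp_find_alternate_net()
 * (using the PF variant when CMT-PF is enabled).
 */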
7247 sctp_move_to_an_alt(struct sctp_tcb *stcb,
7248 struct sctp_association *asoc,
7249 struct sctp_nets *net)
7251 struct sctp_tmit_chunk *chk;
7252 struct sctp_nets *a_net;
7254 SCTP_TCB_LOCK_ASSERT(stcb);
7256 * JRS 5/14/07 - If CMT PF is turned on, find an alternate
7257 * destination using the PF algorithm for finding alternate
7260 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
7261 a_net = sctp_find_alternate_net(stcb, net, 2);
7263 a_net = sctp_find_alternate_net(stcb, net, 0);
7265 if ((a_net != net) &&
7266 ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
7268 * We only proceed if a valid alternate is found that is not
7269 * this one and is reachable. Here we must move all chunks
7270 * queued in the send queue off of the destination address
7273 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7274 if (chk->whoTo == net) {
7275 /* Move the chunk to our alternate */
7276 sctp_free_remote_addr(chk->whoTo);
7278 atomic_add_int(&a_net->ref_count, 1);
7285 sctp_med_chunk_output(struct sctp_inpcb *inp,
7286 struct sctp_tcb *stcb,
7287 struct sctp_association *asoc,
7290 int control_only, int *cwnd_full, int from_where,
7291 struct timeval *now, int *now_filled, int frag_point, int so_locked
7292 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7298 * Ok this is the generic chunk service queue. We must do the
7299 * following: - Service the stream queue that is next, moving any
7300 * message (note I must get a complete message, i.e. FIRST/MIDDLE and
7301 * LAST, to the out queue in one pass) and assigning TSNs. - Check to
7302 * see if the cwnd/rwnd allows any output; if so we go ahead and
7303 * formulate and send the low level chunks, making sure to combine
7304 * any control in the control chunk queue also.
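* For each destination this walks three phases in order: the ASCONF
* queue, the general control queue, and finally DATA bundling within
* the MTU and rwnd limits, prepending the common header and handing
* the finished packet to sctp_lowlevel_chunk_output().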
7306 struct sctp_nets *net;
7307 struct mbuf *outchain, *endoutchain;
7308 struct sctp_tmit_chunk *chk, *nchk;
7309 struct sctphdr *shdr;
7311 /* temp arrays for unlinking */
7312 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7313 int no_fragmentflg, error;
7314 int one_chunk, hbflag, skip_data_for_this_net;
7315 int asconf, cookie, no_out_cnt;
7316 int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind, eeor_mode;
7317 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7318 struct sctp_nets *start_at, *old_startat = NULL, *send_start_at;
7320 uint32_t auth_offset = 0;
7321 struct sctp_auth_chunk *auth = NULL;
7324 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7333 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7334 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7335 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7340 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7342 * First lets prime the pump. For each destination, if there is room
7343 * in the flight size, attempt to pull an MTU's worth out of the
7344 * stream queues into the general send_queue
7346 #ifdef SCTP_AUDITING_ENABLED
7347 sctp_audit_log(0xC2, 2);
7349 SCTP_TCB_LOCK_ASSERT(stcb);
7351 if ((control_only) || (asoc->stream_reset_outstanding))
7356 /* Nothing possible to send? */
7357 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7358 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7359 TAILQ_EMPTY(&asoc->send_queue) &&
7360 TAILQ_EMPTY(&asoc->out_wheel)) {
7364 if (asoc->peers_rwnd == 0) {
7365 /* No room in peers rwnd */
7368 if (asoc->total_flight > 0) {
7369 /* we are allowed one chunk in flight */
7373 if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
7374 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7376 * for CMT we start at the next one past the one we
7377 * last added data to.
7379 if (TAILQ_FIRST(&asoc->send_queue) != NULL) {
7380 goto skip_the_fill_from_streams;
7382 if (asoc->last_net_data_came_from) {
7383 net = TAILQ_NEXT(asoc->last_net_data_came_from, sctp_next);
7385 net = TAILQ_FIRST(&asoc->nets);
7389 net = TAILQ_FIRST(&asoc->nets);
7393 * JRI-TODO: CMT-MPI. Simply set the first
7394 * destination (net) to be optimized for the next
7395 * message to be pulled out of the outwheel. 1. peek
7396 * at outwheel 2. If large message, set net =
7397 * highest_cwnd 3. If small message, set net =
7401 net = asoc->primary_destination;
7404 net = TAILQ_FIRST(&asoc->nets);
7410 for (; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7411 net->window_probe = 0;
7412 if (old_startat && (old_startat == net)) {
7416 * JRI: if dest is unreachable or unconfirmed, do
7417 * not send data to it
7419 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) || (net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
7423 * JRI: if dest is in PF state, do not send data to
7426 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
7427 SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
7428 (net->dest_state & SCTP_ADDR_PF)) {
7431 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) && (net->ref_count < 2)) {
7432 /* nothing can be in queue for this guy */
7435 if (net->flight_size >= net->cwnd) {
7436 /* skip this network, no room */
7441 * JRI : this for loop we are in takes in each net,
7442 * if it's got space in cwnd and has data sent to
7443 * it (when CMT is off) then it calls
7444 * sctp_fill_outqueue for the net. This gets data on
7445 * the send queue for that network.
7447 * In sctp_fill_outqueue TSN's are assigned and data is
7448 * copied out of the stream buffers. Note mostly
7449 * copy by reference (we hope).
7451 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7452 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7454 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now);
7456 /* memory alloc failure */
7458 goto skip_the_fill_from_streams;
7461 if (start_at != TAILQ_FIRST(&asoc->nets)) {
7462 /* got to pick up the beginning stuff. */
7463 old_startat = start_at;
7464 start_at = net = TAILQ_FIRST(&asoc->nets);
7469 skip_the_fill_from_streams:
7470 *cwnd_full = cwnd_full_ind;
7472 /* now service each destination and send out what we can for it */
7473 /* Nothing to send? */
7474 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
7475 (TAILQ_FIRST(&asoc->asconf_send_queue) == NULL) &&
7476 (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
7480 if (no_data_chunks) {
7481 chk = TAILQ_FIRST(&asoc->asconf_send_queue);
7483 chk = TAILQ_FIRST(&asoc->control_send_queue);
7485 chk = TAILQ_FIRST(&asoc->send_queue);
7488 send_start_at = chk->whoTo;
7490 send_start_at = TAILQ_FIRST(&asoc->nets);
7493 again_one_more_time:
7494 for (net = send_start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7495 /* how much can we send? */
7496 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7497 if (old_startat && (old_startat == net)) {
7498 /* through list completely. */
7502 if (net->ref_count < 2) {
7504 * Ref-count of 1 so we cannot have data or control
7505 * queued to this address. Skip it.
7509 ctl_cnt = bundle_at = 0;
7510 endoutchain = outchain = NULL;
7513 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7514 skip_data_for_this_net = 1;
7516 skip_data_for_this_net = 0;
7518 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7520 * if we have a route and an ifp, check to see if we
7521 * have room to send to this guy
7525 ifp = net->ro.ro_rt->rt_ifp;
7526 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7527 SCTP_STAT_INCR(sctps_ifnomemqueued);
7528 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7529 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7534 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7536 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7540 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7550 if (mtu > asoc->peers_rwnd) {
7551 if (asoc->total_flight > 0) {
7552 /* We have a packet in flight somewhere */
7553 r_mtu = asoc->peers_rwnd;
7555 /* We are always allowed to send one MTU out */
7562 /************************/
7563 /* ASCONF transmission */
7564 /************************/
7565 /* Now first lets go through the asconf queue */
7566 for (chk = TAILQ_FIRST(&asoc->asconf_send_queue);
7568 nchk = TAILQ_NEXT(chk, sctp_next);
7569 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
7572 if (chk->whoTo != net) {
7574 * No, not sent to the network we are
7579 if (chk->data == NULL) {
7582 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
7583 chk->sent != SCTP_DATAGRAM_RESEND) {
7587 * if no AUTH is yet included and this chunk
7588 * requires it, make sure to account for it. We
7589 * don't apply the size until the AUTH chunk is
7590 * actually added below in case there is no room for
7591 * this chunk. NOTE: we overload the use of "omtu"
7594 if ((auth == NULL) &&
7595 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7596 stcb->asoc.peer_auth_chunks)) {
7597 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7600 /* Here we do NOT factor the r_mtu */
7601 if ((chk->send_size < (int)(mtu - omtu)) ||
7602 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7604 * We probably should glom the mbuf chain
7605 * from the chk->data for control but the
7606 * problem is it becomes yet one more level
7607 * of tracking to do if for some reason
7608 * output fails. Then I have got to
7609 * reconstruct the merged control chain.. el
7610 * yucko.. for now we take the easy way and
7614 * Add an AUTH chunk, if chunk requires it
7615 * save the offset into the chain for AUTH
7617 if ((auth == NULL) &&
7618 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7619 stcb->asoc.peer_auth_chunks))) {
7620 outchain = sctp_add_auth_chunk(outchain,
7625 chk->rec.chunk_id.id);
7626 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7628 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
7629 (int)chk->rec.chunk_id.can_take_data,
7630 chk->send_size, chk->copy_by_ref);
7631 if (outchain == NULL) {
7633 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7636 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7637 /* update our MTU size */
7638 if (mtu > (chk->send_size + omtu))
7639 mtu -= (chk->send_size + omtu);
7642 to_out += (chk->send_size + omtu);
7643 /* Do clear IP_DF ? */
7644 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7647 if (chk->rec.chunk_id.can_take_data)
7650 * set hb flag since we can use these for
7656 * should sysctl this: don't bundle data
7657 * with ASCONF since it requires AUTH
7660 chk->sent = SCTP_DATAGRAM_SENT;
7664 * Ok we are out of room but we can
7665 * output without affecting the
7666 * flight size since this little guy
7667 * is a control only packet.
7669 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
7671 * do NOT clear the asconf flag as
7672 * it is used to do appropriate
7673 * source address selection.
7675 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
7676 if (outchain == NULL) {
7678 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
7683 shdr = mtod(outchain, struct sctphdr *);
7684 shdr->src_port = inp->sctp_lport;
7685 shdr->dest_port = stcb->rport;
7686 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
7688 auth_offset += sizeof(struct sctphdr);
7689 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
7690 (struct sockaddr *)&net->ro._l_addr,
7691 outchain, auth_offset, auth,
7692 no_fragmentflg, 0, NULL, asconf, net->port, so_locked, NULL))) {
7693 if (error == ENOBUFS) {
7694 asoc->ifp_had_enobuf = 1;
7695 SCTP_STAT_INCR(sctps_lowlevelerr);
7697 if (from_where == 0) {
7698 SCTP_STAT_INCR(sctps_lowlevelerrusr);
7700 if (*now_filled == 0) {
7701 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7703 *now = net->last_sent_time;
7705 net->last_sent_time = *now;
7708 /* error, could not output */
7709 if (error == EHOSTUNREACH) {
7715 sctp_move_to_an_alt(stcb, asoc, net);
7720 asoc->ifp_had_enobuf = 0;
7721 if (*now_filled == 0) {
7722 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7724 *now = net->last_sent_time;
7726 net->last_sent_time = *now;
7730 * increase the number we sent, if a
7731 * cookie is sent we don't tell them
7734 outchain = endoutchain = NULL;
7738 *num_out += ctl_cnt;
7739 /* recalc a clean slate and setup */
7740 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
7741 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
7743 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
7750 /************************/
7751 /* Control transmission */
7752 /************************/
7753 /* Now first lets go through the control queue */
7754 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
7756 nchk = TAILQ_NEXT(chk, sctp_next);
7757 if (chk->whoTo != net) {
7759 * No, not sent to the network we are
7764 if (chk->data == NULL) {
7767 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
7769 * It must be unsent. Cookies and ASCONF's
7770 * hang around but their timers will force
7771 * when marked for resend.
7776 * if no AUTH is yet included and this chunk
7777 * requires it, make sure to account for it. We
7778 * don't apply the size until the AUTH chunk is
7779 * actually added below in case there is no room for
7780 * this chunk. NOTE: we overload the use of "omtu"
7783 if ((auth == NULL) &&
7784 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7785 stcb->asoc.peer_auth_chunks)) {
7786 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7789 /* Here we do NOT factor the r_mtu */
7790 if ((chk->send_size < (int)(mtu - omtu)) ||
7791 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7793 * We probably should glom the mbuf chain
7794 * from the chk->data for control but the
7795 * problem is it becomes yet one more level
7796 * of tracking to do if for some reason
7797 * output fails. Then I have got to
7798 * reconstruct the merged control chain.. el
7799 * yucko.. for now we take the easy way and
7803 * Add an AUTH chunk, if chunk requires it
7804 * save the offset into the chain for AUTH
7806 if ((auth == NULL) &&
7807 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7808 stcb->asoc.peer_auth_chunks))) {
7809 outchain = sctp_add_auth_chunk(outchain,
7814 chk->rec.chunk_id.id);
7815 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7817 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
7818 (int)chk->rec.chunk_id.can_take_data,
7819 chk->send_size, chk->copy_by_ref);
7820 if (outchain == NULL) {
7822 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7825 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7826 /* update our MTU size */
7827 if (mtu > (chk->send_size + omtu))
7828 mtu -= (chk->send_size + omtu);
7831 to_out += (chk->send_size + omtu);
7832 /* Do clear IP_DF ? */
7833 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7836 if (chk->rec.chunk_id.can_take_data)
7838 /* Mark things to be removed, if needed */
7839 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7840 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7841 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7842 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7843 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7844 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7845 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7846 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7847 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7848 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7850 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
7853 * JRS 5/14/07 - Set the
7854 * flag to say a heartbeat
7859 /* remove these chunks at the end */
7860 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
7861 /* turn off the timer */
7862 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
7863 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7864 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
7870 * Other chunks, since they have
7871 * timers running (i.e. COOKIE) we
7872 * just "trust" that it gets sent or
7876 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7880 chk->sent = SCTP_DATAGRAM_SENT;
7885 * Ok we are out of room but we can
7886 * output without affecting the
7887 * flight size since this little guy
7888 * is a control only packet.
7891 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
7893 * do NOT clear the asconf
7894 * flag as it is used to do
7895 * appropriate source
7896 * address selection.
7900 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
7903 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
7904 if (outchain == NULL) {
7906 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
7908 goto error_out_again;
7910 shdr = mtod(outchain, struct sctphdr *);
7911 shdr->src_port = inp->sctp_lport;
7912 shdr->dest_port = stcb->rport;
7913 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
7915 auth_offset += sizeof(struct sctphdr);
7916 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
7917 (struct sockaddr *)&net->ro._l_addr,
7918 outchain, auth_offset, auth,
7919 no_fragmentflg, 0, NULL, asconf, net->port, so_locked, NULL))) {
7920 if (error == ENOBUFS) {
7921 asoc->ifp_had_enobuf = 1;
7922 SCTP_STAT_INCR(sctps_lowlevelerr);
7924 if (from_where == 0) {
7925 SCTP_STAT_INCR(sctps_lowlevelerrusr);
7928 /* error, could not output */
7930 if (*now_filled == 0) {
7931 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7933 *now = net->last_sent_time;
7935 net->last_sent_time = *now;
7939 if (error == EHOSTUNREACH) {
7945 sctp_move_to_an_alt(stcb, asoc, net);
7950 asoc->ifp_had_enobuf = 0;
7951 /* Only HB or ASCONF advances time */
7953 if (*now_filled == 0) {
7954 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7956 *now = net->last_sent_time;
7958 net->last_sent_time = *now;
7963 * increase the number we sent, if a
7964 * cookie is sent we don't tell them
7967 outchain = endoutchain = NULL;
7971 *num_out += ctl_cnt;
7972 /* recalc a clean slate and setup */
7973 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
7974 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
7976 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
7983 /*********************/
7984 /* Data transmission */
7985 /*********************/
7987 * if AUTH for DATA is required and no AUTH has been added
7988 * yet, account for this in the mtu now... if no data can be
7989 * bundled, this adjustment won't matter anyways since the
7990 * packet will be going out...
7992 if ((auth == NULL) &&
7993 sctp_auth_is_required_chunk(SCTP_DATA,
7994 stcb->asoc.peer_auth_chunks)) {
7995 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7997 /* now lets add any data within the MTU constraints */
7998 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8000 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8001 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8007 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8008 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8018 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) && (skip_data_for_this_net == 0)) ||
8020 for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
8021 if (no_data_chunks) {
8022 /* let only control go out */
8026 if (net->flight_size >= net->cwnd) {
8027 /* skip this net, no room for data */
8031 nchk = TAILQ_NEXT(chk, sctp_next);
8032 if (chk->whoTo != net) {
8033 /* No, not sent to this net */
8036 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8038 * strange, we have a chunk that is
8039 * too big for its destination and
8040 * yet no fragment ok flag.
8041 * Something went wrong when the
8042 * PMTU changed...we did not mark
8043 * this chunk for some reason?? I
8044 * will fix it here by letting IP
8045 * fragment it for now and printing
8046 * a warning. This really should not
8049 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8050 chk->send_size, mtu);
8051 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8053 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8054 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8055 struct sctp_data_chunk *dchkh;
8057 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8058 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8060 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8061 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8062 /* ok we will add this one */
8065 * Add an AUTH chunk, if chunk
8066 * requires it, save the offset into
8067 * the chain for AUTH
8069 if ((auth == NULL) &&
8070 (sctp_auth_is_required_chunk(SCTP_DATA,
8071 stcb->asoc.peer_auth_chunks))) {
8073 outchain = sctp_add_auth_chunk(outchain,
8079 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8081 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8082 chk->send_size, chk->copy_by_ref);
8083 if (outchain == NULL) {
8084 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8085 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8086 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8089 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8092 /* update our MTU size */
8093 /* Do clear IP_DF ? */
8094 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8097 /* unsigned subtraction of mtu */
8098 if (mtu > chk->send_size)
8099 mtu -= chk->send_size;
8102 /* unsigned subtraction of r_mtu */
8103 if (r_mtu > chk->send_size)
8104 r_mtu -= chk->send_size;
8108 to_out += chk->send_size;
8109 if ((to_out > mx_mtu) && no_fragmentflg) {
8111 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8113 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8117 chk->window_probe = 0;
8118 data_list[bundle_at++] = chk;
8119 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8123 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8124 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8125 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8127 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8129 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8130 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8140 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8142 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8144 data_list[0]->window_probe = 1;
8145 net->window_probe = 1;
8151 * Must be sent in order of the
8152 * TSN's (on a network)
8156 } /* for (chunk gather loop for this net) */
8157 } /* if asoc.state OPEN */
8158 /* Is there something to send for this destination? */
8160 /* We may need to start a control timer or two */
8162 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8165 * do NOT clear the asconf flag as it is
8166 * used to do appropriate source address
8171 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8174 /* must start a send timer if data is being sent */
8175 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8177 * no timer running on this destination
8180 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8181 } else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
8182 SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
8184 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) &&
8185 (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8187 * JRS 5/14/07 - If a HB has been sent to a
8188 * PF destination and no T3 timer is
8189 * currently running, start the T3 timer to
8190 * track the HBs that were sent.
8192 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8194 /* Now send it, if there is anything to send :> */
8195 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
8196 if (outchain == NULL) {
8198 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
8202 shdr = mtod(outchain, struct sctphdr *);
8203 shdr->src_port = inp->sctp_lport;
8204 shdr->dest_port = stcb->rport;
8205 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
8207 auth_offset += sizeof(struct sctphdr);
8208 if ((error = sctp_lowlevel_chunk_output(inp,
8211 (struct sockaddr *)&net->ro._l_addr,
8218 asconf, net->port, so_locked, NULL))) {
8219 /* error, we could not output */
8220 if (error == ENOBUFS) {
8221 SCTP_STAT_INCR(sctps_lowlevelerr);
8222 asoc->ifp_had_enobuf = 1;
8224 if (from_where == 0) {
8225 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8228 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8230 if (*now_filled == 0) {
8231 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8233 *now = net->last_sent_time;
8235 net->last_sent_time = *now;
8239 if (error == EHOSTUNREACH) {
8241 * Destination went unreachable
8244 sctp_move_to_an_alt(stcb, asoc, net);
8248 * I add this line to be paranoid. As far as
8249 * I can tell the continue takes us back to
8250 * the top of the for, but just to make sure
8251 * I will reset these again here.
8253 ctl_cnt = bundle_at = 0;
8254 continue; /* This takes us back to the
8255 * for() for the nets. */
8257 asoc->ifp_had_enobuf = 0;
8259 outchain = endoutchain = NULL;
8262 if (bundle_at || hbflag) {
8263 /* For data/asconf and hb set time */
8264 if (*now_filled == 0) {
8265 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8267 *now = net->last_sent_time;
8269 net->last_sent_time = *now;
8273 *num_out += (ctl_cnt + bundle_at);
8276 /* setup for a RTO measurement */
8277 tsns_sent = data_list[0]->rec.data.TSN_seq;
8278 /* fill time if not already filled */
8279 if (*now_filled == 0) {
8280 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8282 *now = asoc->time_last_sent;
8284 asoc->time_last_sent = *now;
8286 data_list[0]->do_rtt = 1;
8287 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8288 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8289 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8290 if (net->flight_size < net->cwnd) {
8291 /* start or restart it */
8292 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8293 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
8294 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
8296 SCTP_STAT_INCR(sctps_earlyfrstrout);
8297 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
8299 /* stop it if its running */
8300 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8301 SCTP_STAT_INCR(sctps_earlyfrstpout);
8302 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
8303 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
8312 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8313 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8316 if (old_startat == NULL) {
8317 old_startat = send_start_at;
8318 send_start_at = TAILQ_FIRST(&asoc->nets);
8320 goto again_one_more_time;
8323 * At the end there should be no NON timed chunks hanging on this
8326 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8327 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8329 if ((*num_out == 0) && (*reason_code == 0)) {
8334 sctp_clean_up_ctl(stcb, asoc);
8339 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8342 * Prepend an OPERATION-ERROR chunk header and put it on the end of
8343 * the control chunk queue.
8345 struct sctp_chunkhdr *hdr;
8346 struct sctp_tmit_chunk *chk;
8349 SCTP_TCB_LOCK_ASSERT(stcb);
8350 sctp_alloc_a_chunk(stcb, chk);
8353 sctp_m_freem(op_err);
8356 chk->copy_by_ref = 0;
8357 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
8358 if (op_err == NULL) {
8359 sctp_free_a_chunk(stcb, chk);
8364 while (mat != NULL) {
8365 chk->send_size += SCTP_BUF_LEN(mat);
8366 mat = SCTP_BUF_NEXT(mat);
8368 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8369 chk->rec.chunk_id.can_take_data = 1;
8370 chk->sent = SCTP_DATAGRAM_UNSENT;
8373 chk->asoc = &stcb->asoc;
8375 chk->whoTo = chk->asoc->primary_destination;
8376 atomic_add_int(&chk->whoTo->ref_count, 1);
8377 hdr = mtod(op_err, struct sctp_chunkhdr *);
8378 hdr->chunk_type = SCTP_OPERATION_ERROR;
8379 hdr->chunk_flags = 0;
8380 hdr->chunk_length = htons(chk->send_size);
8381 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8384 chk->asoc->ctrl_queue_cnt++;
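/*
 * The send_size above is just the sum of the mbuf lengths in the
 * op_err chain. A minimal stand-alone sketch of that walk, using the
 * same buffer macros (hypothetical helper, shown for illustration
 * only):
 */
#if 0
static int
example_mbuf_chain_length(struct mbuf *m)
{
	int len = 0;

	while (m != NULL) {
		len += SCTP_BUF_LEN(m);	/* data bytes in this mbuf */
		m = SCTP_BUF_NEXT(m);	/* step to the next mbuf */
	}
	return (len);
}
#endif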
8388 sctp_send_cookie_echo(struct mbuf *m,
8390 struct sctp_tcb *stcb,
8391 struct sctp_nets *net)
8394 * pull out the cookie and put it at the front of the control chunk
8398 struct mbuf *cookie;
8399 struct sctp_paramhdr parm, *phdr;
8400 struct sctp_chunkhdr *hdr;
8401 struct sctp_tmit_chunk *chk;
8402 uint16_t ptype, plen;
8404 /* First find the cookie in the param area */
8406 at = offset + sizeof(struct sctp_init_chunk);
8408 SCTP_TCB_LOCK_ASSERT(stcb);
8410 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8414 ptype = ntohs(phdr->param_type);
8415 plen = ntohs(phdr->param_length);
8416 if (ptype == SCTP_STATE_COOKIE) {
8419 /* found the cookie */
8420 if ((pad = (plen % 4))) {
8423 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
8424 if (cookie == NULL) {
8428 #ifdef SCTP_MBUF_LOGGING
8429 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8434 if (SCTP_BUF_IS_EXTENDED(mat)) {
8435 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8437 mat = SCTP_BUF_NEXT(mat);
8443 at += SCTP_SIZE32(plen);
8445 if (cookie == NULL) {
8446 /* Did not find the cookie */
8449 /* ok, we got the cookie, let's change it into a cookie echo chunk */
8451 /* first the change from param to cookie */
8452 hdr = mtod(cookie, struct sctp_chunkhdr *);
8453 hdr->chunk_type = SCTP_COOKIE_ECHO;
8454 hdr->chunk_flags = 0;
8455 /* get the chunk stuff now and place it in the FRONT of the queue */
8456 sctp_alloc_a_chunk(stcb, chk);
8459 sctp_m_freem(cookie);
8462 chk->copy_by_ref = 0;
8463 chk->send_size = plen;
8464 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8465 chk->rec.chunk_id.can_take_data = 0;
8466 chk->sent = SCTP_DATAGRAM_UNSENT;
8468 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8469 chk->asoc = &stcb->asoc;
8471 chk->whoTo = chk->asoc->primary_destination;
8472 atomic_add_int(&chk->whoTo->ref_count, 1);
8473 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
8474 chk->asoc->ctrl_queue_cnt++;
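/*
 * The cookie search above walks the INIT-ACK parameter list: read a
 * parameter header, act on SCTP_STATE_COOKIE, and advance by the
 * 32-bit padded length. A condensed sketch of that loop shape, using
 * the same names as above (illustration only):
 */
#if 0
	phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
	while (phdr != NULL) {
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (ptype == SCTP_STATE_COOKIE) {
			/* copy plen bytes starting at 'at' into the cookie */
		}
		at += SCTP_SIZE32(plen);	/* advance past any padding */
		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
	}
#endif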
8479 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
8483 struct sctp_nets *net)
8486 * take a HB request and make it into a HB ack and send it.
8488 struct mbuf *outchain;
8489 struct sctp_chunkhdr *chdr;
8490 struct sctp_tmit_chunk *chk;
8494 /* must have a net pointer */
8497 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
8498 if (outchain == NULL) {
8499 /* gak out of memory */
8502 #ifdef SCTP_MBUF_LOGGING
8503 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8508 if (SCTP_BUF_IS_EXTENDED(mat)) {
8509 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8511 mat = SCTP_BUF_NEXT(mat);
8515 chdr = mtod(outchain, struct sctp_chunkhdr *);
8516 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
8517 chdr->chunk_flags = 0;
8518 if (chk_length % 4) {
8520 uint32_t cpthis = 0;
8523 padlen = 4 - (chk_length % 4);
8524 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
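/*
 * Example of the pad rule above: SCTP chunks are padded out to a
 * 4-byte boundary, so for a heartbeat of chk_length = 46 bytes we
 * append padlen = 4 - (46 % 4) = 2 bytes of zeros via m_copyback().
 */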
8526 sctp_alloc_a_chunk(stcb, chk);
8529 sctp_m_freem(outchain);
8532 chk->copy_by_ref = 0;
8533 chk->send_size = chk_length;
8534 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
8535 chk->rec.chunk_id.can_take_data = 1;
8536 chk->sent = SCTP_DATAGRAM_UNSENT;
8539 chk->asoc = &stcb->asoc;
8540 chk->data = outchain;
8542 atomic_add_int(&chk->whoTo->ref_count, 1);
8543 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8544 chk->asoc->ctrl_queue_cnt++;
8548 sctp_send_cookie_ack(struct sctp_tcb *stcb)
8550 /* formulate and queue a cookie-ack back to the sender */
8551 struct mbuf *cookie_ack;
8552 struct sctp_chunkhdr *hdr;
8553 struct sctp_tmit_chunk *chk;
8556 SCTP_TCB_LOCK_ASSERT(stcb);
8558 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
8559 if (cookie_ack == NULL) {
8563 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
8564 sctp_alloc_a_chunk(stcb, chk);
8567 sctp_m_freem(cookie_ack);
8570 chk->copy_by_ref = 0;
8571 chk->send_size = sizeof(struct sctp_chunkhdr);
8572 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
8573 chk->rec.chunk_id.can_take_data = 1;
8574 chk->sent = SCTP_DATAGRAM_UNSENT;
8577 chk->asoc = &stcb->asoc;
8578 chk->data = cookie_ack;
8579 if (chk->asoc->last_control_chunk_from != NULL) {
8580 chk->whoTo = chk->asoc->last_control_chunk_from;
8582 chk->whoTo = chk->asoc->primary_destination;
8584 atomic_add_int(&chk->whoTo->ref_count, 1);
8585 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
8586 hdr->chunk_type = SCTP_COOKIE_ACK;
8587 hdr->chunk_flags = 0;
8588 hdr->chunk_length = htons(chk->send_size);
8589 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
8590 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8591 chk->asoc->ctrl_queue_cnt++;
8597 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
8599 /* formulate and queue a SHUTDOWN-ACK back to the sender */
8600 struct mbuf *m_shutdown_ack;
8601 struct sctp_shutdown_ack_chunk *ack_cp;
8602 struct sctp_tmit_chunk *chk;
8604 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
8605 if (m_shutdown_ack == NULL) {
8609 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
8610 sctp_alloc_a_chunk(stcb, chk);
8613 sctp_m_freem(m_shutdown_ack);
8616 chk->copy_by_ref = 0;
8617 chk->send_size = sizeof(struct sctp_chunkhdr);
8618 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
8619 chk->rec.chunk_id.can_take_data = 1;
8620 chk->sent = SCTP_DATAGRAM_UNSENT;
8623 chk->asoc = &stcb->asoc;
8624 chk->data = m_shutdown_ack;
8626 atomic_add_int(&net->ref_count, 1);
8628 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
8629 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
8630 ack_cp->ch.chunk_flags = 0;
8631 ack_cp->ch.chunk_length = htons(chk->send_size);
8632 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
8633 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8634 chk->asoc->ctrl_queue_cnt++;
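/*
 * The cookie-ack, shutdown-ack and shutdown senders here all follow
 * the same queueing pattern: allocate an mbuf, reserve room in front
 * for the eventual IP/SCTP headers, fill in the chunk header with the
 * length in network byte order, and append the chunk to the
 * association's control send queue. A condensed sketch of that
 * pattern (values illustrative only):
 */
#if 0
	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
	SCTP_BUF_RESV_UF(m, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	hdr = mtod(m, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ACK;	/* or SCTP_SHUTDOWN_ACK, ... */
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(sizeof(struct sctp_chunkhdr));
	SCTP_BUF_LEN(m) = sizeof(struct sctp_chunkhdr);
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
#endif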
8639 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
8641 /* formulate and queue a SHUTDOWN to the sender */
8642 struct mbuf *m_shutdown;
8643 struct sctp_shutdown_chunk *shutdown_cp;
8644 struct sctp_tmit_chunk *chk;
8646 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
8647 if (m_shutdown == NULL) {
8651 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
8652 sctp_alloc_a_chunk(stcb, chk);
8655 sctp_m_freem(m_shutdown);
8658 chk->copy_by_ref = 0;
8659 chk->send_size = sizeof(struct sctp_shutdown_chunk);
8660 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
8661 chk->rec.chunk_id.can_take_data = 1;
8662 chk->sent = SCTP_DATAGRAM_UNSENT;
8665 chk->asoc = &stcb->asoc;
8666 chk->data = m_shutdown;
8668 atomic_add_int(&net->ref_count, 1);
8670 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
8671 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
8672 shutdown_cp->ch.chunk_flags = 0;
8673 shutdown_cp->ch.chunk_length = htons(chk->send_size);
8674 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
8675 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
8676 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8677 chk->asoc->ctrl_queue_cnt++;
8682 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
8685 * formulate and queue an ASCONF to the peer. ASCONF parameters
8686 * should be queued on the assoc queue.
8688 struct sctp_tmit_chunk *chk;
8689 struct mbuf *m_asconf;
8692 SCTP_TCB_LOCK_ASSERT(stcb);
8694 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
8695 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
8696 /* can't send a new one if there is one in flight already */
8699 /* compose an ASCONF chunk, maximum length is PMTU */
8700 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
8701 if (m_asconf == NULL) {
8704 sctp_alloc_a_chunk(stcb, chk);
8707 sctp_m_freem(m_asconf);
8710 chk->copy_by_ref = 0;
8711 chk->data = m_asconf;
8712 chk->send_size = len;
8713 chk->rec.chunk_id.id = SCTP_ASCONF;
8714 chk->rec.chunk_id.can_take_data = 0;
8715 chk->sent = SCTP_DATAGRAM_UNSENT;
8717 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8718 chk->asoc = &stcb->asoc;
8720 atomic_add_int(&chk->whoTo->ref_count, 1);
8721 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
8722 chk->asoc->ctrl_queue_cnt++;
8727 sctp_send_asconf_ack(struct sctp_tcb *stcb)
8730 * formulate and queue an ASCONF-ACK back to the sender. The ASCONF-ACK
8731 * must be stored in the tcb.
8733 struct sctp_tmit_chunk *chk;
8734 struct sctp_asconf_ack *ack, *latest_ack;
8735 struct mbuf *m_ack, *m;
8736 struct sctp_nets *net = NULL;
8738 SCTP_TCB_LOCK_ASSERT(stcb);
8739 /* Get the latest ASCONF-ACK */
8740 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
8741 if (latest_ack == NULL) {
8744 if (latest_ack->last_sent_to != NULL &&
8745 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
8746 /* we're doing a retransmission */
8747 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
8750 if (stcb->asoc.last_control_chunk_from == NULL)
8751 net = stcb->asoc.primary_destination;
8753 net = stcb->asoc.last_control_chunk_from;
8757 if (stcb->asoc.last_control_chunk_from == NULL)
8758 net = stcb->asoc.primary_destination;
8760 net = stcb->asoc.last_control_chunk_from;
8762 latest_ack->last_sent_to = net;
8764 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
8765 if (ack->data == NULL) {
8768 /* copy the asconf_ack */
8769 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
8770 if (m_ack == NULL) {
8771 /* couldn't copy it */
8774 #ifdef SCTP_MBUF_LOGGING
8775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8780 if (SCTP_BUF_IS_EXTENDED(mat)) {
8781 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8783 mat = SCTP_BUF_NEXT(mat);
8788 sctp_alloc_a_chunk(stcb, chk);
8792 sctp_m_freem(m_ack);
8795 chk->copy_by_ref = 0;
8802 chk->send_size = ack->len;
8803 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
8804 chk->rec.chunk_id.can_take_data = 1;
8805 chk->sent = SCTP_DATAGRAM_UNSENT;
8807 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
8808 chk->asoc = &stcb->asoc;
8809 atomic_add_int(&chk->whoTo->ref_count, 1);
8811 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8812 chk->asoc->ctrl_queue_cnt++;
8819 sctp_chunk_retransmission(struct sctp_inpcb *inp,
8820 struct sctp_tcb *stcb,
8821 struct sctp_association *asoc,
8822 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
8823 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8829 * send out one MTU of retransmission. If fast_retransmit is
8830 * happening we ignore the cwnd. Otherwise we obey the cwnd and
8831 * rwnd. For a Cookie or Asconf in the control chunk queue we
8832 * retransmit them by themselves.
8834 * For data chunks we will pick out the lowest TSNs in the sent_queue
8835 * marked for resend and bundle them all together (up to a MTU of
8836 * destination). The address to send to should have been
8837 * selected/changed where the retransmission was marked (i.e. in FR
8838 * or t3-timeout routines).
8840 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8841 struct sctp_tmit_chunk *chk, *fwd;
8842 struct mbuf *m, *endofchain;
8843 struct sctphdr *shdr;
8844 struct sctp_nets *net = NULL;
8845 uint32_t tsns_sent = 0;
8846 int no_fragmentflg, bundle_at, cnt_thru;
8848 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
8849 struct sctp_auth_chunk *auth = NULL;
8850 uint32_t auth_offset = 0;
8853 SCTP_TCB_LOCK_ASSERT(stcb);
8854 tmr_started = ctl_cnt = bundle_at = error = 0;
8859 endofchain = m = NULL;
8860 #ifdef SCTP_AUDITING_ENABLED
8861 sctp_audit_log(0xC3, 1);
8863 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
8864 (TAILQ_EMPTY(&asoc->control_send_queue))) {
8865 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
8866 asoc->sent_queue_retran_cnt);
8867 asoc->sent_queue_cnt = 0;
8868 asoc->sent_queue_cnt_removeable = 0;
8869 /* send back 0/0 so we enter normal transmission */
8873 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8874 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
8875 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
8876 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
8877 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
8878 if (chk != asoc->str_reset) {
8880 * not eligible for retran if its
8887 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
8892 * Add an AUTH chunk if the chunk requires it, and save the
8893 * offset into the chain for AUTH
8895 if ((auth == NULL) &&
8896 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8897 stcb->asoc.peer_auth_chunks))) {
8898 m = sctp_add_auth_chunk(m, &endofchain,
8899 &auth, &auth_offset,
8901 chk->rec.chunk_id.id);
8903 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
8909 /* do we have control chunks to retransmit? */
8911 /* Start a timer no matter if we succeed or fail */
8912 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8913 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
8914 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
8915 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
8917 SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
8919 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
8922 shdr = mtod(m, struct sctphdr *);
8923 shdr->src_port = inp->sctp_lport;
8924 shdr->dest_port = stcb->rport;
8925 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
8927 auth_offset += sizeof(struct sctphdr);
8928 chk->snd_count++; /* update our count */
8930 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
8931 (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset,
8932 auth, no_fragmentflg, 0, NULL, 0, chk->whoTo->port, so_locked, NULL))) {
8933 SCTP_STAT_INCR(sctps_lowlevelerr);
8936 m = endofchain = NULL;
8940 * We don't want to mark the net->sent time here, since
8941 * we use this for HB and a retransmission cannot measure RTT
8943 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
8945 chk->sent = SCTP_DATAGRAM_SENT;
8946 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
8950 /* Clean up the fwd-tsn list */
8951 sctp_clean_up_ctl(stcb, asoc);
8956 * Ok, it is just data retransmission we need to do, or that and a
8957 * fwd-tsn along with it.
8959 if (TAILQ_EMPTY(&asoc->sent_queue)) {
8960 return (SCTP_RETRAN_DONE);
8962 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
8963 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
8964 /* not yet open, resend the cookie and that is it */
8967 #ifdef SCTP_AUDITING_ENABLED
8968 sctp_auditing(20, inp, stcb, NULL);
8970 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
8971 if (chk->sent != SCTP_DATAGRAM_RESEND) {
8972 /* No, not sent to this net or not ready for rtx */
8975 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
8976 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
8977 /* Gak, we have exceeded max unlucky retran, abort! */
8978 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
8980 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
8981 atomic_add_int(&stcb->asoc.refcnt, 1);
8982 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
8983 SCTP_TCB_LOCK(stcb);
8984 atomic_subtract_int(&stcb->asoc.refcnt, 1);
8985 return (SCTP_RETRAN_EXIT);
8987 /* pick up the net */
8989 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
8990 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
8992 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
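/*
 * For example, a net with a 1500-byte MTU bound only to IPv4 leaves
 * roughly 1500 - (20 + 12) = 1468 bytes of chunk space per packet,
 * assuming SCTP_MIN_V4_OVERHEAD is the usual 20-byte IP plus 12-byte
 * SCTP common header.
 */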
8995 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
8996 /* No room in peers rwnd */
8999 tsn = asoc->last_acked_seq + 1;
9000 if (tsn == chk->rec.data.TSN_seq) {
9002 * we make a special exception for this
9003 * case. The peer has no rwnd but is missing
9004 * the lowest chunk, which is probably what
9005 * is holding up the rwnd.
9007 goto one_chunk_around;
9012 if (asoc->peers_rwnd < mtu) {
9014 if ((asoc->peers_rwnd == 0) &&
9015 (asoc->total_flight == 0)) {
9016 chk->window_probe = 1;
9017 chk->whoTo->window_probe = 1;
9020 #ifdef SCTP_AUDITING_ENABLED
9021 sctp_audit_log(0xC3, 2);
9025 net->fast_retran_ip = 0;
9026 if (chk->rec.data.doing_fast_retransmit == 0) {
9028 * if no FR is in progress, skip destinations that have
9029 * flight_size > cwnd.
9031 if (net->flight_size >= net->cwnd) {
9036 * Mark the destination net to have FR recovery
9040 net->fast_retran_ip = 1;
9044 * if no AUTH is yet included and this chunk requires it,
9045 * make sure to account for it. We don't apply the size
9046 * until the AUTH chunk is actually added below in case
9047 * there is no room for this chunk.
9049 if ((auth == NULL) &&
9050 sctp_auth_is_required_chunk(SCTP_DATA,
9051 stcb->asoc.peer_auth_chunks)) {
9052 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9056 if ((chk->send_size <= (mtu - dmtu)) ||
9057 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9058 /* ok we will add this one */
9059 if ((auth == NULL) &&
9060 (sctp_auth_is_required_chunk(SCTP_DATA,
9061 stcb->asoc.peer_auth_chunks))) {
9062 m = sctp_add_auth_chunk(m, &endofchain,
9063 &auth, &auth_offset,
9066 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9068 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9071 /* Do clear IP_DF ? */
9072 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9075 /* update our MTU size */
9076 if (mtu > (chk->send_size + dmtu))
9077 mtu -= (chk->send_size + dmtu);
9080 data_list[bundle_at++] = chk;
9081 if (one_chunk && (asoc->total_flight <= 0)) {
9082 SCTP_STAT_INCR(sctps_windowprobed);
9085 if (one_chunk == 0) {
9087 * now, are there any more forward from chk to pick
9090 fwd = TAILQ_NEXT(chk, sctp_next);
9092 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9093 /* Nope, not for retran */
9094 fwd = TAILQ_NEXT(fwd, sctp_next);
9097 if (fwd->whoTo != net) {
9098 /* Nope, not the net in question */
9099 fwd = TAILQ_NEXT(fwd, sctp_next);
9102 if ((auth == NULL) &&
9103 sctp_auth_is_required_chunk(SCTP_DATA,
9104 stcb->asoc.peer_auth_chunks)) {
9105 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9108 if (fwd->send_size <= (mtu - dmtu)) {
9109 if ((auth == NULL) &&
9110 (sctp_auth_is_required_chunk(SCTP_DATA,
9111 stcb->asoc.peer_auth_chunks))) {
9112 m = sctp_add_auth_chunk(m,
9114 &auth, &auth_offset,
9118 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9120 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9123 /* Do clear IP_DF ? */
9124 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9127 /* update our MTU size */
9128 if (mtu > (fwd->send_size + dmtu))
9129 mtu -= (fwd->send_size + dmtu);
9132 data_list[bundle_at++] = fwd;
9133 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9136 fwd = TAILQ_NEXT(fwd, sctp_next);
9138 /* can't fit so we are done */
9143 /* Is there something to send for this destination? */
9146 * No matter if we fail or succeed we should start a
9147 * timer. A failure is like a lost IP packet :-)
9149 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9151 * no timer running on this destination
9154 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9157 SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
9159 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
9162 shdr = mtod(m, struct sctphdr *);
9163 shdr->src_port = inp->sctp_lport;
9164 shdr->dest_port = stcb->rport;
9165 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
9167 auth_offset += sizeof(struct sctphdr);
9168 /* Now lets send it, if there is anything to send :> */
9169 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9170 (struct sockaddr *)&net->ro._l_addr, m, auth_offset,
9171 auth, no_fragmentflg, 0, NULL, 0, net->port, so_locked, NULL))) {
9172 /* error, we could not output */
9173 SCTP_STAT_INCR(sctps_lowlevelerr);
9176 m = endofchain = NULL;
9181 * We don't want to mark the net->sent time here
9182 * since we use this for HB and retrans cannot
9185 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9187 /* For auto-close */
9189 if (*now_filled == 0) {
9190 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9191 *now = asoc->time_last_sent;
9194 asoc->time_last_sent = *now;
9196 *cnt_out += bundle_at;
9197 #ifdef SCTP_AUDITING_ENABLED
9198 sctp_audit_log(0xC4, bundle_at);
9201 tsns_sent = data_list[0]->rec.data.TSN_seq;
9203 for (i = 0; i < bundle_at; i++) {
9204 SCTP_STAT_INCR(sctps_sendretransdata);
9205 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9207 * When we have revoked data and we
9208 * retransmit it, we clear the revoked
9209 * flag, since this flag dictates whether we
9210 * subtracted it from the flight size
9212 if (data_list[i]->rec.data.chunk_was_revoked) {
9213 /* Deflate the cwnd */
9214 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9215 data_list[i]->rec.data.chunk_was_revoked = 0;
9217 data_list[i]->snd_count++;
9218 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9219 /* record the time */
9220 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9221 if (data_list[i]->book_size_scale) {
9223 * need to double the book size on
9226 data_list[i]->book_size_scale = 0;
9228 * Since we double the booksize, we
9229 * must also double the output queue
9230 * size, since this gets shrunk when
9231 * we free by this amount.
9233 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9234 data_list[i]->book_size *= 2;
9238 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9239 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9240 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9242 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9243 (uint32_t) (data_list[i]->send_size +
9244 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9246 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9247 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9248 data_list[i]->whoTo->flight_size,
9249 data_list[i]->book_size,
9250 (uintptr_t) data_list[i]->whoTo,
9251 data_list[i]->rec.data.TSN_seq);
9253 sctp_flight_size_increase(data_list[i]);
9254 sctp_total_flight_increase(stcb, data_list[i]);
9255 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9256 /* SWS sender side engages */
9257 asoc->peers_rwnd = 0;
9260 (data_list[i]->rec.data.doing_fast_retransmit)) {
9261 SCTP_STAT_INCR(sctps_sendfastretrans);
9262 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9263 (tmr_started == 0)) {
9265 * ok we just fast-retrans'd
9266 * the lowest TSN, i.e. the
9267 * first on the list. In
9268 * this case we want to give
9269 * some more time to get a
9270 * SACK back without a
9273 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9274 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9275 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9279 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9280 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9282 #ifdef SCTP_AUDITING_ENABLED
9283 sctp_auditing(21, inp, stcb, NULL);
9289 if (asoc->sent_queue_retran_cnt <= 0) {
9290 /* all done we have no more to retran */
9291 asoc->sent_queue_retran_cnt = 0;
9295 /* No more room in rwnd */
9298 /* stop the for loop here. we sent out a packet */
9306 sctp_timer_validation(struct sctp_inpcb *inp,
9307 struct sctp_tcb *stcb,
9308 struct sctp_association *asoc,
9311 struct sctp_nets *net;
9313 /* Validate that a timer is running somewhere */
9314 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9315 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9316 /* Here is a timer */
9320 SCTP_TCB_LOCK_ASSERT(stcb);
9321 /* Gak, we did not have a timer somewhere */
9322 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9323 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9328 sctp_chunk_output(struct sctp_inpcb *inp,
9329 struct sctp_tcb *stcb,
9332 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9338 * Ok this is the generic chunk service queue. We must do the
9340 * - See if there are retransmits pending, if so we must
9342 * - Service the stream queue that is next, moving any
9343 * message (note I must get a complete message i.e.
9344 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
9346 * - Check to see if the cwnd/rwnd allows any output, if so we
9347 * go ahead and formulate and send the low level chunks. Making sure
9348 * to combine any control in the control chunk queue also.
9350 struct sctp_association *asoc;
9351 struct sctp_nets *net;
9352 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
9353 burst_cnt = 0, burst_limit = 0;
9358 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9360 int fr_done, tot_frs = 0;
9363 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9364 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9370 SCTP_TCB_LOCK_ASSERT(stcb);
9372 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9374 if ((un_sent <= 0) &&
9375 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9376 (asoc->sent_queue_retran_cnt == 0)) {
9377 /* Nothing to do unless there is something left to be sent */
9381 * Do we have something to send, data or control, AND a sack timer
9382 * running? If so, piggy-back the sack.
9384 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9385 sctp_send_sack(stcb);
9386 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
9388 while (asoc->sent_queue_retran_cnt) {
9390 * Ok, it is retransmission time only, we send out only ONE
9391 * packet with a single call off to the retran code.
9393 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9395 * Special hook for handling cookies discarded
9396 * by the peer that carried data. Send cookie-ack only
9397 * and then the next call will get the retransmissions.
9399 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9400 &cwnd_full, from_where,
9401 &now, &now_filled, frag_point, so_locked);
9403 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9404 /* if it's not from an HB then do it */
9406 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9412 * it's from any other place, we don't allow retran
9413 * output (only control)
9418 /* Can't send anymore */
9420 * now let's push out control by calling med-level
9421 * output once. This assures that we WILL send HB's
9424 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9425 &cwnd_full, from_where,
9426 &now, &now_filled, frag_point, so_locked);
9427 #ifdef SCTP_AUDITING_ENABLED
9428 sctp_auditing(8, inp, stcb, NULL);
9430 (void)sctp_timer_validation(inp, stcb, asoc, ret);
9435 * The count was off.. retran is not happening so do
9436 * the normal retransmission.
9438 #ifdef SCTP_AUDITING_ENABLED
9439 sctp_auditing(9, inp, stcb, NULL);
9441 if (ret == SCTP_RETRAN_EXIT) {
9446 if (from_where == SCTP_OUTPUT_FROM_T3) {
9447 /* Only one transmission allowed out of a timeout */
9448 #ifdef SCTP_AUDITING_ENABLED
9449 sctp_auditing(10, inp, stcb, NULL);
9451 /* Push out any control */
9452 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where,
9453 &now, &now_filled, frag_point, so_locked);
9456 if (tot_frs > asoc->max_burst) {
9457 /* Hit FR burst limit */
9460 if ((num_out == 0) && (ret == 0)) {
9462 /* No more retrans to send */
9466 #ifdef SCTP_AUDITING_ENABLED
9467 sctp_auditing(12, inp, stcb, NULL);
9469 /* Check for bad destinations; if they exist, move chunks around. */
9470 burst_limit = asoc->max_burst;
9471 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9472 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
9473 SCTP_ADDR_NOT_REACHABLE) {
9475 * if possible move things off of this address. We
9476 * still may send below due to the dormant state, but
9477 * we try to find an alternate address to send to,
9478 * and if we have one we move all queued data on the
9479 * out wheel to this alternate address.
9481 if (net->ref_count > 1)
9482 sctp_move_to_an_alt(stcb, asoc, net);
9483 } else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
9484 SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
9485 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
9487 * JRS 5/14/07 - If CMT PF is on and the current
9488 * destination is in PF state, move all queued data
9489 * to an alternate destination.
9491 if (net->ref_count > 1)
9492 sctp_move_to_an_alt(stcb, asoc, net);
9495 * if ((asoc->sat_network) || (net->addr_is_local))
9496 * { burst_limit = asoc->max_burst *
9497 * SCTP_SAT_NETWORK_BURST_INCR; }
9499 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
9500 if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
9502 * JRS - Use the congestion control
9503 * given in the congestion control
9506 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, burst_limit);
9507 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9508 sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
9510 SCTP_STAT_INCR(sctps_maxburstqueued);
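/*
 * Numeric example of the cwnd-based max burst above, assuming the
 * default congestion module clamps cwnd to roughly flight_size plus
 * burst_limit * MTU: with flight_size 0, max_burst 4 and a 1500-byte
 * MTU, at most about 4 * 1500 = 6000 bytes can be bursted from this
 * pass.
 */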
9512 net->fast_retran_ip = 0;
9514 if (net->flight_size == 0) {
9515 /* Should be decaying the cwnd here */
9525 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
9526 &reason_code, 0, &cwnd_full, from_where,
9527 &now, &now_filled, frag_point, so_locked);
9529 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
9530 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9531 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
9533 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9534 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
9535 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
9539 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
9543 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9544 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
9546 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
9551 * When Nagle is on, we look at how much is un_sent, then
9552 * if it's smaller than an MTU and we have data in
9555 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
9556 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
9557 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
9558 (stcb->asoc.total_flight > 0)) {
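/*
 * Worked example of the Nagle test above: with a smallest_mtu of 1500
 * and SCTP_MIN_OVERHEAD assumed to be the 40 + 12 byte IPv6/SCTP
 * header size, un_sent must reach about 1448 bytes before another
 * packet is pushed while data is still outstanding.
 */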
9562 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
9563 TAILQ_EMPTY(&asoc->send_queue) &&
9564 TAILQ_EMPTY(&asoc->out_wheel)) {
9565 /* Nothing left to send */
9568 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
9569 /* Nothing left to send */
9572 } while (num_out && (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
9573 (burst_cnt < burst_limit)));
9575 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
9576 if (burst_cnt >= burst_limit) {
9577 SCTP_STAT_INCR(sctps_maxburstqueued);
9578 asoc->burst_limit_applied = 1;
9579 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9580 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
9583 asoc->burst_limit_applied = 0;
9586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9587 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
9589 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
9593 * Now we need to clean up the control chunk chain if an ECNE is on
9594 * it. It must be marked as UNSENT again so the next call will continue
9595 * to send it until such time as we get a CWR, to remove it.
9597 if (stcb->asoc.ecn_echo_cnt_onq)
9598 sctp_fix_ecn_echo(asoc);
9604 sctp_output(inp, m, addr, control, p, flags)
9605 struct sctp_inpcb *inp;
9607 struct sockaddr *addr;
9608 struct mbuf *control;
9613 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
9616 if (inp->sctp_socket == NULL) {
9617 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
9620 return (sctp_sosend(inp->sctp_socket,
9630 send_forward_tsn(struct sctp_tcb *stcb,
9631 struct sctp_association *asoc)
9633 struct sctp_tmit_chunk *chk;
9634 struct sctp_forward_tsn_chunk *fwdtsn;
9636 SCTP_TCB_LOCK_ASSERT(stcb);
9637 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9638 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9639 /* mark it as unsent */
9640 chk->sent = SCTP_DATAGRAM_UNSENT;
9642 /* Do we correct its output location? */
9643 if (chk->whoTo != asoc->primary_destination) {
9644 sctp_free_remote_addr(chk->whoTo);
9645 chk->whoTo = asoc->primary_destination;
9646 atomic_add_int(&chk->whoTo->ref_count, 1);
9648 goto sctp_fill_in_rest;
9651 /* Ok if we reach here we must build one */
9652 sctp_alloc_a_chunk(stcb, chk);
9656 chk->copy_by_ref = 0;
9657 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
9658 chk->rec.chunk_id.can_take_data = 0;
9662 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
9663 if (chk->data == NULL) {
9664 sctp_free_a_chunk(stcb, chk);
9667 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
9668 chk->sent = SCTP_DATAGRAM_UNSENT;
9670 chk->whoTo = asoc->primary_destination;
9671 atomic_add_int(&chk->whoTo->ref_count, 1);
9672 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
9673 asoc->ctrl_queue_cnt++;
9676 * Here we go through and fill out the part that deals with
9677 * stream/seq of the ones we skip.
9679 SCTP_BUF_LEN(chk->data) = 0;
9681 struct sctp_tmit_chunk *at, *tp1, *last;
9682 struct sctp_strseq *strseq;
9683 unsigned int cnt_of_space, i, ovh;
9684 unsigned int space_needed;
9685 unsigned int cnt_of_skipped = 0;
9687 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
9688 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
9689 /* no more to look at */
9692 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
9693 /* We don't report these */
9698 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
9699 (cnt_of_skipped * sizeof(struct sctp_strseq)));
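/*
 * For example, with 10 skipped chunks this works out to the 8-byte
 * forward-tsn chunk plus 10 * 4-byte stream/sequence entries, i.e. 48
 * bytes (sizes assuming the usual wire format of those structures).
 */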
9701 cnt_of_space = M_TRAILINGSPACE(chk->data);
9703 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
9704 ovh = SCTP_MIN_OVERHEAD;
9706 ovh = SCTP_MIN_V4_OVERHEAD;
9708 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
9709 /* trim to a mtu size */
9710 cnt_of_space = asoc->smallest_mtu - ovh;
9712 if (cnt_of_space < space_needed) {
9714 * ok we must trim down the chunk by lowering the
9715 * advance peer ack point.
9717 cnt_of_skipped = (cnt_of_space -
9718 ((sizeof(struct sctp_forward_tsn_chunk)) /
9719 sizeof(struct sctp_strseq)));
9721 * Go through and find the TSN that will be the one
9724 at = TAILQ_FIRST(&asoc->sent_queue);
9725 for (i = 0; i < cnt_of_skipped; i++) {
9726 tp1 = TAILQ_NEXT(at, sctp_next);
9731 * last now points to last one I can report, update
9734 asoc->advanced_peer_ack_point = last->rec.data.TSN_seq;
9735 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq));
9737 chk->send_size = space_needed;
9738 /* Setup the chunk */
9739 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
9740 fwdtsn->ch.chunk_length = htons(chk->send_size);
9741 fwdtsn->ch.chunk_flags = 0;
9742 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
9743 fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point);
9744 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) +
9745 (cnt_of_skipped * sizeof(struct sctp_strseq)));
9746 SCTP_BUF_LEN(chk->data) = chk->send_size;
9749 * Move pointer to after the fwdtsn and transfer to the
9752 strseq = (struct sctp_strseq *)fwdtsn;
9754 * Now populate the strseq list. This is done blindly
9755 * without pulling out duplicate stream info. This is
9756 * inefficient but won't harm the process since the peer will
9757 * look at these in sequence and will thus release anything.
9758 * It could mean we exceed the PMTU and chop off some that
9759 * we could have included, but this is unlikely (aka 1432/4
9760 * would mean 300+ stream seq's would have to be reported in
9761 * one FWD-TSN). With a bit of work we can later FIX this to
9762 * optimize and pull out duplicates, but it does add more
9763 * overhead. So for now... not!
9765 at = TAILQ_FIRST(&asoc->sent_queue);
9766 for (i = 0; i < cnt_of_skipped; i++) {
9767 tp1 = TAILQ_NEXT(at, sctp_next);
9768 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
9769 /* We don't report these */
9774 strseq->stream = ntohs(at->rec.data.stream_number);
9775 strseq->sequence = ntohs(at->rec.data.stream_seq);
9785 sctp_send_sack(struct sctp_tcb *stcb)
9788 * Queue up a SACK in the control queue. We must first check to see
9789 * if a SACK is somehow on the control queue. If so, we will take
9790 * and remove the old one.
9792 struct sctp_association *asoc;
9793 struct sctp_tmit_chunk *chk, *a_chk;
9794 struct sctp_sack_chunk *sack;
9795 struct sctp_gap_ack_block *gap_descriptor;
9796 struct sack_track *selector;
9801 int limit_reached = 0;
9802 unsigned int i, jstart, siz, j;
9803 unsigned int num_gap_blocks = 0, space;
9809 SCTP_TCB_LOCK_ASSERT(stcb);
9810 if (asoc->last_data_chunk_from == NULL) {
9811 /* Hmm we never received anything */
9814 sctp_set_rwnd(stcb, asoc);
9815 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9816 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
9817 /* Hmm, found a sack already on queue, remove it */
9818 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
9819 asoc->ctrl_queue_cnt--;	/* removing one from the control queue */
9822 sctp_m_freem(a_chk->data);
9825 sctp_free_remote_addr(a_chk->whoTo);
9826 a_chk->whoTo = NULL;
9830 if (a_chk == NULL) {
9831 sctp_alloc_a_chunk(stcb, a_chk);
9832 if (a_chk == NULL) {
9833 /* No memory so we drop the idea, and set a timer */
9834 if (stcb->asoc.delayed_ack) {
9835 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
9836 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
9837 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
9838 stcb->sctp_ep, stcb, NULL);
9840 stcb->asoc.send_sack = 1;
9844 a_chk->copy_by_ref = 0;
9845 /* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */
9846 a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK;
9847 a_chk->rec.chunk_id.can_take_data = 1;
9849 /* Clear our pkt counts */
9850 asoc->data_pkts_seen = 0;
9853 a_chk->snd_count = 0;
9854 a_chk->send_size = 0; /* fill in later */
9855 a_chk->sent = SCTP_DATAGRAM_UNSENT;
9856 a_chk->whoTo = NULL;
9858 if ((asoc->numduptsns) ||
9859 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
9862 * Ok, we have some duplicates or the destination for the
9863 * sack is unreachable, let's see if we can select an
9864 * alternate to asoc->last_data_chunk_from
9866 if ((!(asoc->last_data_chunk_from->dest_state &
9867 SCTP_ADDR_NOT_REACHABLE)) &&
9868 (asoc->used_alt_onsack > asoc->numnets)) {
9869 /* We used an alt last time, don't use one this time */
9870 a_chk->whoTo = NULL;
9872 asoc->used_alt_onsack++;
9873 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
9875 if (a_chk->whoTo == NULL) {
9876 /* Nope, no alternate */
9877 a_chk->whoTo = asoc->last_data_chunk_from;
9878 asoc->used_alt_onsack = 0;
9882 * No duplicates so we use the last place we received data
9885 asoc->used_alt_onsack = 0;
9886 a_chk->whoTo = asoc->last_data_chunk_from;
9889 atomic_add_int(&a_chk->whoTo->ref_count, 1);
9891 if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
9893 space_req = sizeof(struct sctp_sack_chunk);
9895 /* gaps get a cluster */
9896 space_req = MCLBYTES;
9898 /* Ok now let's formulate an MBUF with our sack */
9899 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
9900 if ((a_chk->data == NULL) ||
9901 (a_chk->whoTo == NULL)) {
9902 /* rats, no mbuf memory */
9904 /* was a problem with the destination */
9905 sctp_m_freem(a_chk->data);
9908 sctp_free_a_chunk(stcb, a_chk);
9909 /* sa_ignore NO_NULL_CHK */
9910 if (stcb->asoc.delayed_ack) {
9911 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
9912 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
9913 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
9914 stcb->sctp_ep, stcb, NULL);
9916 stcb->asoc.send_sack = 1;
9920 /* ok, let's go through and fill it in */
9921 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
9922 space = M_TRAILINGSPACE(a_chk->data);
9923 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
9924 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
9926 limit = mtod(a_chk->data, caddr_t);
9929 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
9930 sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
9931 /* 0x01 is used by nonce for ecn */
9932 if ((SCTP_BASE_SYSCTL(sctp_ecn_enable)) &&
9933 (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) &&
9934 (asoc->peer_supports_ecn_nonce))
9935 sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
9937 sack->ch.chunk_flags = 0;
9939 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
9941 * CMT DAC algorithm: If 2 (binary 10) packets have been
9942 * received, then set the high bit to 1, else 0. Reset
9945 sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
9946 asoc->cmt_dac_pkts_rcvd = 0;
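/*
 * Example of the flag encoding above: cmt_dac_pkts_rcvd is at most 2,
 * so 2 << 6 = 0x80 sets the high bit of the SACK flags byte, while
 * 1 << 6 = 0x40 leaves it clear.
 */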
9948 #ifdef SCTP_ASOCLOG_OF_TSNS
9949 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
9950 stcb->asoc.cumack_log_atsnt++;
9951 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
9952 stcb->asoc.cumack_log_atsnt = 0;
9955 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
9956 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
9957 asoc->my_last_reported_rwnd = asoc->my_rwnd;
9959 /* reset the readers interpretation */
9960 stcb->freed_by_sorcv_sincelast = 0;
9962 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
9964 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
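/*
 * Example of the sizing above: if highest_tsn_inside_map is 25 TSNs
 * beyond mapping_array_base_tsn, then siz = ((25 + 1) + 7) / 8 = 4
 * mapping array bytes need to be scanned for gaps.
 */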
9965 if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
9968 * cum-ack behind the mapping array, so we start and use all
9973 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
9975 * we skip the first one when the cum-ack is at or above the
9976 * mapping array base. Note this only works if
9980 if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
9981 /* we have a gap .. maybe */
9982 for (i = 0; i < siz; i++) {
9983 selector = &sack_array[asoc->mapping_array[i]];
9984 if (mergeable && selector->right_edge) {
9986 * Backup, left and right edges were ok to
9992 if (selector->num_entries == 0)
9995 for (j = jstart; j < selector->num_entries; j++) {
9996 if (mergeable && selector->right_edge) {
9998 * do a merge by NOT setting
10004 * no merge, set the left
10008 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10010 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10013 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10019 if (selector->left_edge) {
10023 if (limit_reached) {
10024 /* Reached the limit, stop */
10030 if (num_gap_blocks == 0) {
10032 * The slide has not yet happened, and somehow we got called
10033 * to send a sack. Cumack needs to move up.
10035 int abort_flag = 0;
10037 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
10038 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10039 sctp_sack_check(stcb, 0, 0, &abort_flag);
10042 /* now we must add any dups we are going to report. */
10043 if ((limit_reached == 0) && (asoc->numduptsns)) {
10044 dup = (uint32_t *) gap_descriptor;
10045 for (i = 0; i < asoc->numduptsns; i++) {
10046 *dup = htonl(asoc->dup_tsns[i]);
10049 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10054 asoc->numduptsns = 0;
10057 * now that the chunk is prepared, queue it to the control chunk
10060 a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
10061 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
10062 (num_dups * sizeof(int32_t)));
10063 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10064 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10065 sack->sack.num_dup_tsns = htons(num_dups);
10066 sack->ch.chunk_length = htons(a_chk->send_size);
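/*
 * Example of the size set above, assuming the standard wire sizes: a
 * 16-byte SACK chunk plus 4 bytes per gap-ack block and 4 bytes per
 * duplicate TSN, so 3 gap blocks and 2 dups give 16 + 12 + 8 = 36
 * bytes.
 */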
10067 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10068 asoc->ctrl_queue_cnt++;
10069 asoc->send_sack = 0;
10070 SCTP_STAT_INCR(sctps_sendsacks);
10076 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10077 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10082 struct mbuf *m_abort;
10083 struct mbuf *m_out = NULL, *m_end = NULL;
10084 struct sctp_abort_chunk *abort = NULL;
10086 uint32_t auth_offset = 0;
10087 struct sctp_auth_chunk *auth = NULL;
10088 struct sctphdr *shdr;
10091 * Add an AUTH chunk if the chunk requires it, and save the offset into
10092 * the chain for AUTH
10094 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10095 stcb->asoc.peer_auth_chunks)) {
10096 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
10097 stcb, SCTP_ABORT_ASSOCIATION);
10099 SCTP_TCB_LOCK_ASSERT(stcb);
10100 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
10101 if (m_abort == NULL) {
10104 sctp_m_freem(m_out);
10107 /* link in any error */
10108 SCTP_BUF_NEXT(m_abort) = operr;
10115 sz += SCTP_BUF_LEN(n);
10116 n = SCTP_BUF_NEXT(n);
10119 SCTP_BUF_LEN(m_abort) = sizeof(*abort);
10120 if (m_out == NULL) {
10121 /* NO Auth chunk prepended, so reserve space in front */
10122 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10125 /* Put AUTH chunk at the front of the chain */
10126 SCTP_BUF_NEXT(m_end) = m_abort;
10129 /* fill in the ABORT chunk */
10130 abort = mtod(m_abort, struct sctp_abort_chunk *);
10131 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10132 abort->ch.chunk_flags = 0;
10133 abort->ch.chunk_length = htons(sizeof(*abort) + sz);
10135 /* prepend and fill in the SCTP header */
10136 SCTP_BUF_PREPEND(m_out, sizeof(struct sctphdr), M_DONTWAIT);
10137 if (m_out == NULL) {
10138 /* TSNH: no memory */
10141 shdr = mtod(m_out, struct sctphdr *);
10142 shdr->src_port = stcb->sctp_ep->sctp_lport;
10143 shdr->dest_port = stcb->rport;
10144 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
10145 shdr->checksum = 0;
10146 auth_offset += sizeof(struct sctphdr);
10148 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
10149 stcb->asoc.primary_destination,
10150 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
10151 m_out, auth_offset, auth, 1, 0, NULL, 0, stcb->asoc.primary_destination->port, so_locked, NULL);
10152 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10156 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10157 struct sctp_nets *net)
10159 /* formulate and SEND a SHUTDOWN-COMPLETE */
10160 struct mbuf *m_shutdown_comp;
10161 struct sctp_shutdown_complete_msg *comp_cp;
10163 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_complete_msg), 0, M_DONTWAIT, 1, MT_HEADER);
10164 if (m_shutdown_comp == NULL) {
10168 comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *);
10169 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10170 comp_cp->shut_cmp.ch.chunk_flags = 0;
10171 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10172 comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport;
10173 comp_cp->sh.dest_port = stcb->rport;
10174 comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag);
10175 comp_cp->sh.checksum = 0;
10177 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg);
10178 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10179 (struct sockaddr *)&net->ro._l_addr,
10180 m_shutdown_comp, 0, NULL, 1, 0, NULL, 0, net->port, SCTP_SO_NOT_LOCKED, NULL);
10181 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10186 sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
10187 uint32_t vrf_id, uint16_t port)
10189 /* formulate and SEND a SHUTDOWN-COMPLETE */
10190 struct mbuf *o_pak;
10192 struct ip *iph, *iph_out;
10193 struct udphdr *udp = NULL;
10196 struct ip6_hdr *ip6, *ip6_out;
10199 int offset_out, len, mlen;
10200 struct sctp_shutdown_complete_msg *comp_cp;
10202 iph = mtod(m, struct ip *);
10203 switch (iph->ip_v) {
10205 len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
10208 case IPV6_VERSION >> 4:
10209 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
10216 len += sizeof(struct udphdr);
10218 mout = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
10219 if (mout == NULL) {
10222 SCTP_BUF_LEN(mout) = len;
10223 SCTP_BUF_NEXT(mout) = NULL;
10230 switch (iph->ip_v) {
10232 iph_out = mtod(mout, struct ip *);
10234 /* Fill in the IP header for the SHUTDOWN-COMPLETE */
10235 iph_out->ip_v = IPVERSION;
10236 iph_out->ip_hl = (sizeof(struct ip) / 4);
10237 iph_out->ip_tos = (u_char)0;
10238 iph_out->ip_id = 0;
10239 iph_out->ip_off = 0;
10240 iph_out->ip_ttl = MAXTTL;
10242 iph_out->ip_p = IPPROTO_UDP;
10244 iph_out->ip_p = IPPROTO_SCTP;
10246 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
10247 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
10249 /* let IP layer calculate this */
10250 iph_out->ip_sum = 0;
10251 offset_out += sizeof(*iph_out);
10252 comp_cp = (struct sctp_shutdown_complete_msg *)(
10253 (caddr_t)iph_out + offset_out);
10256 case IPV6_VERSION >> 4:
10257 ip6 = (struct ip6_hdr *)iph;
10258 ip6_out = mtod(mout, struct ip6_hdr *);
10260 /* Fill in the IPv6 header for the SHUTDOWN-COMPLETE */
10261 ip6_out->ip6_flow = ip6->ip6_flow;
10262 ip6_out->ip6_hlim = MODULE_GLOBAL(MOD_INET6, ip6_defhlim);
10264 ip6_out->ip6_nxt = IPPROTO_UDP;
10266 ip6_out->ip6_nxt = IPPROTO_SCTP;
10268 ip6_out->ip6_src = ip6->ip6_dst;
10269 ip6_out->ip6_dst = ip6->ip6_src;
10271 * ?? The old code had both the iph len + payload, I think
10272 * this is wrong and would never have worked
10274 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
10275 offset_out += sizeof(*ip6_out);
10276 comp_cp = (struct sctp_shutdown_complete_msg *)(
10277 (caddr_t)ip6_out + offset_out);
10281 /* Currently not supported. */
10285 udp = (struct udphdr *)comp_cp;
10286 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
10287 udp->uh_dport = port;
10288 udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
10289 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
10290 offset_out += sizeof(struct udphdr);
10291 comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
10293 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
10295 sctp_m_freem(mout);
10298 /* Now copy in and fill in the SHUTDOWN-COMPLETE tags etc. */
10299 comp_cp->sh.src_port = sh->dest_port;
10300 comp_cp->sh.dest_port = sh->src_port;
10301 comp_cp->sh.checksum = 0;
10302 comp_cp->sh.v_tag = sh->v_tag;
10303 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
10304 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10305 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10308 comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out);
10309 if (iph_out != NULL) {
10312 struct sctp_tcb *stcb = NULL;
10314 mlen = SCTP_BUF_LEN(mout);
10315 bzero(&ro, sizeof ro);
10316 /* set IPv4 length */
10317 iph_out->ip_len = mlen;
10318 #ifdef SCTP_PACKET_LOGGING
10319 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
10320 sctp_packet_log(mout, mlen);
10322 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
10324 SCTP_ENABLE_UDP_CSUM(o_pak);
10327 SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
10329 /* Free the route if we got one back */
10334 if (ip6_out != NULL) {
10335 struct route_in6 ro;
10337 struct sctp_tcb *stcb = NULL;
10338 struct ifnet *ifp = NULL;
10340 bzero(&ro, sizeof(ro));
10341 mlen = SCTP_BUF_LEN(mout);
10342 #ifdef SCTP_PACKET_LOGGING
10343 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
10344 sctp_packet_log(mout, mlen);
10346 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
10348 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr),
10349 sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr))) == 0) {
10350 udp->uh_sum = 0xffff;
10353 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
10355 /* Free the route if we got one back */
10360 SCTP_STAT_INCR(sctps_sendpackets);
10361 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
10362 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10367 static struct sctp_nets *
10368 sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
10370 struct sctp_nets *net, *hnet;
10371 int ms_goneby, highest_ms, state_overide = 0;
10373 (void)SCTP_GETTIME_TIMEVAL(now);
10376 SCTP_TCB_LOCK_ASSERT(stcb);
10377 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
10379 ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
10380 (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
10383 * Skip this guy from consideration if HB is off AND
10388 if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
10389 /* skip this dest net from consideration */
10392 if (net->last_sent_time.tv_sec) {
10393 /* Sent to so we subtract */
10394 ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
10396 /* Never been sent to */
10397 ms_goneby = 0x7fffffff;
10399 * When the address state is unconfirmed but still
10400 * considered reachable, we HB at a higher rate. Once it
10401 * goes confirmed OR reaches the "unreachable" state, then
10402 * we cut it back to HB at a more normal pace.
10404 if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
10410 if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
10411 (ms_goneby > highest_ms)) {
10412 highest_ms = ms_goneby;
10417 ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
10423 if (hnet && highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
10425 * Found the one with the longest delay bounds OR it is
10426 * unconfirmed and still not marked unreachable.
10428 SCTPDBG(SCTP_DEBUG_OUTPUT4, "net:%p is the hb winner -", hnet);
10431 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT4,
10432 (struct sockaddr *)&hnet->ro._l_addr);
10434 SCTPDBG(SCTP_DEBUG_OUTPUT4, " none\n");
10437 /* update the timer now */
10438 hnet->last_sent_time = *now;
10441 /* Nothing to HB */
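/*
 * In short, the selection above treats a net as a heartbeat candidate
 * once it has been idle for at least its RTO (or immediately while it
 * is still unconfirmed), and among candidates the one idle longest
 * wins. For example, with RTOs of 3s and 1s and idle times of 2s and
 * 5s, the second net is chosen.
 */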
10446 sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
10448 struct sctp_tmit_chunk *chk;
10449 struct sctp_nets *net;
10450 struct sctp_heartbeat_chunk *hb;
10451 struct timeval now;
10452 struct sockaddr_in *sin;
10453 struct sockaddr_in6 *sin6;
10455 SCTP_TCB_LOCK_ASSERT(stcb);
10456 if (user_req == 0) {
10457 net = sctp_select_hb_destination(stcb, &now);
10460 * All are busy, none to send to, just start the
10463 if (stcb->asoc.state == 0) {
10466 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
10477 (void)SCTP_GETTIME_TIMEVAL(&now);
10479 sin = (struct sockaddr_in *)&net->ro._l_addr;
10480 if (sin->sin_family != AF_INET) {
10481 if (sin->sin_family != AF_INET6) {
10486 sctp_alloc_a_chunk(stcb, chk);
10488 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
10491 chk->copy_by_ref = 0;
10492 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
10493 chk->rec.chunk_id.can_take_data = 1;
10494 chk->asoc = &stcb->asoc;
10495 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
10497 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
10498 if (chk->data == NULL) {
10499 sctp_free_a_chunk(stcb, chk);
10502 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10503 SCTP_BUF_LEN(chk->data) = chk->send_size;
10504 chk->sent = SCTP_DATAGRAM_UNSENT;
10505 chk->snd_count = 0;
10507 atomic_add_int(&chk->whoTo->ref_count, 1);
10508 /* Now we have a mbuf that we can fill in with the details */
10509 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
10510 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
10511 /* fill out chunk header */
10512 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
10513 hb->ch.chunk_flags = 0;
10514 hb->ch.chunk_length = htons(chk->send_size);
10515 /* Fill out hb parameter */
10516 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
10517 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
10518 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
10519 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
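	/*
	 * The timestamp stored here is opaque to the peer; it is echoed
	 * back in the HEARTBEAT-ACK, from which an RTT sample for this
	 * path can be derived.
	 */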
10520 /* Did our user request this one, put it in */
10521 hb->heartbeat.hb_info.user_req = user_req;
10522 hb->heartbeat.hb_info.addr_family = sin->sin_family;
10523 hb->heartbeat.hb_info.addr_len = sin->sin_len;
10524 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
10526 * we only take from the entropy pool if the address is not yet confirmed.
10529 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
10530 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
10532 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
10533 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
10535 if (sin->sin_family == AF_INET) {
10536 memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
10537 } else if (sin->sin_family == AF_INET6) {
10538 /* We leave the scope the way it is in our lookup table. */
10539 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
10540 memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
10542 /* huh compiler bug */
10547 * JRS 5/14/07 - In CMT PF, the T3 timer is used to track
10548 * PF-heartbeats. Because of this, threshold management is done by
10549 * the t3 timer handler, and does not need to be done upon the send
10550 * of a PF-heartbeat. If CMT PF is on and the destination to which a
10551 * heartbeat is being sent is in PF state, do NOT do threshold
10554 if ((SCTP_BASE_SYSCTL(sctp_cmt_pf) == 0) || ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) {
10555 /* ok we have a destination that needs a beat */
10556 /* let's do the threshold management Qiaobing style */
10557 if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
10558 stcb->asoc.max_send_times)) {
10560 * we have lost the association; in a way this is
10561 * quite bad, since we are charged one more retry even though
10562 * we did not actually send yet. This is the down side
10563 * to Q's style as defined in the RFC, as opposed to my
10564 * alternate style (which is not in the RFC).
10566 if (chk->data != NULL) {
10567 sctp_m_freem(chk->data);
10571 * Here we do NOT use the macro since the
10572 * association is now gone.
10575 sctp_free_remote_addr(chk->whoTo);
10578 sctp_free_a_chunk((struct sctp_tcb *)NULL, chk);
10582 net->hb_responded = 0;
10583 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
10584 stcb->asoc.ctrl_queue_cnt++;
10585 SCTP_STAT_INCR(sctps_sendheartbeat);
10587 * Call directly med level routine to put out the chunk. It will
10588 * always tumble out control chunks aka HB but it may even tumble
10595 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
10598 struct sctp_association *asoc;
10599 struct sctp_ecne_chunk *ecne;
10600 struct sctp_tmit_chunk *chk;
10602 asoc = &stcb->asoc;
10603 SCTP_TCB_LOCK_ASSERT(stcb);
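	/*
	 * ECN-ECHOs are coalesced: if one is already sitting on the
	 * control queue we simply refresh its TSN instead of queueing a
	 * second chunk.
	 */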
10604 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10605 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
10606 /* found a previous ECN_ECHO update it if needed */
10607 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
10608 ecne->tsn = htonl(high_tsn);
10612 /* nope could not find one to update so we must build one */
10613 sctp_alloc_a_chunk(stcb, chk);
10617 chk->copy_by_ref = 0;
10618 SCTP_STAT_INCR(sctps_sendecne);
10619 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
10620 chk->rec.chunk_id.can_take_data = 0;
10621 chk->asoc = &stcb->asoc;
10622 chk->send_size = sizeof(struct sctp_ecne_chunk);
10623 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
10624 if (chk->data == NULL) {
10625 sctp_free_a_chunk(stcb, chk);
10628 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10629 SCTP_BUF_LEN(chk->data) = chk->send_size;
10630 chk->sent = SCTP_DATAGRAM_UNSENT;
10631 chk->snd_count = 0;
10633 atomic_add_int(&chk->whoTo->ref_count, 1);
10634 stcb->asoc.ecn_echo_cnt_onq++;
10635 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
10636 ecne->ch.chunk_type = SCTP_ECN_ECHO;
10637 ecne->ch.chunk_flags = 0;
10638 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
10639 ecne->tsn = htonl(high_tsn);
10640 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
10641 asoc->ctrl_queue_cnt++;
10645 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
10646 struct mbuf *m, int iphlen, int bad_crc)
10648 struct sctp_association *asoc;
10649 struct sctp_pktdrop_chunk *drp;
10650 struct sctp_tmit_chunk *chk;
10657 struct ip6_hdr *ip6h;
10660 int fullsz = 0, extra = 0;
10663 struct sctp_chunkhdr *ch, chunk_buf;
10664 unsigned int chk_length;
10669 asoc = &stcb->asoc;
10670 SCTP_TCB_LOCK_ASSERT(stcb);
10671 if (asoc->peer_supports_pktdrop == 0) {
10673 * peer must declare support before I send one.
10677 if (stcb->sctp_socket == NULL) {
10680 sctp_alloc_a_chunk(stcb, chk);
10684 chk->copy_by_ref = 0;
10685 iph = mtod(m, struct ip *);
10687 sctp_free_a_chunk(stcb, chk);
10690 switch (iph->ip_v) {
10693 len = chk->send_size = iph->ip_len;
10696 case IPV6_VERSION >> 4:
10698 ip6h = mtod(m, struct ip6_hdr *);
10699 len = chk->send_size = ntohs(ip6h->ip6_plen);
10705 /* Validate that we do not have an ABORT in here. */
10706 offset = iphlen + sizeof(struct sctphdr);
10707 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
10708 sizeof(*ch), (uint8_t *) & chunk_buf);
10709 while (ch != NULL) {
10710 chk_length = ntohs(ch->chunk_length);
10711 if (chk_length < sizeof(*ch)) {
10712 /* break to abort land */
10715 switch (ch->chunk_type) {
10716 case SCTP_PACKET_DROPPED:
10717 case SCTP_ABORT_ASSOCIATION:
10719 * we don't respond with a PKT-DROP to an ABORT
10722 sctp_free_a_chunk(stcb, chk);
10727 offset += SCTP_SIZE32(chk_length);
10728 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
10729 sizeof(*ch), (uint8_t *) & chunk_buf);
10732 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
10733 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
10735 * only send 1 mtu worth, trim off the excess on the end.
10737 fullsz = len - extra;
10738 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
10741 chk->asoc = &stcb->asoc;
10742 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
10743 if (chk->data == NULL) {
10745 sctp_free_a_chunk(stcb, chk);
10748 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10749 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
10751 sctp_m_freem(chk->data);
10755 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
10756 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
10757 chk->book_size_scale = 0;
10759 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
10760 drp->trunc_len = htons(fullsz);
10762 * len is already adjusted to size minus overhead above; take
10763 * out the pkt_drop chunk itself from it.
10765 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
10766 len = chk->send_size;
10768 /* no truncation needed */
10769 drp->ch.chunk_flags = 0;
10770 drp->trunc_len = htons(0);
10773 drp->ch.chunk_flags |= SCTP_BADCRC;
10775 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
10776 SCTP_BUF_LEN(chk->data) = chk->send_size;
10777 chk->sent = SCTP_DATAGRAM_UNSENT;
10778 chk->snd_count = 0;
10780 /* we should hit here */
10783 chk->whoTo = asoc->primary_destination;
10785 atomic_add_int(&chk->whoTo->ref_count, 1);
10786 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
10787 chk->rec.chunk_id.can_take_data = 1;
10788 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
10789 drp->ch.chunk_length = htons(chk->send_size);
10790 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
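	/*
	 * bottle_bw advertises our receive-buffer limit; current_onq
	 * reports how much of it is already consumed (or the full limit
	 * if our rwnd has collapsed to zero), so the peer can see how
	 * full our buffer was when the drop happened.
	 */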
10794 drp->bottle_bw = htonl(spc);
10795 if (asoc->my_rwnd) {
10796 drp->current_onq = htonl(asoc->size_on_reasm_queue +
10797 asoc->size_on_all_streams +
10798 asoc->my_rwnd_control_len +
10799 stcb->sctp_socket->so_rcv.sb_cc);
10802 * If my rwnd is 0, possibly from mbuf depletion as well as
10803 * space used, tell the peer there is NO space aka onq == bw
10805 drp->current_onq = htonl(spc);
10809 m_copydata(m, iphlen, len, (caddr_t)datap);
10810 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
10811 asoc->ctrl_queue_cnt++;
10815 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
10817 struct sctp_association *asoc;
10818 struct sctp_cwr_chunk *cwr;
10819 struct sctp_tmit_chunk *chk;
10821 asoc = &stcb->asoc;
10822 SCTP_TCB_LOCK_ASSERT(stcb);
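	/*
	 * Like ECN-ECHO, CWR chunks are coalesced: an already-queued CWR
	 * is only updated when the new TSN is more recent (wrap-aware
	 * compare below); otherwise a fresh chunk is built.
	 */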
10823 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10824 if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
10825 /* found a previous ECN_CWR update it if needed */
10826 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
10827 if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
10829 cwr->tsn = htonl(high_tsn);
10834 /* nope could not find one to update so we must build one */
10835 sctp_alloc_a_chunk(stcb, chk);
10839 chk->copy_by_ref = 0;
10840 chk->rec.chunk_id.id = SCTP_ECN_CWR;
10841 chk->rec.chunk_id.can_take_data = 1;
10842 chk->asoc = &stcb->asoc;
10843 chk->send_size = sizeof(struct sctp_cwr_chunk);
10844 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
10845 if (chk->data == NULL) {
10846 sctp_free_a_chunk(stcb, chk);
10849 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10850 SCTP_BUF_LEN(chk->data) = chk->send_size;
10851 chk->sent = SCTP_DATAGRAM_UNSENT;
10852 chk->snd_count = 0;
10854 atomic_add_int(&chk->whoTo->ref_count, 1);
10855 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
10856 cwr->ch.chunk_type = SCTP_ECN_CWR;
10857 cwr->ch.chunk_flags = 0;
10858 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
10859 cwr->tsn = htonl(high_tsn);
10860 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
10861 asoc->ctrl_queue_cnt++;
10865 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
10866 int number_entries, uint16_t * list,
10867 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
10869 int len, old_len, i;
10870 struct sctp_stream_reset_out_request *req_out;
10871 struct sctp_chunkhdr *ch;
10873 ch = mtod(chk->data, struct sctp_chunkhdr *);
10876 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
10878 /* get to new offset for the param. */
10879 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
10880 /* now how long will this param be? */
10881 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
10882 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
10883 req_out->ph.param_length = htons(len);
10884 req_out->request_seq = htonl(seq);
10885 req_out->response_seq = htonl(resp_seq);
10886 req_out->send_reset_at_tsn = htonl(last_sent);
10887 if (number_entries) {
10888 for (i = 0; i < number_entries; i++) {
10889 req_out->list_of_streams[i] = htons(list[i]);
10892 if (SCTP_SIZE32(len) > len) {
10894 * Need to worry about the pad we may end up adding to the
10895 * end. This is easy since the struct is either aligned to 4
10896 * bytes or 2 bytes off.
10898 req_out->list_of_streams[number_entries] = 0;
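		/*
		 * Worked example (sizes assumed for illustration, not taken
		 * from the headers): with a 16-byte fixed request part and
		 * 3 stream ids the parameter is 16 + 3 * 2 = 22 bytes,
		 * SCTP_SIZE32() rounds it to 24, and the zero written above
		 * covers the 2 pad bytes.
		 */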
10900 /* now fix the chunk length */
10901 ch->chunk_length = htons(len + old_len);
10902 chk->book_size = len + old_len;
10903 chk->book_size_scale = 0;
10904 chk->send_size = SCTP_SIZE32(chk->book_size);
10905 SCTP_BUF_LEN(chk->data) = chk->send_size;
10911 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
10912 int number_entries, uint16_t * list,
10915 int len, old_len, i;
10916 struct sctp_stream_reset_in_request *req_in;
10917 struct sctp_chunkhdr *ch;
10919 ch = mtod(chk->data, struct sctp_chunkhdr *);
10922 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
10924 /* get to new offset for the param. */
10925 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
10926 /* now how long will this param be? */
10927 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
10928 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
10929 req_in->ph.param_length = htons(len);
10930 req_in->request_seq = htonl(seq);
10931 if (number_entries) {
10932 for (i = 0; i < number_entries; i++) {
10933 req_in->list_of_streams[i] = htons(list[i]);
10936 if (SCTP_SIZE32(len) > len) {
10938 * Need to worry about the pad we may end up adding to the
10939 * end. This is easy since the struct is either aligned to 4
10940 * bytes or 2 bytes off.
10942 req_in->list_of_streams[number_entries] = 0;
10944 /* now fix the chunk length */
10945 ch->chunk_length = htons(len + old_len);
10946 chk->book_size = len + old_len;
10947 chk->book_size_scale = 0;
10948 chk->send_size = SCTP_SIZE32(chk->book_size);
10949 SCTP_BUF_LEN(chk->data) = chk->send_size;
10955 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
10959 struct sctp_stream_reset_tsn_request *req_tsn;
10960 struct sctp_chunkhdr *ch;
10962 ch = mtod(chk->data, struct sctp_chunkhdr *);
10965 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
10967 /* get to new offset for the param. */
10968 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
10969 /* now how long will this param be? */
10970 len = sizeof(struct sctp_stream_reset_tsn_request);
10971 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
10972 req_tsn->ph.param_length = htons(len);
10973 req_tsn->request_seq = htonl(seq);
10975 /* now fix the chunk length */
10976 ch->chunk_length = htons(len + old_len);
10977 chk->send_size = len + old_len;
10978 chk->book_size = SCTP_SIZE32(chk->send_size);
10979 chk->book_size_scale = 0;
10980 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
10985 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
10986 uint32_t resp_seq, uint32_t result)
10989 struct sctp_stream_reset_response *resp;
10990 struct sctp_chunkhdr *ch;
10992 ch = mtod(chk->data, struct sctp_chunkhdr *);
10995 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
10997 /* get to new offset for the param. */
10998 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
10999 /* now how long will this param be? */
11000 len = sizeof(struct sctp_stream_reset_response);
11001 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11002 resp->ph.param_length = htons(len);
11003 resp->response_seq = htonl(resp_seq);
11004 resp->result = htonl(result);
11006 /* now fix the chunk length */
11007 ch->chunk_length = htons(len + old_len);
11008 chk->book_size = len + old_len;
11009 chk->book_size_scale = 0;
11010 chk->send_size = SCTP_SIZE32(chk->book_size);
11011 SCTP_BUF_LEN(chk->data) = chk->send_size;
11018 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11019 uint32_t resp_seq, uint32_t result,
11020 uint32_t send_una, uint32_t recv_next)
11023 struct sctp_stream_reset_response_tsn *resp;
11024 struct sctp_chunkhdr *ch;
11026 ch = mtod(chk->data, struct sctp_chunkhdr *);
11029 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11031 /* get to new offset for the param. */
11032 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11033 /* now how long will this param be? */
11034 len = sizeof(struct sctp_stream_reset_response_tsn);
11035 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11036 resp->ph.param_length = htons(len);
11037 resp->response_seq = htonl(resp_seq);
11038 resp->result = htonl(result);
11039 resp->senders_next_tsn = htonl(send_una);
11040 resp->receivers_next_tsn = htonl(recv_next);
11042 /* now fix the chunk length */
11043 ch->chunk_length = htons(len + old_len);
11044 chk->book_size = len + old_len;
11045 chk->send_size = SCTP_SIZE32(chk->book_size);
11046 chk->book_size_scale = 0;
11047 SCTP_BUF_LEN(chk->data) = chk->send_size;
11053 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11054 int number_entries, uint16_t * list,
11055 uint8_t send_out_req, uint32_t resp_seq,
11056 uint8_t send_in_req,
11057 uint8_t send_tsn_req)
11060 struct sctp_association *asoc;
11061 struct sctp_tmit_chunk *chk;
11062 struct sctp_chunkhdr *ch;
11065 asoc = &stcb->asoc;
11066 if (asoc->stream_reset_outstanding) {
11068 * Already one pending, must get ACK back to clear the flag.
11070 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11073 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0)) {
11074 /* nothing to do */
11075 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11078 if (send_tsn_req && (send_out_req || send_in_req)) {
11079 /* error, can't do that */
11080 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11083 sctp_alloc_a_chunk(stcb, chk);
11085 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11088 chk->copy_by_ref = 0;
11089 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11090 chk->rec.chunk_id.can_take_data = 0;
11091 chk->asoc = &stcb->asoc;
11092 chk->book_size = sizeof(struct sctp_chunkhdr);
11093 chk->send_size = SCTP_SIZE32(chk->book_size);
11094 chk->book_size_scale = 0;
11096 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11097 if (chk->data == NULL) {
11098 sctp_free_a_chunk(stcb, chk);
11099 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11102 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11104 /* setup chunk parameters */
11105 chk->sent = SCTP_DATAGRAM_UNSENT;
11106 chk->snd_count = 0;
11107 chk->whoTo = asoc->primary_destination;
11108 atomic_add_int(&chk->whoTo->ref_count, 1);
11110 ch = mtod(chk->data, struct sctp_chunkhdr *);
11111 ch->chunk_type = SCTP_STREAM_RESET;
11112 ch->chunk_flags = 0;
11113 ch->chunk_length = htons(chk->book_size);
11114 SCTP_BUF_LEN(chk->data) = chk->send_size;
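	/*
	 * The chunk starts out as a bare STREAM-RESET chunk header; each
	 * requested operation below (outgoing reset, incoming reset, TSN
	 * reset) is appended as a parameter by its sctp_add_stream_reset_*
	 * helper, which also grows chunk_length/book_size/send_size.
	 */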
11116 seq = stcb->asoc.str_reset_seq_out;
11117 if (send_out_req) {
11118 sctp_add_stream_reset_out(chk, number_entries, list,
11119 seq, resp_seq, (stcb->asoc.sending_seq - 1));
11120 asoc->stream_reset_out_is_outstanding = 1;
11122 asoc->stream_reset_outstanding++;
11125 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11126 asoc->stream_reset_outstanding++;
11128 if (send_tsn_req) {
11129 sctp_add_stream_reset_tsn(chk, seq);
11130 asoc->stream_reset_outstanding++;
11132 asoc->str_reset = chk;
11134 /* insert the chunk for sending */
11135 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11138 asoc->ctrl_queue_cnt++;
11139 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
11144 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
11145 struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
11148 * Formulate the abort message, and send it back down.
11150 struct mbuf *o_pak;
11152 struct sctp_abort_msg *abm;
11153 struct ip *iph, *iph_out;
11154 struct udphdr *udp;
11157 struct ip6_hdr *ip6, *ip6_out;
11160 int iphlen_out, len;
11162 /* don't respond to ABORT with ABORT */
11163 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11165 sctp_m_freem(err_cause);
11168 iph = mtod(m, struct ip *);
11169 switch (iph->ip_v) {
11171 len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
11174 case IPV6_VERSION >> 4:
11175 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
11182 len += sizeof(struct udphdr);
11184 mout = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
11185 if (mout == NULL) {
11187 sctp_m_freem(err_cause);
11190 SCTP_BUF_LEN(mout) = len;
11191 SCTP_BUF_NEXT(mout) = err_cause;
11196 switch (iph->ip_v) {
11198 iph_out = mtod(mout, struct ip *);
11200 /* Fill in the IP header for the ABORT */
11201 iph_out->ip_v = IPVERSION;
11202 iph_out->ip_hl = (sizeof(struct ip) / 4);
11203 iph_out->ip_tos = (u_char)0;
11204 iph_out->ip_id = 0;
11205 iph_out->ip_off = 0;
11206 iph_out->ip_ttl = MAXTTL;
11208 iph_out->ip_p = IPPROTO_UDP;
11210 iph_out->ip_p = IPPROTO_SCTP;
11212 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
11213 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
11214 /* let IP layer calculate this */
11215 iph_out->ip_sum = 0;
11217 iphlen_out = sizeof(*iph_out);
11218 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
11221 case IPV6_VERSION >> 4:
11222 ip6 = (struct ip6_hdr *)iph;
11223 ip6_out = mtod(mout, struct ip6_hdr *);
11225 /* Fill in the IP6 header for the ABORT */
11226 ip6_out->ip6_flow = ip6->ip6_flow;
11227 ip6_out->ip6_hlim = MODULE_GLOBAL(MOD_INET6, ip6_defhlim);
11229 ip6_out->ip6_nxt = IPPROTO_UDP;
11231 ip6_out->ip6_nxt = IPPROTO_SCTP;
11233 ip6_out->ip6_src = ip6->ip6_dst;
11234 ip6_out->ip6_dst = ip6->ip6_src;
11236 iphlen_out = sizeof(*ip6_out);
11237 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
11241 /* Currently not supported */
11243 sctp_m_freem(err_cause);
11244 sctp_m_freem(mout);
11248 udp = (struct udphdr *)abm;
11250 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11251 udp->uh_dport = port;
11252 /* set udp->uh_ulen later */
11254 iphlen_out += sizeof(struct udphdr);
11255 abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
11257 abm->sh.src_port = sh->dest_port;
11258 abm->sh.dest_port = sh->src_port;
11259 abm->sh.checksum = 0;
11261 abm->sh.v_tag = sh->v_tag;
11262 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
11264 abm->sh.v_tag = htonl(vtag);
11265 abm->msg.ch.chunk_flags = 0;
11267 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
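	/*
	 * Verification-tag handling above: when no tag is known we reflect
	 * the one from the incoming packet and mark the chunk with
	 * SCTP_HAD_NO_TCB (the T bit); otherwise the caller-supplied vtag
	 * is used.
	 */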
11270 struct mbuf *m_tmp = err_cause;
11273 /* get length of the err_cause chain */
11274 while (m_tmp != NULL) {
11275 err_len += SCTP_BUF_LEN(m_tmp);
11276 m_tmp = SCTP_BUF_NEXT(m_tmp);
11278 len = SCTP_BUF_LEN(mout) + err_len;
11280 /* need pad at end of chunk */
11281 uint32_t cpthis = 0;
11284 padlen = 4 - (len % 4);
11285 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
11288 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
11290 len = SCTP_BUF_LEN(mout);
11291 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
11295 abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out);
11296 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11298 sctp_m_freem(mout);
11301 if (iph_out != NULL) {
11303 struct sctp_tcb *stcb = NULL;
11306 /* zap the stack pointer to the route */
11307 bzero(&ro, sizeof ro);
11309 udp->uh_ulen = htons(len - sizeof(struct ip));
11310 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11312 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
11313 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
11314 /* set IPv4 length */
11315 iph_out->ip_len = len;
11317 #ifdef SCTP_PACKET_LOGGING
11318 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11319 sctp_packet_log(mout, len);
11321 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11323 SCTP_ENABLE_UDP_CSUM(o_pak);
11325 SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
11327 /* Free the route if we got one back */
11332 if (ip6_out != NULL) {
11333 struct route_in6 ro;
11335 struct sctp_tcb *stcb = NULL;
11336 struct ifnet *ifp = NULL;
11338 /* zap the stack pointer to the route */
11339 bzero(&ro, sizeof(ro));
11341 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
11343 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
11344 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
11345 ip6_out->ip6_plen = len - sizeof(*ip6_out);
11346 #ifdef SCTP_PACKET_LOGGING
11347 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11348 sctp_packet_log(mout, len);
11350 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11352 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11353 udp->uh_sum = 0xffff;
11356 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
11358 /* Free the route if we got one back */
11363 SCTP_STAT_INCR(sctps_sendpackets);
11364 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11368 sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
11369 uint32_t vrf_id, uint16_t port)
11371 struct mbuf *o_pak;
11372 struct sctphdr *ihdr;
11374 struct sctphdr *ohdr;
11375 struct sctp_chunkhdr *ophdr;
11377 struct udphdr *udp = NULL;
11382 struct sockaddr_in6 lsa6, fsa6;
11390 iph = mtod(m, struct ip *);
11391 ihdr = (struct sctphdr *)((caddr_t)iph + iphlen);
11393 SCTP_BUF_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr)), M_DONTWAIT);
11395 /* can't send because we can't add a mbuf */
11398 ohdr = mtod(scm, struct sctphdr *);
11399 ohdr->src_port = ihdr->dest_port;
11400 ohdr->dest_port = ihdr->src_port;
11401 ohdr->v_tag = vtag;
11402 ohdr->checksum = 0;
11403 ophdr = (struct sctp_chunkhdr *)(ohdr + 1);
11404 ophdr->chunk_type = SCTP_OPERATION_ERROR;
11405 ophdr->chunk_flags = 0;
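	/*
	 * Construction sketch: the SCTP common header plus an
	 * OPERATION-ERROR chunk header have been prepended to the supplied
	 * cause chain; the length and checksum are computed over that SCTP
	 * portion, and only then is an IP (and optionally UDP) header
	 * built in front of it.
	 */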
11409 len += SCTP_BUF_LEN(at);
11410 at = SCTP_BUF_NEXT(at);
11412 ophdr->chunk_length = htons(len - sizeof(struct sctphdr));
11415 uint32_t cpthis = 0;
11418 padlen = 4 - (len % 4);
11419 m_copyback(scm, len, padlen, (caddr_t)&cpthis);
11422 val = sctp_calculate_sum(scm, NULL, 0);
11425 mout = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
11427 mout = sctp_get_mbuf_for_msg(sizeof(struct ip6_hdr), 1, M_DONTWAIT, 1, MT_DATA);
11431 mout = sctp_get_mbuf_for_msg(sizeof(struct ip) + sizeof(struct udphdr), 1, M_DONTWAIT, 1, MT_DATA);
11433 mout = sctp_get_mbuf_for_msg(sizeof(struct ip), 1, M_DONTWAIT, 1, MT_DATA);
11436 if (mout == NULL) {
11440 SCTP_BUF_NEXT(mout) = scm;
11441 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11442 sctp_m_freem(mout);
11445 ohdr->checksum = val;
11446 switch (iph->ip_v) {
11452 struct sctp_tcb *stcb = NULL;
11454 SCTP_BUF_LEN(mout) = sizeof(struct ip);
11455 len += sizeof(struct ip);
11457 SCTP_BUF_LEN(mout) += sizeof(struct udphdr);
11458 len += sizeof(struct udphdr);
11460 bzero(&ro, sizeof ro);
11461 out = mtod(mout, struct ip *);
11462 out->ip_v = iph->ip_v;
11463 out->ip_hl = (sizeof(struct ip) / 4);
11464 out->ip_tos = iph->ip_tos;
11465 out->ip_id = iph->ip_id;
11467 out->ip_ttl = MAXTTL;
11469 out->ip_p = IPPROTO_UDP;
11471 out->ip_p = IPPROTO_SCTP;
11474 out->ip_src = iph->ip_dst;
11475 out->ip_dst = iph->ip_src;
11478 udp = (struct udphdr *)(out + 1);
11479 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11480 udp->uh_dport = port;
11481 udp->uh_ulen = htons(len - sizeof(struct ip));
11482 udp->uh_sum = in_pseudo(out->ip_src.s_addr, out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11484 #ifdef SCTP_PACKET_LOGGING
11485 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11486 sctp_packet_log(mout, len);
11488 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11490 SCTP_ENABLE_UDP_CSUM(o_pak);
11492 SCTP_IP_OUTPUT(retcode, o_pak, &ro, stcb, vrf_id);
11494 SCTP_STAT_INCR(sctps_sendpackets);
11495 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11496 /* Free the route if we got one back */
11502 case IPV6_VERSION >> 4:
11505 struct route_in6 ro;
11507 struct sctp_tcb *stcb = NULL;
11508 struct ifnet *ifp = NULL;
11509 struct ip6_hdr *out6, *in6;
11511 SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr);
11512 len += sizeof(struct ip6_hdr);
11513 bzero(&ro, sizeof ro);
11515 SCTP_BUF_LEN(mout) += sizeof(struct udphdr);
11516 len += sizeof(struct udphdr);
11518 in6 = mtod(m, struct ip6_hdr *);
11519 out6 = mtod(mout, struct ip6_hdr *);
11520 out6->ip6_flow = in6->ip6_flow;
11521 out6->ip6_hlim = MODULE_GLOBAL(MOD_INET6, ip6_defhlim);
11523 out6->ip6_nxt = IPPROTO_UDP;
11525 out6->ip6_nxt = IPPROTO_SCTP;
11527 out6->ip6_src = in6->ip6_dst;
11528 out6->ip6_dst = in6->ip6_src;
11529 out6->ip6_plen = len - sizeof(struct ip6_hdr);
11531 udp = (struct udphdr *)(out6 + 1);
11532 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11533 udp->uh_dport = port;
11534 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
11538 bzero(&lsa6, sizeof(lsa6));
11539 lsa6.sin6_len = sizeof(lsa6);
11540 lsa6.sin6_family = AF_INET6;
11541 lsa6.sin6_addr = out6->ip6_src;
11542 bzero(&fsa6, sizeof(fsa6));
11543 fsa6.sin6_len = sizeof(fsa6);
11544 fsa6.sin6_family = AF_INET6;
11545 fsa6.sin6_addr = out6->ip6_dst;
11547 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_operr_to calling ipv6 output:\n");
11548 SCTPDBG(SCTP_DEBUG_OUTPUT2, "src: ");
11549 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&lsa6);
11550 SCTPDBG(SCTP_DEBUG_OUTPUT2, "dst ");
11551 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&fsa6);
11553 #ifdef SCTP_PACKET_LOGGING
11554 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11555 sctp_packet_log(mout, len);
11557 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11559 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11560 udp->uh_sum = 0xffff;
11563 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
11565 SCTP_STAT_INCR(sctps_sendpackets);
11566 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11567 /* Free the route if we got one back */
11579 static struct mbuf *
11580 sctp_copy_resume(struct sctp_stream_queue_pending *sp,
11582 struct sctp_sndrcvinfo *srcv,
11584 int user_marks_eor,
11587 struct mbuf **new_tail)
11591 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
11592 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
11594 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11597 *sndout = m_length(m, NULL);
11598 *new_tail = m_last(m);
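	/*
	 * m_uiotombuf() copied at most max_send_len bytes of the user's
	 * iovec into a fresh mbuf chain; sndout reports how much was
	 * actually taken and new_tail lets the caller splice further
	 * pieces onto the end.
	 */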
11604 sctp_copy_one(struct sctp_stream_queue_pending *sp,
11611 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
11613 if (sp->data == NULL) {
11614 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11617 sp->tail_mbuf = m_last(sp->data);
11623 static struct sctp_stream_queue_pending *
11624 sctp_copy_it_in(struct sctp_tcb *stcb,
11625 struct sctp_association *asoc,
11626 struct sctp_sndrcvinfo *srcv,
11628 struct sctp_nets *net,
11630 int user_marks_eor,
11635 * This routine must be very careful in its work. Protocol
11636 * processing is up and running so care must be taken to spl...()
11637 * when you need to do something that may affect the stcb/asoc. The
11638 * sb is locked however. When data is copied the protocol processing
11639 * should be enabled since this is a slower operation...
11641 struct sctp_stream_queue_pending *sp = NULL;
11645 /* Now can we send this? */
11646 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
11647 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
11648 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
11649 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
11650 /* got data while shutting down */
11651 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
11652 *error = ECONNRESET;
11655 sctp_alloc_a_strmoq(stcb, sp);
11657 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11662 sp->sender_all_done = 0;
11663 sp->sinfo_flags = srcv->sinfo_flags;
11664 sp->timetolive = srcv->sinfo_timetolive;
11665 sp->ppid = srcv->sinfo_ppid;
11666 sp->context = srcv->sinfo_context;
11668 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
11670 sp->stream = srcv->sinfo_stream;
11671 sp->length = min(uio->uio_resid, max_send_len);
11672 if ((sp->length == (uint32_t) uio->uio_resid) &&
11673 ((user_marks_eor == 0) ||
11674 (srcv->sinfo_flags & SCTP_EOF) ||
11675 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
11676 sp->msg_is_complete = 1;
11678 sp->msg_is_complete = 0;
11680 sp->sender_all_done = 0;
11681 sp->some_taken = 0;
11682 sp->put_last_out = 0;
11683 resv_in_first = sizeof(struct sctp_data_chunk);
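	/*
	 * resv_in_first is passed down so (presumably) room for the DATA
	 * chunk header can be reserved at the front of the copied chain,
	 * letting the header be prepended later without another allocation.
	 */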
11684 sp->data = sp->tail_mbuf = NULL;
11685 *error = sctp_copy_one(sp, uio, resv_in_first);
11687 sctp_free_a_strmoq(stcb, sp);
11690 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
11694 sp->net = asoc->primary_destination;
11697 atomic_add_int(&sp->net->ref_count, 1);
11698 sctp_set_prsctp_policy(stcb, sp);
11706 sctp_sosend(struct socket *so,
11707 struct sockaddr *addr,
11710 struct mbuf *control,
11715 struct sctp_inpcb *inp;
11716 int error, use_rcvinfo = 0;
11717 struct sctp_sndrcvinfo srcv;
11718 struct sockaddr *addr_to_use;
11721 struct sockaddr_in sin;
11725 inp = (struct sctp_inpcb *)so->so_pcb;
11727 /* process cmsg snd/rcv info (maybe a assoc-id) */
11728 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
11734 addr_to_use = addr;
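	/*
	 * If the caller handed us an IPv4-mapped IPv6 address, convert it
	 * to a plain sockaddr_in below so the lookup and the lower send
	 * path only ever see a real IPv4 address.
	 */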
11735 #if defined(INET6) && !defined(__Userspace__) /* TODO port in6_sin6_2_sin */
11736 if ((addr) && (addr->sa_family == AF_INET6)) {
11737 struct sockaddr_in6 *sin6;
11739 sin6 = (struct sockaddr_in6 *)addr;
11740 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
11741 in6_sin6_2_sin(&sin, sin6);
11742 addr_to_use = (struct sockaddr *)&sin;
11746 error = sctp_lower_sosend(so, addr_to_use, uio, top,
11757 sctp_lower_sosend(struct socket *so,
11758 struct sockaddr *addr,
11760 struct mbuf *i_pak,
11761 struct mbuf *control,
11764 struct sctp_sndrcvinfo *srcv
11769 unsigned int sndlen = 0, max_len;
11771 struct mbuf *top = NULL;
11772 int queue_only = 0, queue_only_for_init = 0;
11773 int free_cnt_applied = 0;
11775 int now_filled = 0;
11776 unsigned int inqueue_bytes = 0;
11777 struct sctp_block_entry be;
11778 struct sctp_inpcb *inp;
11779 struct sctp_tcb *stcb = NULL;
11780 struct timeval now;
11781 struct sctp_nets *net;
11782 struct sctp_association *asoc;
11783 struct sctp_inpcb *t_inp;
11784 int user_marks_eor;
11785 int create_lock_applied = 0;
11786 int nagle_applies = 0;
11787 int some_on_control = 0;
11788 int got_all_of_the_send = 0;
11789 int hold_tcblock = 0;
11790 int non_blocking = 0;
11791 int temp_flags = 0;
11792 uint32_t local_add_more, local_soresv = 0;
11799 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
11801 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11804 SCTP_RELEASE_PKT(i_pak);
11808 if ((uio == NULL) && (i_pak == NULL)) {
11809 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11812 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
11813 atomic_add_int(&inp->total_sends, 1);
11815 if (uio->uio_resid < 0) {
11816 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11819 sndlen = uio->uio_resid;
11821 top = SCTP_HEADER_TO_CHAIN(i_pak);
11822 sndlen = SCTP_HEADER_LEN(i_pak);
11824 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
11828 * Pre-screen address, if one is given the sin-len
11829 * must be set correctly!
11832 if ((addr->sa_family == AF_INET) &&
11833 (addr->sa_len != sizeof(struct sockaddr_in))) {
11834 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11837 } else if ((addr->sa_family == AF_INET6) &&
11838 (addr->sa_len != sizeof(struct sockaddr_in6))) {
11839 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11846 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
11847 (inp->sctp_socket->so_qlimit)) {
11848 /* The listener can NOT send */
11849 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11853 if ((use_rcvinfo) && srcv) {
11854 if (INVALID_SINFO_FLAG(srcv->sinfo_flags) ||
11855 PR_SCTP_INVALID_POLICY(srcv->sinfo_flags)) {
11856 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11860 if (srcv->sinfo_flags)
11861 SCTP_STAT_INCR(sctps_sends_with_flags);
11863 if (srcv->sinfo_flags & SCTP_SENDALL) {
11864 /* it's a sendall */
11865 error = sctp_sendall(inp, uio, top, srcv);
11870 /* now we must find the assoc */
11871 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
11872 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
11873 SCTP_INP_RLOCK(inp);
11874 stcb = LIST_FIRST(&inp->sctp_asoc_list);
11875 if (stcb == NULL) {
11876 SCTP_INP_RUNLOCK(inp);
11877 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
11882 SCTP_INP_RUNLOCK(inp);
11884 /* Must locate the net structure if addr given */
11885 net = sctp_findnet(stcb, addr);
11887 /* validate port was 0 or correct */
11888 struct sockaddr_in *sin;
11890 sin = (struct sockaddr_in *)addr;
11891 if ((sin->sin_port != 0) &&
11892 (sin->sin_port != stcb->rport)) {
11896 temp_flags |= SCTP_ADDR_OVER;
11898 net = stcb->asoc.primary_destination;
11899 if (addr && (net == NULL)) {
11900 /* Could not find address, was it legal */
11901 if (addr->sa_family == AF_INET) {
11902 struct sockaddr_in *sin;
11904 sin = (struct sockaddr_in *)addr;
11905 if (sin->sin_addr.s_addr == 0) {
11906 if ((sin->sin_port == 0) ||
11907 (sin->sin_port == stcb->rport)) {
11908 net = stcb->asoc.primary_destination;
11912 struct sockaddr_in6 *sin6;
11914 sin6 = (struct sockaddr_in6 *)addr;
11915 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
11916 if ((sin6->sin6_port == 0) ||
11917 (sin6->sin6_port == stcb->rport)) {
11918 net = stcb->asoc.primary_destination;
11924 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11928 } else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) {
11929 stcb = sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0);
11933 * Must locate the net structure if addr
11936 net = sctp_findnet(stcb, addr);
11938 net = stcb->asoc.primary_destination;
11939 if ((srcv->sinfo_flags & SCTP_ADDR_OVER) &&
11940 ((net == NULL) || (addr == NULL))) {
11941 struct sockaddr_in *sin;
11943 if (addr == NULL) {
11944 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11948 sin = (struct sockaddr_in *)addr;
11949 /* Validate port is 0 or correct */
11950 if ((sin->sin_port != 0) &&
11951 (sin->sin_port != stcb->rport)) {
11959 * Since we did not use findep we must
11960 * increment it, and if we don't find a tcb
11963 SCTP_INP_WLOCK(inp);
11964 SCTP_INP_INCR_REF(inp);
11965 SCTP_INP_WUNLOCK(inp);
11966 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
11967 if (stcb == NULL) {
11968 SCTP_INP_WLOCK(inp);
11969 SCTP_INP_DECR_REF(inp);
11970 SCTP_INP_WUNLOCK(inp);
11975 if ((stcb == NULL) && (addr)) {
11976 /* Possible implicit send? */
11977 SCTP_ASOC_CREATE_LOCK(inp);
11978 create_lock_applied = 1;
11979 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
11980 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
11981 /* Should I really unlock ? */
11982 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
11987 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
11988 (addr->sa_family == AF_INET6)) {
11989 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11993 SCTP_INP_WLOCK(inp);
11994 SCTP_INP_INCR_REF(inp);
11995 SCTP_INP_WUNLOCK(inp);
11996 /* With the lock applied look again */
11997 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
11998 if (stcb == NULL) {
11999 SCTP_INP_WLOCK(inp);
12000 SCTP_INP_DECR_REF(inp);
12001 SCTP_INP_WUNLOCK(inp);
12005 if (t_inp != inp) {
12006 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12011 if (stcb == NULL) {
12012 if (addr == NULL) {
12013 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12018 * UDP style, we must go ahead and start the INIT
12023 if ((use_rcvinfo) && (srcv) &&
12024 ((srcv->sinfo_flags & SCTP_ABORT) ||
12025 ((srcv->sinfo_flags & SCTP_EOF) &&
12028 * User asks to abort a non-existent assoc,
12029 * or EOF a non-existent assoc with no data
12031 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12035 /* get an asoc/stcb struct */
12036 vrf_id = inp->def_vrf_id;
12038 if (create_lock_applied == 0) {
12039 panic("Error, should hold create lock and I don't?");
12042 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id,
12045 if (stcb == NULL) {
12046 /* Error is setup for us in the call */
12049 if (create_lock_applied) {
12050 SCTP_ASOC_CREATE_UNLOCK(inp);
12051 create_lock_applied = 0;
12053 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12056 * Turn on queue only flag to prevent data from
12060 asoc = &stcb->asoc;
12061 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12062 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12064 /* initialize authentication params for the assoc */
12065 sctp_initialize_auth_params(inp, stcb);
12069 * see if an init structure exists in cmsg
12072 struct sctp_initmsg initm;
12075 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
12078 * we have an INIT override of the
12081 if (initm.sinit_max_attempts)
12082 asoc->max_init_times = initm.sinit_max_attempts;
12083 if (initm.sinit_num_ostreams)
12084 asoc->pre_open_streams = initm.sinit_num_ostreams;
12085 if (initm.sinit_max_instreams)
12086 asoc->max_inbound_streams = initm.sinit_max_instreams;
12087 if (initm.sinit_max_init_timeo)
12088 asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
12089 if (asoc->streamoutcnt < asoc->pre_open_streams) {
12090 /* Default is NOT correct */
12091 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, defout:%d pre_open:%d\n",
12092 asoc->streamoutcnt, asoc->pre_open_streams);
12094 * What happens if this
12095 * fails? we panic ...
12098 struct sctp_stream_out *tmp_str;
12101 if (hold_tcblock) {
12103 SCTP_TCB_UNLOCK(stcb);
12105 SCTP_MALLOC(tmp_str,
12106 struct sctp_stream_out *,
12107 (asoc->pre_open_streams *
12108 sizeof(struct sctp_stream_out)),
12111 SCTP_TCB_LOCK(stcb);
12113 if (tmp_str != NULL) {
12114 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
12115 asoc->strmout = tmp_str;
12116 asoc->streamoutcnt = asoc->pre_open_streams;
12118 asoc->pre_open_streams = asoc->streamoutcnt;
12121 for (i = 0; i < asoc->streamoutcnt; i++) {
12123 * inbound side must be set
12124 * to 0xffff, also NOTE when
12125 * we get the INIT-ACK back
12126 * (for INIT sender) we MUST
12128 * reduce the count (streamoutcnt) but first
12129 * check if we sent to any
12130 * of the upper streams that
12131 * were dropped (if some
12132 * were). Those that were
12133 * dropped must be notified
12134 * to the upper layer as
12137 asoc->strmout[i].next_sequence_sent = 0x0;
12138 TAILQ_INIT(&asoc->strmout[i].outqueue);
12139 asoc->strmout[i].stream_no = i;
12140 asoc->strmout[i].last_msg_incomplete = 0;
12141 asoc->strmout[i].next_spoke.tqe_next = 0;
12142 asoc->strmout[i].next_spoke.tqe_prev = 0;
12148 /* out with the INIT */
12149 queue_only_for_init = 1;
12151 * we may want to dig in after this call and adjust the MTU
12152 * value. It defaulted to 1500 (constant) but the ro
12153 * structure may now have an update and thus we may need to
12154 * change it BEFORE we append the message.
12156 net = stcb->asoc.primary_destination;
12157 asoc = &stcb->asoc;
12160 if ((SCTP_SO_IS_NBIO(so)
12161 || (flags & MSG_NBIO)
12165 asoc = &stcb->asoc;
12167 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12168 if (sndlen > asoc->smallest_mtu) {
12169 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12174 /* would we block? */
12175 if (non_blocking) {
12176 if (hold_tcblock == 0) {
12177 SCTP_TCB_LOCK(stcb);
12180 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12181 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12182 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12183 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12184 if (sndlen > SCTP_SB_LIMIT_SND(so))
12187 error = EWOULDBLOCK;
12190 stcb->asoc.sb_send_resv += sndlen;
12191 SCTP_TCB_UNLOCK(stcb);
12194 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12196 local_soresv = sndlen;
12197 /* Keep the stcb from being freed under our feet */
12198 if (free_cnt_applied) {
12200 panic("refcnt already incremented");
12202 printf("refcnt:1 already incremented?\n");
12205 atomic_add_int(&stcb->asoc.refcnt, 1);
12206 free_cnt_applied = 1;
12208 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12209 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12210 error = ECONNRESET;
12213 if (create_lock_applied) {
12214 SCTP_ASOC_CREATE_UNLOCK(inp);
12215 create_lock_applied = 0;
12217 if (asoc->stream_reset_outstanding) {
12219 * Can't queue any data while stream reset is underway.
12221 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
12225 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12226 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12229 if ((use_rcvinfo == 0) || (srcv == NULL)) {
12230 /* Grab the default stuff from the asoc */
12231 srcv = (struct sctp_sndrcvinfo *)&stcb->asoc.def_send;
12233 /* we are now done with all control */
12235 sctp_m_freem(control);
12238 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12239 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12240 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12241 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12242 if ((use_rcvinfo) &&
12243 (srcv->sinfo_flags & SCTP_ABORT)) {
12246 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12247 error = ECONNRESET;
12251 /* Ok, we will attempt a msgsnd :> */
12253 p->td_ru.ru_msgsnd++;
12256 if (((srcv->sinfo_flags | temp_flags) & SCTP_ADDR_OVER) == 0) {
12257 net = stcb->asoc.primary_destination;
12261 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12265 if ((net->flight_size > net->cwnd) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
12267 * CMT: Added check for CMT above. net above is the primary
12268 * dest. If CMT is ON, sender should always attempt to send
12269 * with the output routine sctp_fill_outqueue() that loops
12270 * through all destination addresses. Therefore, if CMT is
12271 * ON, queue_only is NOT set to 1 here, so that
12272 * sctp_chunk_output() can be called below.
12276 } else if (asoc->ifp_had_enobuf) {
12277 SCTP_STAT_INCR(sctps_ifnomemqueued);
12278 if (net->flight_size > (net->mtu * 2))
12280 asoc->ifp_had_enobuf = 0;
12282 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
12283 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
12285 /* Are we aborting? */
12286 if (srcv->sinfo_flags & SCTP_ABORT) {
12288 int tot_demand, tot_out = 0, max_out;
12290 SCTP_STAT_INCR(sctps_sends_with_abort);
12291 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12292 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12293 /* It has to be up before we abort */
12294 /* how big is the user initiated abort? */
12295 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12299 if (hold_tcblock) {
12300 SCTP_TCB_UNLOCK(stcb);
12304 struct mbuf *cntm = NULL;
12306 mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);
12310 tot_out += SCTP_BUF_LEN(cntm);
12311 cntm = SCTP_BUF_NEXT(cntm);
12314 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12316 /* Must fit in a MTU */
12318 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
12319 if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
12321 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12325 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
12328 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12332 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
12333 max_out -= sizeof(struct sctp_abort_msg);
12334 if (tot_out > max_out) {
12338 struct sctp_paramhdr *ph;
12340 /* now move forward the data pointer */
12341 ph = mtod(mm, struct sctp_paramhdr *);
12342 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
12343 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
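			/*
			 * The abort cause being built is a User-Initiated
			 * Abort: a parameter header followed by the caller's
			 * message, which uiomove() (or the supplied mbuf
			 * chain) provides right after the header.
			 */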
12345 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
12347 error = uiomove((caddr_t)ph, (int)tot_out, uio);
12350 * Here if we can't get the user's data we
12351 * still abort; we just don't get to
12352 * send the user's note :-0
12359 SCTP_BUF_NEXT(mm) = top;
12363 if (hold_tcblock == 0) {
12364 SCTP_TCB_LOCK(stcb);
12367 atomic_add_int(&stcb->asoc.refcnt, -1);
12368 free_cnt_applied = 0;
12369 /* release this lock, otherwise we hang on ourselves */
12370 sctp_abort_an_association(stcb->sctp_ep, stcb,
12371 SCTP_RESPONSE_TO_USER_REQ,
12372 mm, SCTP_SO_LOCKED);
12373 /* now relock the stcb so everything is sane */
12377 * In this case top is already chained to mm, so avoid a double
12378 * free: we free top below if top != NULL, and the driver
12379 * would free it again after sending the packet out
12386 /* Calculate the maximum we can send */
12387 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12388 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12389 if (non_blocking) {
12390 /* we already checked for non-blocking above. */
12393 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12398 if (hold_tcblock) {
12399 SCTP_TCB_UNLOCK(stcb);
12402 /* Is the stream no. valid? */
12403 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
12404 /* Invalid stream number */
12405 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12409 if (asoc->strmout == NULL) {
12410 /* huh? software error */
12411 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
12415 /* Unless E_EOR mode is on, we must make a send FIT in one call. */
12416 if ((user_marks_eor == 0) &&
12417 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
12418 /* It will NEVER fit */
12419 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12423 if ((uio == NULL) && user_marks_eor) {
12425 * We do not support eeor mode for
12426 * sending with mbuf chains (like sendfile).
12428 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12432 if (user_marks_eor) {
12433 local_add_more = SCTP_BASE_SYSCTL(sctp_add_more_threshold);
12436 * For non-eeor the whole message must fit in
12437 * the socket send buffer.
12439 local_add_more = sndlen;
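	/*
	 * Pre-block: for blocking sockets, if there is not room for at
	 * least local_add_more bytes (or too many chunks are already
	 * queued), sleep on the send buffer until space frees up, the
	 * socket errors out, or the association is torn down.
	 * Non-blocking sockets skip this and fail fast instead.
	 */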
12442 if (non_blocking) {
12443 goto skip_preblock;
12445 if (((max_len <= local_add_more) &&
12446 (SCTP_SB_LIMIT_SND(so) > local_add_more)) ||
12447 ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { /* if */
12448 /* No room right now ! */
12449 SOCKBUF_LOCK(&so->so_snd);
12450 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12451 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
12452 ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) /* while */ )) {
12454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12455 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA,
12459 stcb->block_entry = &be;
12460 error = sbwait(&so->so_snd);
12461 stcb->block_entry = NULL;
12462 if (error || so->so_error || be.error) {
12465 error = so->so_error;
12470 SOCKBUF_UNLOCK(&so->so_snd);
12473 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12474 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
12475 so, asoc, stcb->asoc.total_output_queue_size);
12477 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12480 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12482 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12483 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
12484 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12488 SOCKBUF_UNLOCK(&so->so_snd);
12491 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12494 atomic_add_int(&stcb->total_sends, 1);
12496 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
12497 * case. NOTE: uio will be NULL when top/mbuf is passed
12500 if (srcv->sinfo_flags & SCTP_EOF) {
12501 got_all_of_the_send = 1;
12504 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12510 struct sctp_stream_queue_pending *sp;
12511 struct sctp_stream_out *strm;
12512 uint32_t sndout, initial_out;
12514 initial_out = uio->uio_resid;
12516 SCTP_TCB_SEND_LOCK(stcb);
12517 if ((asoc->stream_locked) &&
12518 (asoc->stream_locked_on != srcv->sinfo_stream)) {
12519 SCTP_TCB_SEND_UNLOCK(stcb);
12520 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12524 SCTP_TCB_SEND_UNLOCK(stcb);
12526 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
12527 if (strm->last_msg_incomplete == 0) {
12529 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
12530 if ((sp == NULL) || (error)) {
12533 SCTP_TCB_SEND_LOCK(stcb);
12534 if (sp->msg_is_complete) {
12535 strm->last_msg_incomplete = 0;
12536 asoc->stream_locked = 0;
12539 * Just got locked to this guy in case of an
12542 strm->last_msg_incomplete = 1;
12543 asoc->stream_locked = 1;
12544 asoc->stream_locked_on = srcv->sinfo_stream;
12545 sp->sender_all_done = 0;
12547 sctp_snd_sb_alloc(stcb, sp->length);
12548 atomic_add_int(&asoc->stream_queue_cnt, 1);
12549 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
12550 sp->strseq = strm->next_sequence_sent;
12551 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
12552 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
12553 (uintptr_t) stcb, sp->length,
12554 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
12556 strm->next_sequence_sent++;
12558 SCTP_STAT_INCR(sctps_sends_with_unord);
12560 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
12561 if ((strm->next_spoke.tqe_next == NULL) &&
12562 (strm->next_spoke.tqe_prev == NULL)) {
12563 /* Not on wheel, insert */
12564 sctp_insert_on_wheel(stcb, asoc, strm, 1);
12566 SCTP_TCB_SEND_UNLOCK(stcb);
12568 SCTP_TCB_SEND_LOCK(stcb);
12569 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
12570 SCTP_TCB_SEND_UNLOCK(stcb);
12572 /* ???? Huh ??? last msg is gone */
12574 panic("Warning: Last msg marked incomplete, yet nothing left?");
12576 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
12577 strm->last_msg_incomplete = 0;
12583 while (uio->uio_resid > 0) {
12584 /* How much room do we have? */
12585 struct mbuf *new_tail, *mm;
12587 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
12588 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
12592 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
12593 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
12595 (uio->uio_resid <= (int)max_len))) {
12598 if (hold_tcblock) {
12599 SCTP_TCB_UNLOCK(stcb);
12602 mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
12603 if ((mm == NULL) || error) {
12609 /* Update the mbuf and count */
12610 SCTP_TCB_SEND_LOCK(stcb);
12611 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12613 * we need to get out. Peer probably
12617 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
12618 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12619 error = ECONNRESET;
12621 SCTP_TCB_SEND_UNLOCK(stcb);
12624 if (sp->tail_mbuf) {
12625 /* tack it to the end */
12626 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
12627 sp->tail_mbuf = new_tail;
12629 /* A stolen mbuf */
12631 sp->tail_mbuf = new_tail;
12633 sctp_snd_sb_alloc(stcb, sndout);
12634 atomic_add_int(&sp->length, sndout);
12637 /* Did we reach EOR? */
12638 if ((uio->uio_resid == 0) &&
12639 ((user_marks_eor == 0) ||
12640 (srcv->sinfo_flags & SCTP_EOF) ||
12641 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
12643 sp->msg_is_complete = 1;
12645 sp->msg_is_complete = 0;
12647 SCTP_TCB_SEND_UNLOCK(stcb);
12649 if (uio->uio_resid == 0) {
12654 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
12656 * This is ugly but we must assure locking
12659 if (hold_tcblock == 0) {
12660 SCTP_TCB_LOCK(stcb);
12663 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
12664 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12665 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
12666 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
12672 SCTP_TCB_UNLOCK(stcb);
12675 /* wait for space now */
12676 if (non_blocking) {
12677 /* Non-blocking io in place out */
12680 if ((net->flight_size > net->cwnd) &&
12681 (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
12683 } else if (asoc->ifp_had_enobuf) {
12684 SCTP_STAT_INCR(sctps_ifnomemqueued);
12685 if (net->flight_size > (net->mtu * 2)) {
12690 asoc->ifp_had_enobuf = 0;
12691 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
12692 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
12694 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
12695 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
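/*
 * un_sent estimates the bytes queued but not yet in flight: everything on
 * the output queue minus what is outstanding, plus one DATA chunk header
 * per queued stream message. It feeds the burst and Nagle checks below.
 */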
12696 if (net->flight_size > (net->mtu * stcb->asoc.max_burst)) {
12698 SCTP_STAT_INCR(sctps_send_burst_avoid);
12699 } else if (net->flight_size > net->cwnd) {
12701 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
12706 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
12707 (stcb->asoc.total_flight > 0) &&
12708 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
12709 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
12713 * OK, Nagle is enabled and we have data outstanding.
12714 * Don't send anything and let SACKs drive out the
12715 * data unless we have a "full" segment to send.
12717 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
12718 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
12720 SCTP_STAT_INCR(sctps_naglequeued);
12723 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
12724 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
12725 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
12727 SCTP_STAT_INCR(sctps_naglesent);
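/*
 * Rough illustration: with a smallest MTU of 1500 bytes the Nagle test
 * above holds data back only while less than roughly 1.4 kB (the MTU
 * minus IP and SCTP header overhead) is waiting to be sent and some data
 * is still in flight; a full segment's worth of data, or NODELAY, is
 * never held back by Nagle.
 */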
12730 /* What about the INIT, send it maybe */
12731 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12733 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
12734 nagle_applies, un_sent);
12735 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
12736 stcb->asoc.total_flight,
12737 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
12739 if (queue_only_for_init) {
12740 if (hold_tcblock == 0) {
12741 SCTP_TCB_LOCK(stcb);
12744 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
12745 /* a collision took us forward? */
12746 queue_only_for_init = 0;
12749 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
12750 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12751 queue_only_for_init = 0;
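/*
 * The association was still being set up, so the INIT is sent now and the
 * state moves to COOKIE-WAIT; the user data stays queued until the
 * handshake completes and normal output takes over.
 */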
12755 if ((queue_only == 0) && (nagle_applies == 0)
12758 * need to start chunk output
12759 * before blocking. Note that if
12760 * a lock is already applied, then
12761 * input from the network is in progress
12762 * and it will start output, so we don't need to here.
12764 if (hold_tcblock == 0) {
12765 if (SCTP_TCB_TRYLOCK(stcb)) {
12767 sctp_chunk_output(inp,
12769 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
12772 sctp_chunk_output(inp,
12774 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
12776 if (hold_tcblock == 1) {
12777 SCTP_TCB_UNLOCK(stcb);
12781 SOCKBUF_LOCK(&so->so_snd);
12783 * This is a bit strange, but it should
12784 * work. The total_output_queue_size is
12785 * protected by the TCB_LOCK, which we just released.
12786 * A race can occur between releasing it
12787 * above and acquiring the socket lock here: SACKs may
12788 * come in before we have set SB_WAIT on the
12789 * so_snd buffer to request the wakeup. However, once the
12790 * socket buffer lock is held, SACK processing must also
12791 * lock so->so_snd to do the actual sowwakeup(). So
12792 * if we recheck the size while holding the socket buffer lock,
12793 * we KNOW we can go to sleep safely with the
12794 * wakeup flag in place.
12796 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
12797 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))
12799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12800 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
12801 so, asoc, uio->uio_resid);
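/*
 * Register a block entry with the TCB before sleeping in sbwait(): if the
 * association is torn down while we wait, that path can post an error
 * into be.error and wake us, which is checked right after the sleep.
 */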
12804 stcb->block_entry = &be;
12805 error = sbwait(&so->so_snd);
12806 stcb->block_entry = NULL;
12808 if (error || so->so_error || be.error) {
12811 error = so->so_error;
12816 SOCKBUF_UNLOCK(&so->so_snd);
12819 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
12820 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
12821 so, asoc, stcb->asoc.total_output_queue_size);
12824 SOCKBUF_UNLOCK(&so->so_snd);
12825 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12829 SCTP_TCB_SEND_LOCK(stcb);
12831 if (sp->msg_is_complete == 0) {
12832 strm->last_msg_incomplete = 1;
12833 asoc->stream_locked = 1;
12834 asoc->stream_locked_on = srcv->sinfo_stream;
12836 sp->sender_all_done = 1;
12837 strm->last_msg_incomplete = 0;
12838 asoc->stream_locked = 0;
12841 SCTP_PRINTF("Huh no sp TSNH?\n");
12842 strm->last_msg_incomplete = 0;
12843 asoc->stream_locked = 0;
12845 SCTP_TCB_SEND_UNLOCK(stcb);
12846 if (uio->uio_resid == 0) {
12847 got_all_of_the_send = 1;
12850 /* We send in a 0, since we do NOT have any locks */
12851 error = sctp_msg_append(stcb, net, top, srcv, 0);
12853 if (srcv->sinfo_flags & SCTP_EOF) {
12855 * This should only happen for the Panda mbuf
12856 * send case, which does NOT yet support EEOR mode.
12857 * Thus, we can just set this flag to get the proper EOF handling.
12860 got_all_of_the_send = 1;
12868 if ((srcv->sinfo_flags & SCTP_EOF) &&
12869 (got_all_of_the_send == 1) &&
12870 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)
12874 SCTP_STAT_INCR(sctps_sends_with_eof);
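/*
 * SCTP_EOF with all data handed off: begin a graceful shutdown of this
 * association. If nothing is left queued, SHUTDOWN is sent right away;
 * otherwise SHUTDOWN_PENDING is set below so the shutdown happens once
 * the queues drain.
 */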
12876 if (hold_tcblock == 0) {
12877 SCTP_TCB_LOCK(stcb);
12880 cnt = sctp_is_there_unsent_data(stcb);
12881 if (TAILQ_EMPTY(&asoc->send_queue) &&
12882 TAILQ_EMPTY(&asoc->sent_queue) &&
12884 if (asoc->locked_on_sending) {
12887 /* there is nothing queued to send, so I'm done... */
12888 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
12889 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
12890 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
12891 /* only send SHUTDOWN the first time through */
12892 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
12893 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
12894 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
12896 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
12897 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
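/*
 * Arm the SHUTDOWN retransmission timer and the guard timer (which bounds
 * how long we stay in the shutdown states) on the primary destination.
 */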
12898 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
12899 asoc->primary_destination);
12900 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
12901 asoc->primary_destination);
12905 * we still have (or just got) data to send, so set SHUTDOWN_PENDING
12909 * XXX The sockets draft says that SCTP_EOF should be
12910 * sent with no data. Currently, we allow user
12911 * data to be sent first and then move to SHUTDOWN-PENDING
12914 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
12915 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
12916 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
12917 if (hold_tcblock == 0) {
12918 SCTP_TCB_LOCK(stcb);
12921 if (asoc->locked_on_sending) {
12922 /* Locked to send out the data */
12923 struct sctp_stream_queue_pending *sp;
12925 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
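/*
 * If the stream we are locked on still has a pending entry with no data
 * and not marked complete, the user issued EOF in the middle of a
 * message. Flag PARTIAL_MSG_LEFT; the check below aborts the association
 * rather than sending a truncated message.
 */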
12927 if ((sp->length == 0) && (sp->msg_is_complete == 0))
12928 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
12931 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
12932 if (TAILQ_EMPTY(&asoc->send_queue) &&
12933 TAILQ_EMPTY(&asoc->sent_queue) &&
12934 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
12936 if (free_cnt_applied) {
12937 atomic_add_int(&stcb->asoc.refcnt, -1);
12938 free_cnt_applied = 0;
12940 sctp_abort_an_association(stcb->sctp_ep, stcb,
12941 SCTP_RESPONSE_TO_USER_REQ,
12942 NULL, SCTP_SO_LOCKED);
12944 * now relock the stcb so everything is sane
12951 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
12952 asoc->primary_destination);
12953 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
12958 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
12959 some_on_control = 1;
12961 if ((net->flight_size > net->cwnd) &&
12962 (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
12964 } else if (asoc->ifp_had_enobuf) {
12965 SCTP_STAT_INCR(sctps_ifnomemqueued);
12966 if (net->flight_size > (net->mtu * 2)) {
12971 asoc->ifp_had_enobuf = 0;
12972 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
12973 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
12975 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
12976 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
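/*
 * The send-space and Nagle checks are repeated here because the amounts
 * queued and in flight may have changed while the message was being
 * copied in or while we slept waiting for socket-buffer space.
 */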
12977 if (net->flight_size > (net->mtu * stcb->asoc.max_burst)) {
12979 SCTP_STAT_INCR(sctps_send_burst_avoid);
12980 } else if (net->flight_size > net->cwnd) {
12982 SCTP_STAT_INCR(sctps_send_cwnd_avoid);
12987 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
12988 (stcb->asoc.total_flight > 0) &&
12989 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
12990 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
12993 * OK, Nagle is enabled and we have data outstanding.
12994 * Don't send anything and let SACKs drive out the
12995 * data unless we have a "full" segment to send.
12997 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
12998 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
13000 SCTP_STAT_INCR(sctps_naglequeued);
13003 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
13004 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
13005 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
13007 SCTP_STAT_INCR(sctps_naglesent);
13010 if (queue_only_for_init) {
13011 if (hold_tcblock == 0) {
13012 SCTP_TCB_LOCK(stcb);
13015 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
13016 /* a collision took us forward? */
13017 queue_only_for_init = 0;
13020 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
13021 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
13022 queue_only_for_init = 0;
13026 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
13027 /* we can attempt to send too. */
13028 if (hold_tcblock == 0) {
13030 * If there is activity receiving SACKs, there is no need to start output here.
13033 if (SCTP_TCB_TRYLOCK(stcb)) {
13034 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13038 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13040 } else if ((queue_only == 0) &&
13041 (stcb->asoc.peers_rwnd == 0) &&
13042 (stcb->asoc.total_flight == 0)) {
13043 /* We get to have a probe outstanding */
13044 if (hold_tcblock == 0) {
13046 SCTP_TCB_LOCK(stcb);
13048 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
13049 } else if (some_on_control) {
13050 int num_out, reason, cwnd_full, frag_point;
13052 /* Here we do control only */
13053 if (hold_tcblock == 0) {
13055 SCTP_TCB_LOCK(stcb);
13057 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
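/*
 * frag_point is the largest chunk payload this association will emit
 * (bounded by the smallest path MTU); sctp_med_chunk_output() uses it
 * while building the packets that carry the queued control chunks.
 */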
13058 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
13059 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
13061 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d",
13062 queue_only, stcb->asoc.peers_rwnd, un_sent,
13063 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
13064 stcb->asoc.total_output_queue_size, error);
13069 if (local_soresv && stcb) {
13070 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
13073 if (create_lock_applied) {
13074 SCTP_ASOC_CREATE_UNLOCK(inp);
13075 create_lock_applied = 0;
13077 if ((stcb) && hold_tcblock) {
13078 SCTP_TCB_UNLOCK(stcb);
13080 if (stcb && free_cnt_applied) {
13081 atomic_add_int(&stcb->asoc.refcnt, -1);
13085 if (mtx_owned(&stcb->tcb_mtx)) {
13086 panic("Leaving with tcb mtx owned?");
13088 if (mtx_owned(&stcb->tcb_send_mtx)) {
13089 panic("Leaving with tcb send mtx owned?");
13097 sctp_m_freem(control);
13104 * generate an AUTHentication chunk, if required
13107 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
13108 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
13109 struct sctp_tcb *stcb, uint8_t chunk)
13111 struct mbuf *m_auth;
13112 struct sctp_auth_chunk *auth;
13115 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
13119 /* sysctl disabled auth? */
13120 if (SCTP_BASE_SYSCTL(sctp_auth_disable))
13123 /* peer doesn't do auth... */
13124 if (!stcb->asoc.peer_supports_auth) {
13127 /* does the requested chunk require auth? */
13128 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
13131 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
13132 if (m_auth == NULL) {
13136 /* reserve some space if this will be the first mbuf */
13138 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
13139 /* fill in the AUTH chunk details */
13140 auth = mtod(m_auth, struct sctp_auth_chunk *);
13141 bzero(auth, sizeof(*auth));
13142 auth->ch.chunk_type = SCTP_AUTHENTICATION;
13143 auth->ch.chunk_flags = 0;
13144 chunk_len = sizeof(*auth) +
13145 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
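/*
 * The chunk length covers the fixed AUTH chunk header (type, flags,
 * length, shared key id and HMAC identifier) plus the digest for the HMAC
 * the peer selected, e.g. 20 bytes for HMAC-SHA-1.
 */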
13146 auth->ch.chunk_length = htons(chunk_len);
13147 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
13148 /* key id and hmac digest will be computed and filled in upon send */
13150 /* save the offset where the auth was inserted into the chain */
13157 *offset += SCTP_BUF_LEN(cn);
13158 cn = SCTP_BUF_NEXT(cn);
13163 /* update length and return pointer to the auth chunk */
13164 SCTP_BUF_LEN(m_auth) = chunk_len;
13165 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
13166 if (auth_ret != NULL)
13174 sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
13176 struct nd_prefix *pfx = NULL;
13177 struct nd_pfxrouter *pfxrtr = NULL;
13178 struct sockaddr_in6 gw6;
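/*
 * Check whether the nexthop of the installed route is one of the routers
 * that advertised the prefix covering src6, i.e. whether the chosen IPv6
 * source address is consistent with the route being used.
 */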
13180 if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
13183 /* get prefix entry of address */
13184 LIST_FOREACH(pfx, &MODULE_GLOBAL(MOD_INET6, nd_prefix), ndpr_entry) {
13185 if (pfx->ndpr_stateflags & NDPRF_DETACHED)
13187 if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
13188 &src6->sin6_addr, &pfx->ndpr_mask))
13191 /* no prefix entry in the prefix list */
13193 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
13194 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13197 SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
13198 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
13200 /* search installed gateway from prefix entry */
13201 for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
13202 pfxrtr->pfr_next) {
13203 memset(&gw6, 0, sizeof(struct sockaddr_in6));
13204 gw6.sin6_family = AF_INET6;
13205 gw6.sin6_len = sizeof(struct sockaddr_in6);
13206 memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
13207 sizeof(struct in6_addr));
13208 SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
13209 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
13210 SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
13211 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13212 if (sctp_cmpaddr((struct sockaddr *)&gw6,
13213 ro->ro_rt->rt_gateway)) {
13214 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
13218 SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
13225 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
13227 struct sockaddr_in *sin, *mask;
13228 struct ifaddr *ifa;
13229 struct in_addr srcnetaddr, gwnetaddr;
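/*
 * Check that the chosen IPv4 source address and the route's gateway sit
 * on the same subnet: mask both with the interface netmask and compare.
 * Illustrative example: source 192.0.2.10 with mask 255.255.255.0 and
 * gateway 192.0.2.1 both mask to 192.0.2.0, so they match.
 */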
13231 if (ro == NULL || ro->ro_rt == NULL ||
13232 sifa->address.sa.sa_family != AF_INET) {
13235 ifa = (struct ifaddr *)sifa->ifa;
13236 mask = (struct sockaddr_in *)(ifa->ifa_netmask);
13237 sin = (struct sockaddr_in *)&sifa->address.sin;
13238 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13239 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
13240 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
13241 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
13243 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
13244 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
13245 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
13246 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
13247 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
13248 if (srcnetaddr.s_addr == gwnetaddr.s_addr) {