/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#define SCTP_MAX_GAPS_INARRAY 4

struct sack_track {
    uint8_t right_edge;     /* mergeable on the right edge */
    uint8_t left_edge;      /* mergeable on the left edge */
    uint8_t num_entries;
    uint8_t spare;
    struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};
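
/*
 * sack_array is a 256-entry lookup table indexed by one byte of the
 * association's received-TSN mapping array: bit i of the index byte says
 * whether TSN (base + i) has arrived.  Each entry pre-computes the GAP ACK
 * blocks that byte contributes, plus whether its runs touch the byte's
 * edges so adjacent bytes can be merged.  A SACK builder can then walk the
 * mapping array a byte at a time instead of a bit at a time.  A minimal
 * usage sketch (illustrative only -- emit_gap() is a stand-in, and the
 * real builder in sctp_send_sack() also merges runs across byte
 * boundaries):
 *
 *	const struct sack_track *p = &sack_array[mapping_byte];
 *	int i;
 *
 *	for (i = 0; i < p->num_entries; i++) {
 *		// start/end are offsets relative to this byte's first TSN
 *		emit_gap(base_offset + p->gaps[i].start,
 *		    base_offset + p->gaps[i].end);
 *	}
 */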
struct sack_track sack_array[256] = {
    {0, 0, 0, 0,	/* 0x00 */
    {1, 0, 1, 0,	/* 0x01 */
    {0, 0, 1, 0,	/* 0x02 */
    {1, 0, 1, 0,	/* 0x03 */
    {0, 0, 1, 0,	/* 0x04 */
    {1, 0, 2, 0,	/* 0x05 */
    {0, 0, 1, 0,	/* 0x06 */
    {1, 0, 1, 0,	/* 0x07 */
    {0, 0, 1, 0,	/* 0x08 */
    {1, 0, 2, 0,	/* 0x09 */
    {0, 0, 2, 0,	/* 0x0a */
    {1, 0, 2, 0,	/* 0x0b */
    {0, 0, 1, 0,	/* 0x0c */
    {1, 0, 2, 0,	/* 0x0d */
    {0, 0, 1, 0,	/* 0x0e */
    {1, 0, 1, 0,	/* 0x0f */
    {0, 0, 1, 0,	/* 0x10 */
    {1, 0, 2, 0,	/* 0x11 */
    {0, 0, 2, 0,	/* 0x12 */
    {1, 0, 2, 0,	/* 0x13 */
    {0, 0, 2, 0,	/* 0x14 */
    {1, 0, 3, 0,	/* 0x15 */
    {0, 0, 2, 0,	/* 0x16 */
    {1, 0, 2, 0,	/* 0x17 */
    {0, 0, 1, 0,	/* 0x18 */
    {1, 0, 2, 0,	/* 0x19 */
    {0, 0, 2, 0,	/* 0x1a */
    {1, 0, 2, 0,	/* 0x1b */
    {0, 0, 1, 0,	/* 0x1c */
    {1, 0, 2, 0,	/* 0x1d */
    {0, 0, 1, 0,	/* 0x1e */
    {1, 0, 1, 0,	/* 0x1f */
    {0, 0, 1, 0,	/* 0x20 */
    {1, 0, 2, 0,	/* 0x21 */
    {0, 0, 2, 0,	/* 0x22 */
    {1, 0, 2, 0,	/* 0x23 */
    {0, 0, 2, 0,	/* 0x24 */
    {1, 0, 3, 0,	/* 0x25 */
    {0, 0, 2, 0,	/* 0x26 */
    {1, 0, 2, 0,	/* 0x27 */
    {0, 0, 2, 0,	/* 0x28 */
    {1, 0, 3, 0,	/* 0x29 */
    {0, 0, 3, 0,	/* 0x2a */
    {1, 0, 3, 0,	/* 0x2b */
    {0, 0, 2, 0,	/* 0x2c */
    {1, 0, 3, 0,	/* 0x2d */
    {0, 0, 2, 0,	/* 0x2e */
    {1, 0, 2, 0,	/* 0x2f */
    {0, 0, 1, 0,	/* 0x30 */
    {1, 0, 2, 0,	/* 0x31 */
    {0, 0, 2, 0,	/* 0x32 */
    {1, 0, 2, 0,	/* 0x33 */
    {0, 0, 2, 0,	/* 0x34 */
    {1, 0, 3, 0,	/* 0x35 */
    {0, 0, 2, 0,	/* 0x36 */
    {1, 0, 2, 0,	/* 0x37 */
    {0, 0, 1, 0,	/* 0x38 */
    {1, 0, 2, 0,	/* 0x39 */
    {0, 0, 2, 0,	/* 0x3a */
    {1, 0, 2, 0,	/* 0x3b */
    {0, 0, 1, 0,	/* 0x3c */
    {1, 0, 2, 0,	/* 0x3d */
    {0, 0, 1, 0,	/* 0x3e */
    {1, 0, 1, 0,	/* 0x3f */
    {0, 0, 1, 0,	/* 0x40 */
    {1, 0, 2, 0,	/* 0x41 */
    {0, 0, 2, 0,	/* 0x42 */
    {1, 0, 2, 0,	/* 0x43 */
    {0, 0, 2, 0,	/* 0x44 */
    {1, 0, 3, 0,	/* 0x45 */
    {0, 0, 2, 0,	/* 0x46 */
    {1, 0, 2, 0,	/* 0x47 */
    {0, 0, 2, 0,	/* 0x48 */
    {1, 0, 3, 0,	/* 0x49 */
    {0, 0, 3, 0,	/* 0x4a */
    {1, 0, 3, 0,	/* 0x4b */
    {0, 0, 2, 0,	/* 0x4c */
    {1, 0, 3, 0,	/* 0x4d */
    {0, 0, 2, 0,	/* 0x4e */
    {1, 0, 2, 0,	/* 0x4f */
    {0, 0, 2, 0,	/* 0x50 */
    {1, 0, 3, 0,	/* 0x51 */
    {0, 0, 3, 0,	/* 0x52 */
    {1, 0, 3, 0,	/* 0x53 */
    {0, 0, 3, 0,	/* 0x54 */
    {1, 0, 4, 0,	/* 0x55 */
    {0, 0, 3, 0,	/* 0x56 */
    {1, 0, 3, 0,	/* 0x57 */
    {0, 0, 2, 0,	/* 0x58 */
    {1, 0, 3, 0,	/* 0x59 */
    {0, 0, 3, 0,	/* 0x5a */
    {1, 0, 3, 0,	/* 0x5b */
    {0, 0, 2, 0,	/* 0x5c */
    {1, 0, 3, 0,	/* 0x5d */
    {0, 0, 2, 0,	/* 0x5e */
    {1, 0, 2, 0,	/* 0x5f */
    {0, 0, 1, 0,	/* 0x60 */
    {1, 0, 2, 0,	/* 0x61 */
    {0, 0, 2, 0,	/* 0x62 */
    {1, 0, 2, 0,	/* 0x63 */
    {0, 0, 2, 0,	/* 0x64 */
    {1, 0, 3, 0,	/* 0x65 */
    {0, 0, 2, 0,	/* 0x66 */
    {1, 0, 2, 0,	/* 0x67 */
    {0, 0, 2, 0,	/* 0x68 */
    {1, 0, 3, 0,	/* 0x69 */
    {0, 0, 3, 0,	/* 0x6a */
    {1, 0, 3, 0,	/* 0x6b */
    {0, 0, 2, 0,	/* 0x6c */
    {1, 0, 3, 0,	/* 0x6d */
    {0, 0, 2, 0,	/* 0x6e */
    {1, 0, 2, 0,	/* 0x6f */
    {0, 0, 1, 0,	/* 0x70 */
    {1, 0, 2, 0,	/* 0x71 */
    {0, 0, 2, 0,	/* 0x72 */
    {1, 0, 2, 0,	/* 0x73 */
    {0, 0, 2, 0,	/* 0x74 */
    {1, 0, 3, 0,	/* 0x75 */
    {0, 0, 2, 0,	/* 0x76 */
    {1, 0, 2, 0,	/* 0x77 */
    {0, 0, 1, 0,	/* 0x78 */
    {1, 0, 2, 0,	/* 0x79 */
    {0, 0, 2, 0,	/* 0x7a */
    {1, 0, 2, 0,	/* 0x7b */
    {0, 0, 1, 0,	/* 0x7c */
    {1, 0, 2, 0,	/* 0x7d */
    {0, 0, 1, 0,	/* 0x7e */
    {1, 0, 1, 0,	/* 0x7f */
    {0, 1, 1, 0,	/* 0x80 */
    {1, 1, 2, 0,	/* 0x81 */
    {0, 1, 2, 0,	/* 0x82 */
    {1, 1, 2, 0,	/* 0x83 */
    {0, 1, 2, 0,	/* 0x84 */
    {1, 1, 3, 0,	/* 0x85 */
    {0, 1, 2, 0,	/* 0x86 */
    {1, 1, 2, 0,	/* 0x87 */
    {0, 1, 2, 0,	/* 0x88 */
    {1, 1, 3, 0,	/* 0x89 */
    {0, 1, 3, 0,	/* 0x8a */
    {1, 1, 3, 0,	/* 0x8b */
    {0, 1, 2, 0,	/* 0x8c */
    {1, 1, 3, 0,	/* 0x8d */
    {0, 1, 2, 0,	/* 0x8e */
    {1, 1, 2, 0,	/* 0x8f */
    {0, 1, 2, 0,	/* 0x90 */
    {1, 1, 3, 0,	/* 0x91 */
    {0, 1, 3, 0,	/* 0x92 */
    {1, 1, 3, 0,	/* 0x93 */
    {0, 1, 3, 0,	/* 0x94 */
    {1, 1, 4, 0,	/* 0x95 */
    {0, 1, 3, 0,	/* 0x96 */
    {1, 1, 3, 0,	/* 0x97 */
    {0, 1, 2, 0,	/* 0x98 */
    {1, 1, 3, 0,	/* 0x99 */
    {0, 1, 3, 0,	/* 0x9a */
    {1, 1, 3, 0,	/* 0x9b */
    {0, 1, 2, 0,	/* 0x9c */
    {1, 1, 3, 0,	/* 0x9d */
    {0, 1, 2, 0,	/* 0x9e */
    {1, 1, 2, 0,	/* 0x9f */
    {0, 1, 2, 0,	/* 0xa0 */
    {1, 1, 3, 0,	/* 0xa1 */
    {0, 1, 3, 0,	/* 0xa2 */
    {1, 1, 3, 0,	/* 0xa3 */
    {0, 1, 3, 0,	/* 0xa4 */
    {1, 1, 4, 0,	/* 0xa5 */
    {0, 1, 3, 0,	/* 0xa6 */
    {1, 1, 3, 0,	/* 0xa7 */
    {0, 1, 3, 0,	/* 0xa8 */
    {1, 1, 4, 0,	/* 0xa9 */
    {0, 1, 4, 0,	/* 0xaa */
    {1, 1, 4, 0,	/* 0xab */
    {0, 1, 3, 0,	/* 0xac */
    {1, 1, 4, 0,	/* 0xad */
    {0, 1, 3, 0,	/* 0xae */
    {1, 1, 3, 0,	/* 0xaf */
    {0, 1, 2, 0,	/* 0xb0 */
    {1, 1, 3, 0,	/* 0xb1 */
    {0, 1, 3, 0,	/* 0xb2 */
    {1, 1, 3, 0,	/* 0xb3 */
    {0, 1, 3, 0,	/* 0xb4 */
    {1, 1, 4, 0,	/* 0xb5 */
    {0, 1, 3, 0,	/* 0xb6 */
    {1, 1, 3, 0,	/* 0xb7 */
    {0, 1, 2, 0,	/* 0xb8 */
    {1, 1, 3, 0,	/* 0xb9 */
    {0, 1, 3, 0,	/* 0xba */
    {1, 1, 3, 0,	/* 0xbb */
    {0, 1, 2, 0,	/* 0xbc */
    {1, 1, 3, 0,	/* 0xbd */
    {0, 1, 2, 0,	/* 0xbe */
    {1, 1, 2, 0,	/* 0xbf */
    {0, 1, 1, 0,	/* 0xc0 */
    {1, 1, 2, 0,	/* 0xc1 */
    {0, 1, 2, 0,	/* 0xc2 */
    {1, 1, 2, 0,	/* 0xc3 */
    {0, 1, 2, 0,	/* 0xc4 */
    {1, 1, 3, 0,	/* 0xc5 */
    {0, 1, 2, 0,	/* 0xc6 */
    {1, 1, 2, 0,	/* 0xc7 */
    {0, 1, 2, 0,	/* 0xc8 */
    {1, 1, 3, 0,	/* 0xc9 */
    {0, 1, 3, 0,	/* 0xca */
    {1, 1, 3, 0,	/* 0xcb */
    {0, 1, 2, 0,	/* 0xcc */
    {1, 1, 3, 0,	/* 0xcd */
    {0, 1, 2, 0,	/* 0xce */
    {1, 1, 2, 0,	/* 0xcf */
    {0, 1, 2, 0,	/* 0xd0 */
    {1, 1, 3, 0,	/* 0xd1 */
    {0, 1, 3, 0,	/* 0xd2 */
    {1, 1, 3, 0,	/* 0xd3 */
    {0, 1, 3, 0,	/* 0xd4 */
    {1, 1, 4, 0,	/* 0xd5 */
    {0, 1, 3, 0,	/* 0xd6 */
    {1, 1, 3, 0,	/* 0xd7 */
    {0, 1, 2, 0,	/* 0xd8 */
    {1, 1, 3, 0,	/* 0xd9 */
    {0, 1, 3, 0,	/* 0xda */
    {1, 1, 3, 0,	/* 0xdb */
    {0, 1, 2, 0,	/* 0xdc */
    {1, 1, 3, 0,	/* 0xdd */
    {0, 1, 2, 0,	/* 0xde */
    {1, 1, 2, 0,	/* 0xdf */
    {0, 1, 1, 0,	/* 0xe0 */
    {1, 1, 2, 0,	/* 0xe1 */
    {0, 1, 2, 0,	/* 0xe2 */
    {1, 1, 2, 0,	/* 0xe3 */
    {0, 1, 2, 0,	/* 0xe4 */
    {1, 1, 3, 0,	/* 0xe5 */
    {0, 1, 2, 0,	/* 0xe6 */
    {1, 1, 2, 0,	/* 0xe7 */
    {0, 1, 2, 0,	/* 0xe8 */
    {1, 1, 3, 0,	/* 0xe9 */
    {0, 1, 3, 0,	/* 0xea */
    {1, 1, 3, 0,	/* 0xeb */
    {0, 1, 2, 0,	/* 0xec */
    {1, 1, 3, 0,	/* 0xed */
    {0, 1, 2, 0,	/* 0xee */
    {1, 1, 2, 0,	/* 0xef */
    {0, 1, 1, 0,	/* 0xf0 */
    {1, 1, 2, 0,	/* 0xf1 */
    {0, 1, 2, 0,	/* 0xf2 */
    {1, 1, 2, 0,	/* 0xf3 */
    {0, 1, 2, 0,	/* 0xf4 */
    {1, 1, 3, 0,	/* 0xf5 */
    {0, 1, 2, 0,	/* 0xf6 */
    {1, 1, 2, 0,	/* 0xf7 */
    {0, 1, 1, 0,	/* 0xf8 */
    {1, 1, 2, 0,	/* 0xf9 */
    {0, 1, 2, 0,	/* 0xfa */
    {1, 1, 2, 0,	/* 0xfb */
    {0, 1, 1, 0,	/* 0xfc */
    {1, 1, 2, 0,	/* 0xfd */
    {0, 1, 1, 0,	/* 0xfe */
    {1, 1, 1, 0,	/* 0xff */
static int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
    int ipv4_addr_legal,
    int ipv6_addr_legal,
    int loopback_scope,
    int ipv4_local_scope,
    int local_scope,
    int site_scope,
    int do_update)
{
    if ((loopback_scope == 0) &&
        (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
        /* skip loopback if not in scope */
        return (0);
    }
    switch (ifa->address.sa.sa_family) {
    case AF_INET:
        if (ipv4_addr_legal) {
            struct sockaddr_in *sin;

            sin = (struct sockaddr_in *)&ifa->address.sin;
            if (sin->sin_addr.s_addr == 0) {
                /* not in scope, unspecified */
                return (0);
            }
            if ((ipv4_local_scope == 0) &&
                (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
                /* private address not in scope */
                return (0);
            }
        } else {
            return (0);
        }
        break;
    case AF_INET6:
        if (ipv6_addr_legal) {
            struct sockaddr_in6 *sin6;

            /*
             * Must update the flags, bummer, which means any
             * IFA locks must now be applied HERE <->
             */
            if (do_update) {
                sctp_gather_internal_ifa_flags(ifa);
            }
            if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
                return (0);
            }
            /* ok to use deprecated addresses? */
            sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
            if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
                /* skip unspecified addresses */
                return (0);
            }
            if ( /* (local_scope == 0) && */
                (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
                return (0);
            }
            if ((site_scope == 0) &&
                (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
                return (0);
            }
        } else {
            return (0);
        }
        break;
    }
    return (1);
}
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
{
    struct sctp_paramhdr *parmh;
    struct mbuf *mret;
    int len;

    if (ifa->address.sa.sa_family == AF_INET) {
        len = sizeof(struct sctp_ipv4addr_param);
    } else if (ifa->address.sa.sa_family == AF_INET6) {
        len = sizeof(struct sctp_ipv6addr_param);
    } else {
        /* unknown type */
        return (m);
    }
    if (M_TRAILINGSPACE(m) >= len) {
        /* easy side, we just drop it on the end */
        parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
        mret = m;
    } else {
        /* Need more space */
        mret = m;
        while (SCTP_BUF_NEXT(mret) != NULL) {
            mret = SCTP_BUF_NEXT(mret);
        }
        SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
        if (SCTP_BUF_NEXT(mret) == NULL) {
            /* We are hosed, can't add more addresses */
            return (m);
        }
        mret = SCTP_BUF_NEXT(mret);
        parmh = mtod(mret, struct sctp_paramhdr *);
    }
    /* now add the parameter */
    switch (ifa->address.sa.sa_family) {
    case AF_INET:
        {
            struct sctp_ipv4addr_param *ipv4p;
            struct sockaddr_in *sin;

            sin = (struct sockaddr_in *)&ifa->address.sin;
            ipv4p = (struct sctp_ipv4addr_param *)parmh;
            parmh->param_type = htons(SCTP_IPV4_ADDRESS);
            parmh->param_length = htons(len);
            ipv4p->addr = sin->sin_addr.s_addr;
            SCTP_BUF_LEN(mret) += len;
            break;
        }
    case AF_INET6:
        {
            struct sctp_ipv6addr_param *ipv6p;
            struct sockaddr_in6 *sin6;

            sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
            ipv6p = (struct sctp_ipv6addr_param *)parmh;
            parmh->param_type = htons(SCTP_IPV6_ADDRESS);
            parmh->param_length = htons(len);
            memcpy(ipv6p->addr, &sin6->sin6_addr,
                sizeof(ipv6p->addr));
            /* clear embedded scope in the address */
            in6_clearscope((struct in6_addr *)ipv6p->addr);
            SCTP_BUF_LEN(mret) += len;
            break;
        }
    default:
        return (m);
    }
    return (mret);
}
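
/*
 * The parameters appended above are plain TLVs; the IPv4 case serializes
 * to (a sketch, layouts per sctp_header.h):
 *
 *	+-------------------------------+------------------------------+
 *	| param_type = SCTP_IPV4_ADDRESS| param_length = 8             |
 *	+-------------------------------+------------------------------+
 *	| addr (IPv4 address, already in network byte order)           |
 *	+--------------------------------------------------------------+
 *
 * The IPv6 variant is the same header followed by the 16-byte address,
 * with any embedded scope id cleared first via in6_clearscope() above.
 */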
static struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to)
{
    struct sctp_vrf *vrf = NULL;
    int cnt, limit_out = 0, total_count;
    uint32_t vrf_id;

    vrf_id = inp->def_vrf_id;
    SCTP_IPI_ADDR_RLOCK();
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL) {
        SCTP_IPI_ADDR_RUNLOCK();
        return (m_at);
    }
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        struct sctp_ifa *sctp_ifap;
        struct sctp_ifn *sctp_ifnp;

        cnt = cnt_inits_to;
        if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
            limit_out = 1;
            cnt = SCTP_ADDRESS_LIMIT;
            goto skip_count;
        }
        LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
            if ((scope->loopback_scope == 0) &&
                SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
                /*
                 * Skip loopback devices if loopback_scope
                 * not set
                 */
                continue;
            }
            LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
                if (sctp_is_address_in_scope(sctp_ifap,
                    scope->ipv4_addr_legal,
                    scope->ipv6_addr_legal,
                    scope->loopback_scope,
                    scope->ipv4_local_scope,
                    scope->local_scope,
                    scope->site_scope, 1) == 0) {
                    continue;
                }
                cnt++;
                if (cnt > SCTP_ADDRESS_LIMIT) {
                    break;
                }
            }
            if (cnt > SCTP_ADDRESS_LIMIT) {
                break;
            }
        }
skip_count:
        if (cnt > 1) {
            /* second pass: actually add the addresses */
            total_count = 0;
            LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
                cnt = 0;
                if ((scope->loopback_scope == 0) &&
                    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
                    /*
                     * Skip loopback devices if
                     * loopback_scope not set
                     */
                    continue;
                }
                LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
                    if (sctp_is_address_in_scope(sctp_ifap,
                        scope->ipv4_addr_legal,
                        scope->ipv6_addr_legal,
                        scope->loopback_scope,
                        scope->ipv4_local_scope,
                        scope->local_scope,
                        scope->site_scope, 0) == 0) {
                        continue;
                    }
                    m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
                    total_count++;
                    cnt++;
                    if (cnt >= SCTP_ADDRESS_LIMIT) {
                        break;
                    }
                }
                if (total_count > SCTP_ADDRESS_LIMIT) {
                    /* No more addresses */
                    break;
                }
            }
        }
    } else {
        struct sctp_laddr *laddr;

        cnt = cnt_inits_to;
        /* First, how many? */
        LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
            if (laddr->ifa == NULL) {
                continue;
            }
            if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
                /*
                 * Address being deleted by the system, don't
                 * list.
                 */
                continue;
            if (laddr->action == SCTP_DEL_IP_ADDRESS) {
                /*
                 * Address being deleted on this ep, don't
                 * list.
                 */
                continue;
            }
            if (sctp_is_address_in_scope(laddr->ifa,
                scope->ipv4_addr_legal,
                scope->ipv6_addr_legal,
                scope->loopback_scope,
                scope->ipv4_local_scope,
                scope->local_scope,
                scope->site_scope, 1) == 0) {
                continue;
            }
            cnt++;
        }
        if (cnt > SCTP_ADDRESS_LIMIT) {
            limit_out = 1;
        }
        /*
         * To get through a NAT we only list addresses if we have
         * more than one. That way if you just bind a single address
         * we let the source of the init dictate our address.
         */
        if (cnt > 1) {
            cnt = 0;
            LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
                if (laddr->ifa == NULL) {
                    continue;
                }
                if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
                    continue;
                if (sctp_is_address_in_scope(laddr->ifa,
                    scope->ipv4_addr_legal,
                    scope->ipv6_addr_legal,
                    scope->loopback_scope,
                    scope->ipv4_local_scope,
                    scope->local_scope,
                    scope->site_scope, 0) == 0) {
                    continue;
                }
                m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
                cnt++;
                if (cnt >= SCTP_ADDRESS_LIMIT) {
                    break;
                }
            }
        }
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (m_at);
}
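
/*
 * Usage sketch (illustrative): when building an INIT or INIT-ACK, the
 * caller hands in the current tail mbuf of the chunk under construction
 * and chains the address TLVs onto it:
 *
 *	m_last = sctp_add_addresses_to_i_ia(inp, &scp, m_last, cnt_inits_to);
 *
 * where scp is a struct sctp_scoping filled from the association's scope
 * flags.  Note the deliberate asymmetry above: a single bound address is
 * not listed at all, so a NATed peer falls back to the packet's source
 * address.
 */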
static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
    uint8_t dest_is_global = 0;

    /* dest_is_priv is true if destination is a private address */
    /* dest_is_loop is true if destination is a loopback address */

    /*-
     * Here we determine if it's a preferred address. A preferred address
     * means it is the same scope or higher scope than the destination.
     * L = loopback, P = private, G = global
     * -----------------------------------------
     *    src    |  dest  |  result
     * -----------------------------------------
     *     L     |   L    |  yes
     *     P     |   L    |  yes-v4 no-v6
     *     G     |   L    |  yes-v4 no-v6
     *     L     |   P    |  no
     *     P     |   P    |  yes
     *     G     |   P    |  no
     *     L     |   G    |  no
     *     P     |   G    |  no
     *     G     |   G    |  yes
     * -----------------------------------------
     */
    if (ifa->address.sa.sa_family != fam) {
        /* forget mis-matched family */
        return (NULL);
    }
    if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
        dest_is_global = 1;
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
    /* Ok the address may be ok */
    if (fam == AF_INET6) {
        /* ok to use deprecated addresses? no, let's not! */
        if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
            SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
            return (NULL);
        }
        if (ifa->src_is_priv && !ifa->src_is_loop) {
            if (dest_is_loop) {
                SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
                return (NULL);
            }
        }
        if (ifa->src_is_glob) {
            if (dest_is_loop) {
                SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
                return (NULL);
            }
        }
    }
    /*
     * Now that we know what is what, implement our table. This could in
     * theory be done slicker (it used to be), but this is
     * straightforward and easier to validate :-)
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
        ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
        dest_is_loop, dest_is_priv, dest_is_global);
    if ((ifa->src_is_loop) && (dest_is_priv)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
        return (NULL);
    }
    if ((ifa->src_is_glob) && (dest_is_priv)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
        return (NULL);
    }
    if ((ifa->src_is_loop) && (dest_is_global)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
        return (NULL);
    }
    if ((ifa->src_is_priv) && (dest_is_global)) {
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
        return (NULL);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
    /* it's a preferred address */
    return (ifa);
}
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
    uint8_t dest_is_global = 0;

    /*-
     * Here we determine if it's an acceptable address. An acceptable
     * address means it is the same scope or higher scope, but we can
     * allow for NAT, which means it's ok to have a global dest and a
     * private src.
     *
     * L = loopback, P = private, G = global
     * -----------------------------------------
     *    src    |  dest  |  result
     * -----------------------------------------
     *     L     |   L    |  yes
     *     P     |   L    |  yes-v4 no-v6
     *     G     |   L    |  yes
     *     L     |   P    |  no
     *     P     |   P    |  yes
     *     G     |   P    |  yes - May not work
     *     L     |   G    |  no
     *     P     |   G    |  yes - May not work
     *     G     |   G    |  yes
     * -----------------------------------------
     */
    if (ifa->address.sa.sa_family != fam) {
        /* forget non matching family */
        return (NULL);
    }
    /* Ok the address may be ok */
    if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
        dest_is_global = 1;
    }
    if (fam == AF_INET6) {
        /* ok to use deprecated addresses? */
        if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
            return (NULL);
        }
        if (ifa->src_is_priv) {
            /* Special case, linklocal to loop */
            if (dest_is_loop)
                return (NULL);
        }
    }
    /*
     * Now that we know what is what, implement our table. This could in
     * theory be done slicker (it used to be), but this is
     * straightforward and easier to validate :-)
     */
    if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
        return (NULL);
    }
    if ((ifa->src_is_loop == 1) && (dest_is_global)) {
        return (NULL);
    }
    /* it's an acceptable address */
    return (ifa);
}
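
/*
 * The two predicates above apply the same scope matrix at different
 * strictness: _preferred is the first pass of every selector below and
 * only matches same-or-higher scope, while _acceptable is the fallback
 * pass that additionally tolerates the two NAT rows (P->G and G->P).
 * Both return the ifa itself on a match and NULL otherwise, so they
 * chain naturally (an illustrative sketch):
 *
 *	sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, dest_is_priv, fam);
 *	if (sifa == NULL)
 *		sifa = sctp_is_ifa_addr_acceptable(ifa, dest_is_loop, dest_is_priv, fam);
 */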
int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
    struct sctp_laddr *laddr;

    if (stcb == NULL) {
        /* There are no restrictions, no TCB :-) */
        return (0);
    }
    LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
        if (laddr->ifa == NULL) {
            SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", __FUNCTION__);
            continue;
        }
        if (laddr->ifa == ifa) {
            /* Yes it is on the list */
            return (1);
        }
    }
    return (0);
}

static int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
    struct sctp_laddr *laddr;

    if (ifa == NULL)
        return (0);
    LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
        if (laddr->ifa == NULL) {
            SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", __FUNCTION__);
            continue;
        }
        if ((laddr->ifa == ifa) && laddr->action == 0)
            return (1);
    }
    return (0);
}
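
/*
 * Note on the recurring "no-no list" test in the selectors below: a
 * candidate source is rejected when it is on the association's
 * restricted list, unless the caller passed non_asoc_addr_ok and the
 * address is merely pending an ASCONF add.  The test always has this
 * shape:
 *
 *	if (((non_asoc_addr_ok == 0) &&
 *	    (sctp_is_addr_restricted(stcb, sifa))) ||
 *	    (non_asoc_addr_ok &&
 *	    (sctp_is_addr_restricted(stcb, sifa)) &&
 *	    (!sctp_is_addr_pending(stcb, sifa)))) {
 *		continue;	(on the no-no list)
 *	}
 */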
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
    sctp_route_t * ro,
    uint32_t vrf_id,
    int non_asoc_addr_ok,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    sa_family_t fam)
{
    struct sctp_laddr *laddr, *starting_point;
    void *ifn;
    int resettotop = 0;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    struct sctp_vrf *vrf;
    uint32_t ifn_index;

    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    sctp_ifn = sctp_find_ifn(ifn, ifn_index);
    /*
     * first question, is the ifn we will emit on in our list, if so, we
     * want such an address. Note that we first looked for a preferred
     * address.
     */
    if (sctp_ifn) {
        /* is a preferred one on the interface we route out? */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
                (non_asoc_addr_ok == 0))
                continue;
            sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
                dest_is_loop,
                dest_is_priv, fam);
            if (sifa == NULL)
                continue;
            if (sctp_is_addr_in_ep(inp, sifa)) {
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
    }
    /*
     * ok, we now need to find one on the list of the addresses. We
     * can't get one on the emitting interface, so let's find first a
     * preferred one. If not that, an acceptable one; otherwise we
     * return NULL.
     */
    starting_point = inp->next_addr_touse;
once_again:
    if (inp->next_addr_touse == NULL) {
        inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
        resettotop = 1;
    }
    for (laddr = inp->next_addr_touse; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (resettotop == 0) {
        inp->next_addr_touse = NULL;
        goto once_again;
    }
    inp->next_addr_touse = starting_point;
    resettotop = 0;
once_again_too:
    if (inp->next_addr_touse == NULL) {
        inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
        resettotop = 1;
    }
    /* ok, what about an acceptable address in the inp */
    for (laddr = inp->next_addr_touse; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (resettotop == 0) {
        inp->next_addr_touse = NULL;
        goto once_again_too;
    }
    /*
     * no address bound can be a source for the destination; we are in
     * trouble.
     */
    return (NULL);
}
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t * ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
    struct sctp_laddr *laddr, *starting_point;
    void *ifn;
    struct sctp_ifn *sctp_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    uint8_t start_at_beginning = 0;
    struct sctp_vrf *vrf;
    uint32_t ifn_index;

    /*
     * first question, is the ifn we will emit on in our list, if so, we
     * want that one.
     */
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    sctp_ifn = sctp_find_ifn(ifn, ifn_index);

    /*
     * first question, is the ifn we will emit on in our list? If so,
     * we want that one. First we look for a preferred. Second, we go
     * for an acceptable.
     */
    if (sctp_ifn) {
        /* first try for a preferred address on the ep */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
                continue;
            if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
                sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
                if (sifa == NULL)
                    continue;
                if (((non_asoc_addr_ok == 0) &&
                    (sctp_is_addr_restricted(stcb, sifa))) ||
                    (non_asoc_addr_ok &&
                    (sctp_is_addr_restricted(stcb, sifa)) &&
                    (!sctp_is_addr_pending(stcb, sifa)))) {
                    /* on the no-no list */
                    continue;
                }
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
        /* next try for an acceptable address on the ep */
        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
                continue;
            if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
                sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
                if (sifa == NULL)
                    continue;
                if (((non_asoc_addr_ok == 0) &&
                    (sctp_is_addr_restricted(stcb, sifa))) ||
                    (non_asoc_addr_ok &&
                    (sctp_is_addr_restricted(stcb, sifa)) &&
                    (!sctp_is_addr_pending(stcb, sifa)))) {
                    /* on the no-no list */
                    continue;
                }
                atomic_add_int(&sifa->refcount, 1);
                return (sifa);
            }
        }
    }
    /*
     * if we can't find one like that then we must look at all addresses
     * bound, to pick one: first a preferred one, then secondly an
     * acceptable one.
     */
    starting_point = stcb->asoc.last_used_address;
sctp_from_the_top:
    if (stcb->asoc.last_used_address == NULL) {
        start_at_beginning = 1;
        stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
    }
    /* search beginning with the last used address */
    for (laddr = stcb->asoc.last_used_address; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        if (((non_asoc_addr_ok == 0) &&
            (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
            (sctp_is_addr_restricted(stcb, sifa)) &&
            (!sctp_is_addr_pending(stcb, sifa)))) {
            /* on the no-no list */
            continue;
        }
        stcb->asoc.last_used_address = laddr;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (start_at_beginning == 0) {
        stcb->asoc.last_used_address = NULL;
        goto sctp_from_the_top;
    }
    /* now try for any higher scope than the destination */
    stcb->asoc.last_used_address = starting_point;
    start_at_beginning = 0;
sctp_from_the_top2:
    if (stcb->asoc.last_used_address == NULL) {
        start_at_beginning = 1;
        stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
    }
    /* search beginning with the last used address */
    for (laddr = stcb->asoc.last_used_address; laddr;
        laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
        if (laddr->ifa == NULL) {
            /* address has been removed */
            continue;
        }
        if (laddr->action == SCTP_DEL_IP_ADDRESS) {
            /* address is being deleted */
            continue;
        }
        sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        if (((non_asoc_addr_ok == 0) &&
            (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
            (sctp_is_addr_restricted(stcb, sifa)) &&
            (!sctp_is_addr_pending(stcb, sifa)))) {
            /* on the no-no list */
            continue;
        }
        stcb->asoc.last_used_address = laddr;
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    if (start_at_beginning == 0) {
        stcb->asoc.last_used_address = NULL;
        goto sctp_from_the_top2;
    }
    return (NULL);
}
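
/*
 * Both bound-specific selectors above rotate: inp->next_addr_touse or
 * asoc.last_used_address remembers where the previous call stopped, the
 * list is scanned from there to its end, and a single wrap-around pass
 * restarts from LIST_FIRST().  Over successive sends this round-robins
 * the source among all usable bound addresses instead of always
 * returning the first match.
 */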
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    int addr_wanted,
    sa_family_t fam,
    sctp_route_t * ro)
{
    struct sctp_ifa *ifa, *sifa;
    int num_eligible_addr = 0;

#ifdef INET6
    struct sockaddr_in6 sin6, lsa6;

    if (fam == AF_INET6) {
        memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
        (void)sa6_recoverscope(&sin6);
    }
#endif				/* INET6 */
    LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
        if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
            (non_asoc_addr_ok == 0))
            continue;
        sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
#ifdef INET6
        if (fam == AF_INET6 &&
            dest_is_loop &&
            sifa->src_is_loop && sifa->src_is_priv) {
            /*
             * don't allow fe80::1 to be a src on loop ::1, we
             * don't list it to the peer so we will get an
             * abort.
             */
            continue;
        }
        if (fam == AF_INET6 &&
            IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
            IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
            /*
             * link-local <-> link-local must belong to the same
             * scope.
             */
            memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
            (void)sa6_recoverscope(&lsa6);
            if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
                continue;
            }
        }
#endif				/* INET6 */
        /*
         * Check if the IPv6 address matches to next-hop. In the
         * mobile case, old IPv6 address may be not deleted from the
         * interface. Then, the interface has previous and new
         * addresses. We should use one corresponding to the
         * next-hop. (by micchie)
         */
#ifdef INET6
        if (stcb && fam == AF_INET6 &&
            sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
            if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
                == 0) {
                continue;
            }
        }
#endif
        /* Avoid topologically incorrect IPv4 address */
        if (stcb && fam == AF_INET &&
            sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
            if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
                continue;
            }
        }
        if (((non_asoc_addr_ok == 0) &&
            (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
            (sctp_is_addr_restricted(stcb, sifa)) &&
            (!sctp_is_addr_pending(stcb, sifa)))) {
            /*
             * It is restricted for some reason..
             * probably not yet added.
             */
            continue;
        }
        if (num_eligible_addr >= addr_wanted) {
            return (sifa);
        }
        num_eligible_addr++;
    }
    return (NULL);
}
static int
sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
    struct sctp_ifa *ifa, *sifa;
    int num_eligible_addr = 0;

    LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
        if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
            (non_asoc_addr_ok == 0)) {
            continue;
        }
        sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL) {
            continue;
        }
        if (((non_asoc_addr_ok == 0) &&
            (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
            (sctp_is_addr_restricted(stcb, sifa)) &&
            (!sctp_is_addr_pending(stcb, sifa)))) {
            /*
             * It is restricted for some reason..
             * probably not yet added.
             */
            continue;
        }
        num_eligible_addr++;
    }
    return (num_eligible_addr);
}
static struct sctp_ifa *
sctp_choose_boundall(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t * ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
    int cur_addr_num = 0, num_preferred = 0;
    void *ifn;
    struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
    struct sctp_ifa *sctp_ifa, *sifa;
    uint32_t ifn_index;
    struct sctp_vrf *vrf;

    /*-
     * For boundall we can use any address in the association.
     * If non_asoc_addr_ok is set we can use any address (at least in
     * theory). So we look for preferred addresses first. If we find one,
     * we use it. Otherwise we next try to get an address on the
     * interface, which we should be able to do (unless non_asoc_addr_ok
     * is false and we are routed out that way). In these cases where we
     * can't use the address of the interface we go through all the
     * ifn's looking for an address we can use and fill that in. Punting
     * means we send back address 0, which will probably cause problems
     * actually since then IP will fill in the address of the route ifn,
     * which means we probably already rejected it.. i.e. here comes an
     * abort.
     */
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL)
        return (NULL);

    ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
    ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
    emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
    if (sctp_ifn == NULL) {
        /* ?? We don't have this guy ?? */
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
        goto bound_all_plan_b;
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
        ifn_index, sctp_ifn->ifn_name);
    if (net) {
        cur_addr_num = net->indx_of_eligible_next_to_use;
    }
    num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
        stcb,
        non_asoc_addr_ok,
        dest_is_loop,
        dest_is_priv, fam);
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
        num_preferred, sctp_ifn->ifn_name);
    if (num_preferred == 0) {
        /*
         * no eligible addresses, we must use some other interface
         * address if we can find one.
         */
        goto bound_all_plan_b;
    }
    /*
     * Ok we have num_eligible_addr set with how many we can use, this
     * may vary from call to call due to addresses being deprecated
     * etc..
     */
    if (cur_addr_num >= num_preferred) {
        cur_addr_num = 0;
    }
    /*
     * select the nth address from the list (where cur_addr_num is the
     * nth) and 0 is the first one, 1 is the second one etc...
     */
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
    sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
        dest_is_priv, cur_addr_num, fam, ro);
    /* if sctp_ifa is NULL something changed??, fall to plan b. */
    if (sctp_ifa) {
        atomic_add_int(&sctp_ifa->refcount, 1);
        if (net) {
            /* save off where the next one we will want */
            net->indx_of_eligible_next_to_use = cur_addr_num + 1;
        }
        return (sctp_ifa);
    }
    /*
     * plan_b: Look at all interfaces and find a preferred address. If
     * no preferred fall through to plan_c.
     */
bound_all_plan_b:
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
    LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
            sctp_ifn->ifn_name);
        if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
            /* wrong base scope */
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
            continue;
        }
        if ((sctp_ifn == looked_at) && looked_at) {
            /* already looked at this guy */
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
            continue;
        }
        num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
            dest_is_loop, dest_is_priv, fam);
        SCTPDBG(SCTP_DEBUG_OUTPUT2,
            "Found ifn:%p %d preferred source addresses\n",
            ifn, num_preferred);
        if (num_preferred == 0) {
            /* None on this interface. */
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
            continue;
        }
        SCTPDBG(SCTP_DEBUG_OUTPUT2,
            "num preferred:%d on interface:%p cur_addr_num:%d\n",
            num_preferred, sctp_ifn, cur_addr_num);
        /*
         * Ok we have num_eligible_addr set with how many we can
         * use, this may vary from call to call due to addresses
         * being deprecated etc..
         */
        if (cur_addr_num >= num_preferred) {
            cur_addr_num = 0;
        }
        sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
            dest_is_priv, cur_addr_num, fam, ro);
        if (sifa == NULL)
            continue;
        if (net) {
            net->indx_of_eligible_next_to_use = cur_addr_num + 1;
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
                cur_addr_num);
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
            SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
            SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
        }
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    /* plan_c: do we have an acceptable address on the emit interface */
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
    if (emit_ifn == NULL) {
        goto plan_d;
    }
    LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
        if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
            (non_asoc_addr_ok == 0))
            continue;
        sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
            dest_is_priv, fam);
        if (sifa == NULL)
            continue;
        if (((non_asoc_addr_ok == 0) &&
            (sctp_is_addr_restricted(stcb, sifa))) ||
            (non_asoc_addr_ok &&
            (sctp_is_addr_restricted(stcb, sifa)) &&
            (!sctp_is_addr_pending(stcb, sifa)))) {
            /*
             * It is restricted for some reason..
             * probably not yet added.
             */
            continue;
        }
        atomic_add_int(&sifa->refcount, 1);
        return (sifa);
    }
    /*
     * plan_d: We are in trouble. No preferred address on the emit
     * interface. And not even a preferred address on all interfaces. Go
     * out and see if we can find an acceptable address somewhere
     * amongst all interfaces.
     */
plan_d:
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D\n");
    LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
        if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
            /* wrong base scope */
            continue;
        }
        if ((sctp_ifn == looked_at) && looked_at)
            /* already looked at this guy */
            continue;

        LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
            if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
                (non_asoc_addr_ok == 0))
                continue;
            sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
                dest_is_loop,
                dest_is_priv, fam);
            if (sifa == NULL)
                continue;
            if (((non_asoc_addr_ok == 0) &&
                (sctp_is_addr_restricted(stcb, sifa))) ||
                (non_asoc_addr_ok &&
                (sctp_is_addr_restricted(stcb, sifa)) &&
                (!sctp_is_addr_pending(stcb, sifa)))) {
                /*
                 * It is restricted for some
                 * reason.. probably not yet added.
                 */
                continue;
            }
            atomic_add_int(&sifa->refcount, 1);
            return (sifa);
        }
    }
    /*
     * Ok we can find NO address to source from that is not on our
     * restricted list and non_asoc_address is NOT ok, or it is on our
     * restricted list. We can't source to it :-(
     */
    return (NULL);
}
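
/*
 * Summary of the bound-all ladder above, as implemented: Plan A takes
 * the nth preferred address on the interface the route emits on; Plan B
 * takes a preferred address from any other interface of matching base
 * scope; Plan C falls back to a merely acceptable address on the emit
 * interface; Plan D accepts any acceptable address on any interface.
 * cur_addr_num round-robins the "nth" choice per destination net.
 */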
/* tcb may be NULL */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t * ro,
    struct sctp_nets *net,
    int non_asoc_addr_ok, uint32_t vrf_id)
{
    struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;

#ifdef INET6
    struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;

#endif
    struct sctp_ifa *answer;
    uint8_t dest_is_priv, dest_is_loop;
    sa_family_t fam;

    /*-
     * Rules:
     * - Find the route if needed, cache if I can.
     * - Look at interface address in route. Is it in the bound list?
     *   If so we have the best source.
     * - If not we must rotate amongst the addresses.
     *
     * Do we need to pay attention to scope? We can have a private address
     * or a global address we are sourcing or sending to. So if we draw
     * it out
     * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
     * For V4
     * ------------------------------------------
     *      source     *      dest   *  result
     * -----------------------------------------
     * <a>  Private    *     Global  *  NAT
     * -----------------------------------------
     * <b>  Private    *     Private *  No problem
     * -----------------------------------------
     * <c>  Global     *     Private *  Huh, How will this work?
     * -----------------------------------------
     * <d>  Global     *     Global  *  No Problem
     *------------------------------------------
     * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
     * For V6
     *------------------------------------------
     *      source     *      dest     *  result
     * -----------------------------------------
     * <a>  Linklocal  *     Global    *
     * -----------------------------------------
     * <b>  Linklocal  *     Linklocal *  No problem
     * -----------------------------------------
     * <c>  Global     *     Linklocal *  Huh, How will this work?
     * -----------------------------------------
     * <d>  Global     *     Global    *  No Problem
     *------------------------------------------
     * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
     *
     * And then we add to that what happens if there are multiple addresses
     * assigned to an interface. Remember the ifa on a ifn is a linked
     * list of addresses. So one interface can have more than one IP
     * address. What happens if we have both a private and a global
     * address? Do we then use context of destination to sort out which
     * one is best? And what about NATs? Sending P->G may get you a NAT
     * translation, or should you select the G that's on the interface in
     * preference?
     *
     * Decisions:
     * - count the number of addresses on the interface.
     * - if it is one, no problem except case <c>.
     *   For <a> we will assume a NAT out there.
     * - if there are more than one, then we need to worry about scope P
     *   or G. We should prefer G -> G and P -> P if possible.
     *   Then as a secondary fall back to mixed types G->P being a last
     *   ditch one.
     * - The above all works for bound all, but bound specific we need to
     *   use the same concept but instead only consider the bound
     *   addresses. If the bound set is NOT assigned to the interface then
     *   we must use rotation amongst the bound addresses..
     */
    if (ro->ro_rt == NULL) {
        /*
         * Need a route to cache.
         */
        SCTP_RTALLOC(ro, vrf_id);
    }
    if (ro->ro_rt == NULL) {
        return (NULL);
    }
    fam = to->sin_family;
    dest_is_priv = dest_is_loop = 0;
    /* Setup our scopes for the destination */
    switch (fam) {
    case AF_INET:
        /* Scope based on outbound address */
        if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
            dest_is_loop = 1;
            if (net != NULL) {
                /* mark it as local */
                net->addr_is_local = 1;
            }
        } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
            dest_is_priv = 1;
        }
        break;
#ifdef INET6
    case AF_INET6:
        /* Scope based on outbound address */
        if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
            SCTP_ROUTE_IS_REAL_LOOP(ro)) {
            /*
             * If the address is a loopback address, which
             * consists of "::1" OR "fe80::1%lo0", we are
             * loopback scope. But we don't use dest_is_priv
             * (link local addresses).
             */
            dest_is_loop = 1;
            if (net != NULL) {
                /* mark it as local */
                net->addr_is_local = 1;
            }
        } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
            dest_is_priv = 1;
        }
        break;
#endif
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)to);
    SCTP_IPI_ADDR_RLOCK();
    if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
        /* Bound all case */
        answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
            dest_is_priv, dest_is_loop,
            non_asoc_addr_ok, fam);
        SCTP_IPI_ADDR_RUNLOCK();
        return (answer);
    }
    /* Subset bound case */
    if (stcb) {
        answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro,
            vrf_id, dest_is_priv,
            dest_is_loop,
            non_asoc_addr_ok, fam);
    } else {
        answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
            non_asoc_addr_ok,
            dest_is_priv,
            dest_is_loop, fam);
    }
    SCTP_IPI_ADDR_RUNLOCK();
    return (answer);
}
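
/*
 * Usage sketch (illustrative): a caller needing a source address first
 * primes ro->ro_dst with the peer's address, then:
 *
 *	struct sctp_ifa *src;
 *
 *	src = sctp_source_address_selection(inp, stcb, ro, net, 0, vrf_id);
 *	if (src != NULL) {
 *		... use src->address ...
 *		sctp_free_ifa(src);	(selection took a refcount)
 *	}
 *
 * The refcount is taken inside the selectors (the atomic_add_int calls
 * above), so every successful call must be paired with sctp_free_ifa().
 */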
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
{
    struct cmsghdr cmh;
    int tlen, at;

    tlen = SCTP_BUF_LEN(control);
    at = 0;
    /*
     * Independent of how many mbufs, find the c_type inside the control
     * structure and copy out the data.
     */
    while (at < tlen) {
        if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
            /* not enough room for one more, we are done. */
            return (0);
        }
        m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
        if (((int)cmh.cmsg_len + at) > tlen) {
            /*
             * this is real messed up since there is not enough
             * data here to cover the cmsg header. We are done.
             */
            return (0);
        }
        if ((cmh.cmsg_level == IPPROTO_SCTP) &&
            (c_type == cmh.cmsg_type)) {
            /* found the one we want, copy it out */
            at += CMSG_ALIGN(sizeof(struct cmsghdr));
            if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
                /* space of cmsg_len after header not big enough */
                return (0);
            }
            m_copydata(control, at, cpsize, data);
            return (1);
        } else {
            at += CMSG_ALIGN(cmh.cmsg_len);
            if (cmh.cmsg_len == 0) {
                break;
            }
        }
    }
    return (0);
}
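
/*
 * Usage sketch (illustrative): pulling the send parameters a user
 * attached to a sendmsg() call out of the control mbuf chain:
 *
 *	struct sctp_sndrcvinfo srcv;
 *
 *	if (sctp_find_cmsg(SCTP_SNDRCV, &srcv, control, sizeof(srcv))) {
 *		(srcv.sinfo_stream, sinfo_flags, etc. are now valid)
 *	}
 *
 * Only IPPROTO_SCTP-level cmsgs are matched; everything else is skipped
 * by advancing "at" by the aligned cmsg_len.
 */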
static struct mbuf *
sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
{
    struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
    struct sctp_state_cookie *stc;
    struct sctp_paramhdr *ph;
    uint8_t *foo;
    int sig_offset;
    uint16_t cookie_sz;

    mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
        sizeof(struct sctp_paramhdr)), 0,
        M_DONTWAIT, 1, MT_DATA);
    if (mret == NULL) {
        return (NULL);
    }
    copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
    if (copy_init == NULL) {
        sctp_m_freem(mret);
        return (NULL);
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
        struct mbuf *mat;

        mat = copy_init;
        while (mat) {
            if (SCTP_BUF_IS_EXTENDED(mat)) {
                sctp_log_mb(mat, SCTP_MBUF_ICOPY);
            }
            mat = SCTP_BUF_NEXT(mat);
        }
    }
#endif
    copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
        M_DONTWAIT);
    if (copy_initack == NULL) {
        sctp_m_freem(mret);
        sctp_m_freem(copy_init);
        return (NULL);
    }
#ifdef SCTP_MBUF_LOGGING
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
        struct mbuf *mat;

        mat = copy_initack;
        while (mat) {
            if (SCTP_BUF_IS_EXTENDED(mat)) {
                sctp_log_mb(mat, SCTP_MBUF_ICOPY);
            }
            mat = SCTP_BUF_NEXT(mat);
        }
    }
#endif
    /* easy side, we just drop it on the end */
    ph = mtod(mret, struct sctp_paramhdr *);
    SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
        sizeof(struct sctp_paramhdr);
    stc = (struct sctp_state_cookie *)((caddr_t)ph +
        sizeof(struct sctp_paramhdr));
    ph->param_type = htons(SCTP_STATE_COOKIE);
    ph->param_length = 0;	/* fill in at the end */
    /* Fill in the stc cookie data */
    memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));

    /* tack the INIT and then the INIT-ACK onto the chain */
    cookie_sz = 0;
    for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
        cookie_sz += SCTP_BUF_LEN(m_at);
        if (SCTP_BUF_NEXT(m_at) == NULL) {
            SCTP_BUF_NEXT(m_at) = copy_init;
            break;
        }
    }
    for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
        cookie_sz += SCTP_BUF_LEN(m_at);
        if (SCTP_BUF_NEXT(m_at) == NULL) {
            SCTP_BUF_NEXT(m_at) = copy_initack;
            break;
        }
    }
    for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
        cookie_sz += SCTP_BUF_LEN(m_at);
        if (SCTP_BUF_NEXT(m_at) == NULL) {
            break;
        }
    }
    sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
    if (sig == NULL) {
        /* no space, so free the entire chain */
        sctp_m_freem(mret);
        return (NULL);
    }
    SCTP_BUF_LEN(sig) = 0;
    SCTP_BUF_NEXT(m_at) = sig;
    sig_offset = 0;
    foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
    memset(foo, 0, SCTP_SIGNATURE_SIZE);
    *signature = foo;
    SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
    cookie_sz += SCTP_SIGNATURE_SIZE;
    ph->param_length = htons(cookie_sz);
    return (mret);
}
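
/*
 * Resulting cookie layout on the wire (a sketch of what the chain built
 * above serializes to):
 *
 *	+---------------------------------------+
 *	| paramhdr: SCTP_STATE_COOKIE, length   |
 *	| struct sctp_state_cookie (stc_in)     |
 *	| copy of the peer's INIT chunk         |
 *	| copy of our INIT-ACK chunk            |
 *	| signature (SCTP_SIGNATURE_SIZE bytes) |
 *	+---------------------------------------+
 *
 * The signature bytes are zeroed here and filled in later by the caller
 * through the *signature out-pointer once the cookie is complete, which
 * is also why param_length is patched last.
 */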
static uint8_t
sctp_get_ect(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
    uint8_t this_random;

    if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 0)
        return (0);

    if (SCTP_BASE_SYSCTL(sctp_ecn_nonce) == 0)
        /* no nonce, always return ECT0 */
        return (SCTP_ECT0_BIT);

    if (stcb->asoc.peer_supports_ecn_nonce == 0) {
        /* Peer does NOT support it, so we send a ECT0 only */
        return (SCTP_ECT0_BIT);
    }
    if (chk == NULL)
        return (SCTP_ECT0_BIT);

    if ((stcb->asoc.hb_random_idx > 3) ||
        ((stcb->asoc.hb_random_idx == 3) &&
        (stcb->asoc.hb_ect_randombit > 7))) {
        uint32_t rndval;

warp_drive_sa:
        rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
        memcpy(stcb->asoc.hb_random_values, &rndval,
            sizeof(stcb->asoc.hb_random_values));
        this_random = stcb->asoc.hb_random_values[0];
        stcb->asoc.hb_random_idx = 0;
        stcb->asoc.hb_ect_randombit = 0;
    } else {
        if (stcb->asoc.hb_ect_randombit > 7) {
            stcb->asoc.hb_ect_randombit = 0;
            stcb->asoc.hb_random_idx++;
            if (stcb->asoc.hb_random_idx > 3) {
                goto warp_drive_sa;
            }
        }
        this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
    }
    if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
        /* ECN Nonce stuff */
        chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
        stcb->asoc.hb_ect_randombit++;
        return (SCTP_ECT1_BIT);
    } else {
        stcb->asoc.hb_ect_randombit++;
        return (SCTP_ECT0_BIT);
    }
}
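
/*
 * The nonce generator above amortizes one 32-bit random value over 32
 * sends: hb_random_values[] caches four random bytes, hb_random_idx
 * selects a byte, and hb_ect_randombit walks its eight bits.  Each DATA
 * chunk is marked ECT0 or ECT1 by the next bit (a sketch of the step
 * performed above):
 *
 *	bit = (this_random >> hb_ect_randombit) & 0x01;
 *	ect = bit ? SCTP_ECT1_BIT : SCTP_ECT0_BIT;
 *
 * The receiver's running sum of ECT1 marks forms the ECN nonce (in the
 * style of RFC 3540), letting the sender detect a receiver that
 * conceals CE marks.
 */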
static int
sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,	/* may be NULL */
    struct sctp_nets *net,
    struct sockaddr *to,
    struct mbuf *m,
    uint32_t auth_offset,
    struct sctp_auth_chunk *auth,
    uint16_t auth_keyid,
    int nofragment_flag,
    int ecn_ok,
    struct sctp_tmit_chunk *chk,
    int out_of_asoc_ok,
    uint16_t src_port,
    uint16_t dest_port,
    uint32_t v_tag,
    uint16_t port,
    int so_locked,
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    union sctp_sockstore *over_addr
)
/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
{
    /*-
     * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
     * header WITH an SCTPHDR but no IP header, endpoint inp and sa
     * structure:
     * - fill in the HMAC digest of any AUTH chunk in the packet.
     * - calculate and fill in the SCTP checksum.
     * - prepend an IP address header.
     * - if boundall use INADDR_ANY.
     * - if boundspecific do source address selection.
     * - set fragmentation option for ipV4.
     * - On return from IP output, check/adjust mtu size of output
     *   interface and smallest_mtu size as well.
     */
    /* Will need ifdefs around this */
    struct mbuf *o_pak;
    struct mbuf *newm;
    struct sctphdr *sctphdr;
    int packet_length;
    int ret;
    uint32_t vrf_id;
    sctp_route_t *ro = NULL;
    struct udphdr *udp = NULL;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    struct socket *so = NULL;

#endif
    if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
        SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
        sctp_m_freem(m);
        return (EFAULT);
    }
    if (stcb) {
        vrf_id = stcb->asoc.vrf_id;
    } else {
        vrf_id = inp->def_vrf_id;
    }
    /* fill in the HMAC digest for any AUTH chunk in the packet */
    if ((auth != NULL) && (stcb != NULL)) {
        sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
    }
    if (to->sa_family == AF_INET) {
        struct ip *ip = NULL;
        sctp_route_t iproute;
        uint8_t tos_value;
        int len;

        len = sizeof(struct ip) + sizeof(struct sctphdr);
        if (port) {
            len += sizeof(struct udphdr);
        }
        newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
        if (newm == NULL) {
            sctp_m_freem(m);
            SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
            return (ENOMEM);
        }
        SCTP_ALIGN_TO_END(newm, len);
        SCTP_BUF_LEN(newm) = len;
        SCTP_BUF_NEXT(newm) = m;
        m = newm;
        packet_length = sctp_calculate_len(m);
        ip = mtod(m, struct ip *);
        ip->ip_v = IPVERSION;
        ip->ip_hl = (sizeof(struct ip) >> 2);
        if (net) {
            tos_value = net->tos_flowlabel & 0x000000ff;
        } else {
            tos_value = inp->ip_inp.inp.inp_ip_tos;
        }
        if ((nofragment_flag) && (port == 0)) {
#if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
            ip->ip_off = IP_DF;
#else
            ip->ip_off = htons(IP_DF);
#endif
        } else {
            ip->ip_off = 0;
        }
        /* FreeBSD has a function for ip_id's */
        ip->ip_id = ip_newid();

        ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
        ip->ip_len = packet_length;
        if (stcb) {
            if ((stcb->asoc.ecn_allowed) && ecn_ok) {
                /* Enable ECN */
                ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk));
            } else {
                /* No ECN */
                ip->ip_tos = (u_char)(tos_value & 0xfc);
            }
        } else {
            /* no association at all */
            ip->ip_tos = (tos_value & 0xfc);
        }
        if (port) {
            ip->ip_p = IPPROTO_UDP;
        } else {
            ip->ip_p = IPPROTO_SCTP;
        }
        if (net == NULL) {
            memset(&iproute, 0, sizeof(iproute));
            ro = (sctp_route_t *) & iproute;
            memcpy(&ro->ro_dst, to, to->sa_len);
        } else {
            ro = (sctp_route_t *) & net->ro;
        }
        /* Now the address selection part */
        ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;

        /* call the routine to select the src address */
        if (net && out_of_asoc_ok == 0) {
            if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
                sctp_free_ifa(net->ro._s_addr);
                net->ro._s_addr = NULL;
                net->src_addr_selected = 0;
                if (ro->ro_rt) {
                    RTFREE(ro->ro_rt);
                    ro->ro_rt = NULL;
                }
            }
            if (net->src_addr_selected == 0) {
                /* Cache the source address */
                net->ro._s_addr = sctp_source_address_selection(inp, stcb,
                    ro, net, 0,
                    vrf_id);
                net->src_addr_selected = 1;
            }
            if (net->ro._s_addr == NULL) {
                /* No route to host */
                net->src_addr_selected = 0;
                goto no_route;
            }
            ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
        } else {
            if (over_addr == NULL) {
                struct sctp_ifa *_lsrc;

                _lsrc = sctp_source_address_selection(inp, stcb, ro,
                    net,
                    out_of_asoc_ok,
                    vrf_id);
                if (_lsrc == NULL) {
                    goto no_route;
                }
                ip->ip_src = _lsrc->address.sin.sin_addr;
                sctp_free_ifa(_lsrc);
            } else {
                ip->ip_src = over_addr->sin.sin_addr;
                SCTP_RTALLOC(ro, vrf_id);
            }
        }
        if (port) {
            udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
            udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
            udp->uh_dport = port;
            udp->uh_ulen = htons(packet_length - sizeof(struct ip));
            udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
            sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
        } else {
            sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
        }

        sctphdr->src_port = src_port;
        sctphdr->dest_port = dest_port;
        sctphdr->v_tag = v_tag;
        sctphdr->checksum = 0;
        /*
         * If source address selection fails and we find no route
         * then the ip_output should fail as well with a
         * NO_ROUTE_TO_HOST type error. We probably should catch
         * that somewhere and abort the association right away
         * (assuming this is an INIT being sent).
         */
        if ((ro->ro_rt == NULL)) {
            /*
             * src addr selection failed to find a route (or
             * valid source addr), so we can't get there from
             * here!
             */
    no_route:
            SCTPDBG(SCTP_DEBUG_OUTPUT1,
                "%s: dropped packet - no valid source addr\n",
                __FUNCTION__);
            if (net) {
                SCTPDBG(SCTP_DEBUG_OUTPUT1,
                    "Destination was ");
                SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1,
                    &net->ro._l_addr.sa);
                if (net->dest_state & SCTP_ADDR_CONFIRMED) {
                    if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
                        SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
                        sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
                            stcb,
                            SCTP_FAILED_THRESHOLD,
                            (void *)net);
                        net->dest_state &= ~SCTP_ADDR_REACHABLE;
                        net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
                        /*
                         * JRS 5/14/07 - If a
                         * destination is
                         * unreachable, the PF bit
                         * is turned off. This
                         * allows an unambiguous use
                         * of the PF bit for
                         * destinations that are
                         * reachable but potentially
                         * failed. If the
                         * destination is set to the
                         * unreachable state, also
                         * set the destination to
                         * the PF state.
                         */
                        /*
                         * Add debug message here if
                         * destination is not in PF
                         * state.
                         */
                        /*
                         * Stop any running T3
                         * timers here?
                         */
                        if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
                            net->dest_state &= ~SCTP_ADDR_PF;
                            SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
                                net);
                        }
                    }
                }
                if (stcb) {
                    if (net == stcb->asoc.primary_destination) {
                        /* need a new primary */
                        struct sctp_nets *alt;

                        alt = sctp_find_alternate_net(stcb, net, 0);
                        if (alt != net) {
                            if (sctp_set_primary_addr(stcb,
                                (struct sockaddr *)NULL,
                                alt) == 0) {
                                net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
                                if (net->ro._s_addr) {
                                    sctp_free_ifa(net->ro._s_addr);
                                    net->ro._s_addr = NULL;
                                }
                                net->src_addr_selected = 0;
                            }
                        }
                    }
                }
            }
            SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
            sctp_m_freem(m);
            return (EHOSTUNREACH);
        }
        if (ro != &iproute) {
            memcpy(&iproute, ro, sizeof(*ro));
        }
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
            (uint32_t) (ntohl(ip->ip_src.s_addr)));
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
            (uint32_t) (ntohl(ip->ip_dst.s_addr)));
        SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
            ro->ro_rt);

        if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
            /* failed to prepend data, give up */
            SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
            sctp_m_freem(m);
            return (ENOMEM);
        }
#ifdef SCTP_PACKET_LOGGING
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
            sctp_packet_log(m, packet_length);
#endif
        SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
        if (port) {
            if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
                (stcb) &&
                (stcb->asoc.loopback_scope))) {
                sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
                SCTP_STAT_INCR(sctps_sendswcrc);
            } else {
                SCTP_STAT_INCR(sctps_sendnocrc);
            }
            SCTP_ENABLE_UDP_CSUM(o_pak);
        } else {
            if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
                (stcb) &&
                (stcb->asoc.loopback_scope))) {
                m->m_pkthdr.csum_flags = CSUM_SCTP;
                m->m_pkthdr.csum_data = 0;	/* FIXME MT */
                SCTP_STAT_INCR(sctps_sendhwcrc);
            } else {
                SCTP_STAT_INCR(sctps_sendnocrc);
            }
        }
        /* send it out.  table id is taken from stcb */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
            so = SCTP_INP_SO(inp);
            SCTP_SOCKET_UNLOCK(so, 0);
        }
#endif
        SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
            atomic_add_int(&stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(stcb);
            SCTP_SOCKET_LOCK(so, 0);
            SCTP_TCB_LOCK(stcb);
            atomic_subtract_int(&stcb->asoc.refcnt, 1);
        }
#endif
        SCTP_STAT_INCR(sctps_sendpackets);
        SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
        if (ret)
            SCTP_STAT_INCR(sctps_senderrors);

        SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
        if (net == NULL) {
            /* free temporary routes */
            if (ro->ro_rt) {
                RTFREE(ro->ro_rt);
                ro->ro_rt = NULL;
            }
        } else {
            /* PMTU check versus smallest asoc MTU goes here */
            if ((ro->ro_rt != NULL) &&
                (net->ro._s_addr)) {
                uint32_t mtu;

                mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
                if (port) {
                    mtu -= sizeof(struct udphdr);
                }
                if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
#ifdef SCTP_PRINT_FOR_B_AND_M
                    SCTP_PRINTF("sctp_mtu_size_reset called after ip_output mtu-change:%d\n", mtu);
#endif
                    sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
                    net->mtu = mtu;
                }
            } else if (ro->ro_rt == NULL) {
                /* route was freed */
                if (net->ro._s_addr &&
                    net->src_addr_selected) {
                    sctp_free_ifa(net->ro._s_addr);
                    net->ro._s_addr = NULL;
                }
                net->src_addr_selected = 0;
            }
        }
        return (ret);
    }
#ifdef INET6
    else if (to->sa_family == AF_INET6) {
        uint32_t flowlabel;
        struct ip6_hdr *ip6h;
        struct route_in6 ip6route;
        struct ifnet *ifp;
        u_char flowTop;
        uint16_t flowBottom;
        u_char tosBottom, tosTop;
        struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
        int prev_scope = 0;
        struct sockaddr_in6 lsa6_storage;
        int error;
        u_short prev_port = 0;
        int len;

        if (net != NULL) {
            flowlabel = net->tos_flowlabel;
        } else {
            flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
        }

        len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
        if (port) {
            len += sizeof(struct udphdr);
        }
        newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
        if (newm == NULL) {
            sctp_m_freem(m);
            SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
            return (ENOMEM);
        }
        SCTP_ALIGN_TO_END(newm, len);
        SCTP_BUF_LEN(newm) = len;
        SCTP_BUF_NEXT(newm) = m;
        m = newm;
        packet_length = sctp_calculate_len(m);

        ip6h = mtod(m, struct ip6_hdr *);
        /*
         * We assume here that inp_flow is in host byte order within
         * the TCB!
         */
        flowBottom = flowlabel & 0x0000ffff;
        flowTop = ((flowlabel & 0x000f0000) >> 16);
        tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
        /* protect *sin6 from overwrite */
        sin6 = (struct sockaddr_in6 *)to;
        tmp = *sin6;
        sin6 = &tmp;

        /* KAME hack: embed scopeid */
        if (sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone)) != 0) {
            SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
            return (EINVAL);
        }
        if (net == NULL) {
            memset(&ip6route, 0, sizeof(ip6route));
            ro = (sctp_route_t *) & ip6route;
            memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
        } else {
            ro = (sctp_route_t *) & net->ro;
        }
3844 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
3846 tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
3849 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
3852 /* we could get no asoc if it is an out-of-the-blue (OOTB) packet */
3853 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
3855 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
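/*
 * Worked example of the ip6_flow packing above (illustrative, not in
 * the original source): with flowlabel = 0x000546ff, flowBottom =
 * 0x46ff and flowTop = 0x5, while tosTop carries the IPv6 version
 * nibble; the htonl() above therefore packs version, traffic class
 * (tosBottom) and the 20-bit flow label into one 32-bit word as
 *	(tosTop << 24) | ((tosBottom | 0x5) << 16) | 0x46ff.
 */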
3857 ip6h->ip6_nxt = IPPROTO_UDP;
3859 ip6h->ip6_nxt = IPPROTO_SCTP;
3861 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
3862 ip6h->ip6_dst = sin6->sin6_addr;
3865 * Add SRC address selection here: we can only reuse the KAME
3866 * src-addr-sel to a limited degree, since we can try its
3867 * selection but the chosen address may not be bound.
3869 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
3870 lsa6_tmp.sin6_family = AF_INET6;
3871 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
3873 if (net && out_of_asoc_ok == 0) {
3874 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
3875 sctp_free_ifa(net->ro._s_addr);
3876 net->ro._s_addr = NULL;
3877 net->src_addr_selected = 0;
3883 if (net->src_addr_selected == 0) {
3884 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
3885 /* KAME hack: embed scopeid */
3886 if (sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone)) != 0) {
3887 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3890 /* Cache the source address */
3891 net->ro._s_addr = sctp_source_address_selection(inp,
3897 (void)sa6_recoverscope(sin6);
3898 net->src_addr_selected = 1;
3900 if (net->ro._s_addr == NULL) {
3901 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
3902 net->src_addr_selected = 0;
3905 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
3907 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
3908 /* KAME hack: embed scopeid */
3909 if (sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone)) != 0) {
3910 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3913 if (over_addr == NULL) {
3914 struct sctp_ifa *_lsrc;
3916 _lsrc = sctp_source_address_selection(inp, stcb, ro,
3920 if (_lsrc == NULL) {
3923 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
3924 sctp_free_ifa(_lsrc);
3926 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
3927 SCTP_RTALLOC(ro, vrf_id);
3929 (void)sa6_recoverscope(sin6);
3931 lsa6->sin6_port = inp->sctp_lport;
3933 if (ro->ro_rt == NULL) {
3935 * src addr selection failed to find a route (or a
3936 * valid source addr), so we can't get there from here!
3942 * XXX: sa6 may not have a valid sin6_scope_id in the
3943 * non-SCOPEDROUTING case.
3945 bzero(&lsa6_storage, sizeof(lsa6_storage));
3946 lsa6_storage.sin6_family = AF_INET6;
3947 lsa6_storage.sin6_len = sizeof(lsa6_storage);
3948 lsa6_storage.sin6_addr = lsa6->sin6_addr;
3949 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
3950 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
3955 lsa6_storage.sin6_addr = lsa6->sin6_addr;
3956 lsa6_storage.sin6_port = inp->sctp_lport;
3957 lsa6 = &lsa6_storage;
3958 ip6h->ip6_src = lsa6->sin6_addr;
3961 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
3962 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
3963 udp->uh_dport = port;
3964 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
3966 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
3968 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
3971 sctphdr->src_port = src_port;
3972 sctphdr->dest_port = dest_port;
3973 sctphdr->v_tag = v_tag;
3974 sctphdr->checksum = 0;
3977 * We set the hop limit now since there is a good chance
3978 * that our ro pointer is now filled
3980 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
3981 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
3984 /* Copy to be sure something bad is not happening */
3985 sin6->sin6_addr = ip6h->ip6_dst;
3986 lsa6->sin6_addr = ip6h->ip6_src;
3989 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
3990 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
3991 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
3992 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
3993 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
3995 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
3996 /* preserve the port and scope for link local send */
3997 prev_scope = sin6->sin6_scope_id;
3998 prev_port = sin6->sin6_port;
4000 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4001 /* failed to prepend data, give up */
4003 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4006 #ifdef SCTP_PACKET_LOGGING
4007 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4008 sctp_packet_log(m, packet_length);
4010 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4012 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4014 (stcb->asoc.loopback_scope))) {
4015 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4016 SCTP_STAT_INCR(sctps_sendswcrc);
4018 SCTP_STAT_INCR(sctps_sendnocrc);
4020 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4021 udp->uh_sum = 0xffff;
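/*
 * Note (not in the original source): a computed UDP checksum of 0 is
 * transmitted as 0xffff, since 0 on the wire means "no checksum" and
 * RFC 2460 forbids omitting the UDP checksum over IPv6.
 */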
4024 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4026 (stcb->asoc.loopback_scope))) {
4027 m->m_pkthdr.csum_flags = CSUM_SCTP;
4028 m->m_pkthdr.csum_data = 0; /* FIXME MT */
4029 SCTP_STAT_INCR(sctps_sendhwcrc);
4031 SCTP_STAT_INCR(sctps_sendnocrc);
4034 /* send it out. table id is taken from stcb */
4035 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4036 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4037 so = SCTP_INP_SO(inp);
4038 SCTP_SOCKET_UNLOCK(so, 0);
4041 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4042 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4043 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4044 atomic_add_int(&stcb->asoc.refcnt, 1);
4045 SCTP_TCB_UNLOCK(stcb);
4046 SCTP_SOCKET_LOCK(so, 0);
4047 SCTP_TCB_LOCK(stcb);
4048 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4052 /* for link local this must be done */
4053 sin6->sin6_scope_id = prev_scope;
4054 sin6->sin6_port = prev_port;
4056 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4057 SCTP_STAT_INCR(sctps_sendpackets);
4058 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4060 SCTP_STAT_INCR(sctps_senderrors);
4063 /* Now, if we had a temp route, free it */
4068 /* PMTU check versus smallest asoc MTU goes here */
4069 if (ro->ro_rt == NULL) {
4070 /* Route was freed */
4071 if (net->ro._s_addr &&
4072 net->src_addr_selected) {
4073 sctp_free_ifa(net->ro._s_addr);
4074 net->ro._s_addr = NULL;
4076 net->src_addr_selected = 0;
4078 if ((ro->ro_rt != NULL) &&
4079 (net->ro._s_addr)) {
4082 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4084 (stcb->asoc.smallest_mtu > mtu)) {
4085 #ifdef SCTP_PRINT_FOR_B_AND_M
4086 SCTP_PRINTF("sctp_mtu_size_reset called after ip6_output mtu-change:%d\n",
4089 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4092 net->mtu -= sizeof(struct udphdr);
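/*
 * Worked example (illustrative, not in the original source): if the
 * route reports an MTU of 1500 and this association is tunneled over
 * UDP, the usable SCTP MTU drops by the 8-byte UDP header to 1492; if
 * that falls below asoc.smallest_mtu, sctp_mtu_size_reset() shrinks
 * the association's fragmentation point to match.
 */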
4096 if (ND_IFINFO(ifp)->linkmtu &&
4097 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4098 #ifdef SCTP_PRINT_FOR_B_AND_M
4099 SCTP_PRINTF("sctp_mtu_size_reset called via ifp ND_IFINFO() linkmtu:%d\n",
4100 ND_IFINFO(ifp)->linkmtu);
4102 sctp_mtu_size_reset(inp,
4104 ND_IFINFO(ifp)->linkmtu);
4112 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4113 ((struct sockaddr *)to)->sa_family);
4115 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4122 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4123 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4128 struct mbuf *m, *m_at, *mp_last;
4129 struct sctp_nets *net;
4130 struct sctp_init_chunk *init;
4131 struct sctp_supported_addr_param *sup_addr;
4132 struct sctp_adaptation_layer_indication *ali;
4133 struct sctp_ecn_supported_param *ecn;
4134 struct sctp_prsctp_supported_param *prsctp;
4135 struct sctp_ecn_nonce_supported_param *ecn_nonce;
4136 struct sctp_supported_chunk_types_param *pr_supported;
4137 int cnt_inits_to = 0;
4142 /* INITs always go to the primary (and usually the ONLY) address */
4144 net = stcb->asoc.primary_destination;
4146 net = TAILQ_FIRST(&stcb->asoc.nets);
4151 /* we confirm any address we send an INIT to */
4152 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4153 (void)sctp_set_primary_addr(stcb, NULL, net);
4155 /* we confirm any address we send an INIT to */
4156 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4158 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4160 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
4162 * special hook: if we are sending to a link-local address it will
4163 * not show up in our private address count.
4165 struct sockaddr_in6 *sin6l;
4167 sin6l = &net->ro._l_addr.sin6;
4168 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
4172 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4173 /* This case should not happen */
4174 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4177 /* start the INIT timer */
4178 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4180 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
4182 /* No memory, INIT timer will re-attempt. */
4183 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4186 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
4188 * assume the peer supports ASCONF so we can queue local address
4189 * changes while an INIT is in flight, before the assoc is established.
4192 stcb->asoc.peer_supports_asconf = 1;
4193 /* Now let's put the SCTP header in place */
4194 init = mtod(m, struct sctp_init_chunk *);
4195 /* now the chunk header */
4196 init->ch.chunk_type = SCTP_INITIATION;
4197 init->ch.chunk_flags = 0;
4198 /* fill in later from mbuf we build */
4199 init->ch.chunk_length = 0;
4200 /* place in my tag */
4201 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4202 /* set up some of the credits. */
4203 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4204 SCTP_MINIMAL_RWND));
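/*
 * Worked example (illustrative, not in the original source): if the
 * socket receive buffer limit were 65536 bytes and SCTP_MINIMAL_RWND
 * were 4096, the advertised a_rwnd would be max(65536, 4096) = 65536;
 * the max() only matters when the socket buffer is unusually small, or
 * when sctp_socket is NULL and the 0 fallback would otherwise be
 * advertised.
 */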
4206 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4207 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4208 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4209 /* now the address restriction */
4210 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init +
4212 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4214 /* we support 2 types: IPv6/IPv4 */
4215 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint16_t));
4216 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4217 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
4219 /* we support 1 type: IPv4 */
4220 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint8_t));
4221 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4222 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4224 SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);
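/*
 * Layout note (not in the original source): like every SCTP parameter,
 * the Supported Address Types parameter must occupy a multiple of 4
 * bytes on the wire; in the IPv4-only case above the second addr_type
 * slot is written as zero purely to pad the parameter to that
 * boundary, while the declared param_length counts only the meaningful
 * bytes.
 */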
4225 /* adaptation layer indication parameter */
4226 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
4227 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4228 ali->ph.param_length = htons(sizeof(*ali));
4229 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4230 SCTP_BUF_LEN(m) += sizeof(*ali);
4231 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
4233 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4234 /* Add NAT friendly parameter */
4235 struct sctp_paramhdr *ph;
4237 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4238 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4239 ph->param_length = htons(sizeof(struct sctp_paramhdr));
4240 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
4241 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph));
4243 /* now any cookie time extensions */
4244 if (stcb->asoc.cookie_preserve_req) {
4245 struct sctp_cookie_perserve_param *cookie_preserve;
4247 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
4248 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4249 cookie_preserve->ph.param_length = htons(
4250 sizeof(*cookie_preserve));
4251 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4252 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
4253 ecn = (struct sctp_ecn_supported_param *)(
4254 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
4255 stcb->asoc.cookie_preserve_req = 0;
4258 if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
4259 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
4260 ecn->ph.param_length = htons(sizeof(*ecn));
4261 SCTP_BUF_LEN(m) += sizeof(*ecn);
4262 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
4265 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
4267 /* And now tell the peer we do pr-sctp */
4268 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
4269 prsctp->ph.param_length = htons(sizeof(*prsctp));
4270 SCTP_BUF_LEN(m) += sizeof(*prsctp);
4272 /* And now tell the peer we do all the extensions */
4273 pr_supported = (struct sctp_supported_chunk_types_param *)
4274 ((caddr_t)prsctp + sizeof(*prsctp));
4275 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4277 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4278 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4279 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4280 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4281 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4282 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4283 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4286 * EY: if the initiator supports NR-SACKs, we need to report that
4287 * to the responder in the INIT chunk
4289 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) {
4290 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4292 p_len = sizeof(*pr_supported) + num_ext;
4293 pr_supported->ph.param_length = htons(p_len);
4294 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
4295 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
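/*
 * Worked example (illustrative, not in the original source): with six
 * extension chunk types listed, p_len = sizeof(*pr_supported) + 6 = 10
 * (assuming the usual 4-byte parameter header), SCTP_SIZE32(10) rounds
 * up to 12, and the bzero() clears the 2 trailing pad bytes so no
 * stale mbuf data leaks onto the wire.
 */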
4298 /* ECN nonce: And now tell the peer we support ECN nonce */
4299 if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
4300 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
4301 ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
4302 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
4303 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
4304 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
4306 /* add authentication parameters */
4307 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4308 struct sctp_auth_random *randp;
4309 struct sctp_auth_hmac_algo *hmacs;
4310 struct sctp_auth_chunk_list *chunks;
4312 /* attach RANDOM parameter, if available */
4313 if (stcb->asoc.authinfo.random != NULL) {
4314 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4315 p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
4316 #ifdef SCTP_AUTH_DRAFT_04
4317 randp->ph.param_type = htons(SCTP_RANDOM);
4318 randp->ph.param_length = htons(p_len);
4319 bcopy(stcb->asoc.authinfo.random->key,
4321 stcb->asoc.authinfo.random_len);
4323 /* random key already contains the header */
4324 bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
4326 /* zero out any padding required */
4327 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
4328 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4330 /* add HMAC_ALGO parameter */
4331 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4332 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
4333 (uint8_t *) hmacs->hmac_ids);
4335 p_len += sizeof(*hmacs);
4336 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4337 hmacs->ph.param_length = htons(p_len);
4338 /* zero out any padding required */
4339 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
4340 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4342 /* add CHUNKS parameter */
4343 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4344 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
4345 chunks->chunk_types);
4347 p_len += sizeof(*chunks);
4348 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4349 chunks->ph.param_length = htons(p_len);
4350 /* zero out any padding required */
4351 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
4352 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4356 /* now the addresses */
4358 struct sctp_scoping scp;
4361 * To optimize this we could put the scoping stuff into a
4362 * structure and remove the individual uint8's from the
4363 * assoc structure. Then we could just pass in the address
4364 * within the stcb, but for now this is a quick hack to get
4365 * the address stuff teased apart.
4367 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
4368 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
4369 scp.loopback_scope = stcb->asoc.loopback_scope;
4370 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
4371 scp.local_scope = stcb->asoc.local_scope;
4372 scp.site_scope = stcb->asoc.site_scope;
4374 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
4377 /* calculate the size and update the pkt header and chunk header */
4379 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4380 if (SCTP_BUF_NEXT(m_at) == NULL)
4382 p_len += SCTP_BUF_LEN(m_at);
4384 init->ch.chunk_length = htons(p_len);
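/*
 * A minimal sketch of the length walk above (illustrative only): the
 * chunk length is the sum of every mbuf in the chain, roughly
 *
 *	int len = 0;
 *	struct mbuf *n;
 *	for (n = m; n != NULL; n = SCTP_BUF_NEXT(n))
 *		len += SCTP_BUF_LEN(n);
 *
 * with m_at left pointing at the last mbuf so that padding can be
 * appended there if needed.
 */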
4386 * We pass 0 here so we do NOT set IP_DF if it's IPv4; we ignore the
4387 * return value here since the timer will drive a retransmission.
4390 /* I don't expect this to execute but we will be safe here */
4392 if ((padval) && (mp_last)) {
4394 * The compiler worries that mp_last may not be set even
4395 * though I think that is impossible :->; however, we check
4396 * mp_last here just in case.
4398 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
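/*
 * Worked example (illustrative, not in the original source): every
 * chunk must end on a 4-byte boundary (RFC 4960, section 3.2), so if
 * the INIT came out at, say, 138 bytes, padval would be 2 and two pad
 * bytes are appended to mp_last, rounding the chunk up to 140.
 */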
4400 /* Houston we have a problem, no space */
4406 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4407 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4408 (struct sockaddr *)&net->ro._l_addr,
4409 m, 0, NULL, 0, 0, 0, NULL, 0,
4410 inp->sctp_lport, stcb->rport, htonl(0),
4411 net->port, so_locked, NULL);
4412 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4413 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4414 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4418 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4419 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4422 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
4423 * pointing at the beginning of the parameters (i.e. iphlen +
4424 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4425 * end of the mbuf, verifying that all parameters are known.
4427 * For unknown parameters, build and return an mbuf containing
4428 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4429 * processing this chunk, stop and set *abort_processing to 1.
4431 * By having param_offset pre-set to where the parameters begin, it
4432 * is hoped that this routine may be reused in the future by new features.
4435 struct sctp_paramhdr *phdr, params;
4437 struct mbuf *mat, *op_err;
4438 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4439 int at, limit, pad_needed;
4440 uint16_t ptype, plen, padded_size;
4443 *abort_processing = 0;
4446 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4449 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4450 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4451 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4452 ptype = ntohs(phdr->param_type);
4453 plen = ntohs(phdr->param_length);
4454 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4455 /* whacked (malformed) parameter */
4456 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4459 limit -= SCTP_SIZE32(plen);
4461 * All parameters for all chunks that we know/understand are
4462 * listed here. We process them elsewhere and take the
4463 * appropriate stop action per the upper bits. However, this
4464 * is the generic routine processors can call to get back
4465 * an operr to either incorporate (init-ack) or send.
4467 padded_size = SCTP_SIZE32(plen);
4469 /* Parameters with variable size */
4470 case SCTP_HEARTBEAT_INFO:
4471 case SCTP_STATE_COOKIE:
4472 case SCTP_UNRECOG_PARAM:
4473 case SCTP_ERROR_CAUSE_IND:
4477 /* Parameters whose size varies within a range */
4478 case SCTP_CHUNK_LIST:
4479 case SCTP_SUPPORTED_CHUNK_EXT:
4480 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4481 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4486 case SCTP_SUPPORTED_ADDRTYPE:
4487 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4488 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4494 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4495 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4500 case SCTP_SET_PRIM_ADDR:
4501 case SCTP_DEL_IP_ADDRESS:
4502 case SCTP_ADD_IP_ADDRESS:
4503 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4504 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4505 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4510 /* Parameters with a fixed size */
4511 case SCTP_IPV4_ADDRESS:
4512 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4513 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4518 case SCTP_IPV6_ADDRESS:
4519 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4520 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
4525 case SCTP_COOKIE_PRESERVE:
4526 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
4527 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
4532 case SCTP_HAS_NAT_SUPPORT:
4535 case SCTP_ECN_NONCE_SUPPORTED:
4536 case SCTP_PRSCTP_SUPPORTED:
4538 if (padded_size != sizeof(struct sctp_paramhdr)) {
4539 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecnnonce/prsctp/nat support %d\n", plen);
4544 case SCTP_ECN_CAPABLE:
4545 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
4546 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
4551 case SCTP_ULP_ADAPTATION:
4552 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
4553 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
4558 case SCTP_SUCCESS_REPORT:
4559 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
4560 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
4565 case SCTP_HOSTNAME_ADDRESS:
4567 /* We can NOT handle HOST NAME addresses!! */
4570 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
4571 *abort_processing = 1;
4572 if (op_err == NULL) {
4573 /* Ok, need to try to get an mbuf */
4575 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4577 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4580 l_len += sizeof(struct sctp_paramhdr);
4581 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4583 SCTP_BUF_LEN(op_err) = 0;
4585 * pre-reserve space for the IP
4586 * and SCTP headers and the chunk header
4590 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4592 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4594 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4595 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4599 /* If we have space */
4600 struct sctp_paramhdr s;
4603 uint32_t cpthis = 0;
4605 pad_needed = 4 - (err_at % 4);
4606 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4607 err_at += pad_needed;
4609 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
4610 s.param_length = htons(sizeof(s) + plen);
4611 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4612 err_at += sizeof(s);
4613 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
4615 sctp_m_freem(op_err);
4617 * we are out of memory but
4618 * we still need to have a
4619 * look at what to do (the
4620 * system is in trouble though)
4625 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
4633 * we do not recognize the parameter; figure out what we do, based on the upper bits of its type
4636 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
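/*
 * Reference note (summarizing RFC 4960, section 3.2.1; not in the
 * original source): the top two bits of an unrecognized parameter type
 * encode the required action:
 *	00 - stop processing, discard, do not report
 *	01 (0x4000) - stop processing, discard, report in an ERROR
 *	10 (0x8000) - skip this parameter, continue processing
 *	11 (0xc000) - skip, continue, and report
 * so the 0x4000 test below decides whether an UNRECOGNIZED_PARAMETER
 * cause is built, and the 0x8000 test decides whether the walk may
 * continue.
 */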
4637 if ((ptype & 0x4000) == 0x4000) {
4638 /* Report bit is set?? */
4639 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
4640 if (op_err == NULL) {
4643 /* Ok need to try to get an mbuf */
4645 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4647 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4650 l_len += sizeof(struct sctp_paramhdr);
4651 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4653 SCTP_BUF_LEN(op_err) = 0;
4655 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4657 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4659 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4660 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4664 /* If we have space */
4665 struct sctp_paramhdr s;
4668 uint32_t cpthis = 0;
4670 pad_needed = 4 - (err_at % 4);
4671 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4672 err_at += pad_needed;
4674 s.param_type = htons(SCTP_UNRECOG_PARAM);
4675 s.param_length = htons(sizeof(s) + plen);
4676 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4677 err_at += sizeof(s);
4678 if (plen > sizeof(tempbuf)) {
4679 plen = sizeof(tempbuf);
4681 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
4683 sctp_m_freem(op_err);
4685 * we are out of memory but
4686 * we still need to have a
4687 * look at what to do (the
4688 * system is in trouble though)
4692 goto more_processing;
4694 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
4699 if ((ptype & 0x8000) == 0x0000) {
4700 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
4703 /* skip this chunk and continue processing */
4704 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
4705 at += SCTP_SIZE32(plen);
4710 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4714 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
4715 *abort_processing = 1;
4716 if ((op_err == NULL) && phdr) {
4720 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4722 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4724 l_len += (2 * sizeof(struct sctp_paramhdr));
4725 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4727 SCTP_BUF_LEN(op_err) = 0;
4729 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4731 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4733 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4734 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4737 if ((op_err) && phdr) {
4738 struct sctp_paramhdr s;
4741 uint32_t cpthis = 0;
4743 pad_needed = 4 - (err_at % 4);
4744 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4745 err_at += pad_needed;
4747 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4748 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
4749 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4750 err_at += sizeof(s);
4751 /* Only copy back the p-hdr that caused the issue */
4752 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
4758 sctp_are_there_new_addresses(struct sctp_association *asoc,
4759 struct mbuf *in_initpkt, int iphlen, int offset)
4762 * Given an INIT packet, look through the packet to verify that there
4763 * are NO new addresses. As we go through the parameters, add reports
4764 * of any un-understood parameters that require an error. Also, we
4765 * must return (1) to drop the packet if we see an un-understood
4766 * parameter that tells us to drop the chunk.
4768 struct sockaddr_in sin4, *sa4;
4771 struct sockaddr_in6 sin6, *sa6;
4774 struct sockaddr *sa_touse;
4775 struct sockaddr *sa;
4776 struct sctp_paramhdr *phdr, params;
4780 struct ip6_hdr *ip6h;
4784 uint16_t ptype, plen;
4787 struct sctp_nets *net;
4789 memset(&sin4, 0, sizeof(sin4));
4791 memset(&sin6, 0, sizeof(sin6));
4793 sin4.sin_family = AF_INET;
4794 sin4.sin_len = sizeof(sin4);
4796 sin6.sin6_family = AF_INET6;
4797 sin6.sin6_len = sizeof(sin6);
4800 /* First, what about the src address of the pkt? */
4801 iph = mtod(in_initpkt, struct ip *);
4802 switch (iph->ip_v) {
4804 /* source addr is IPv4 */
4805 sin4.sin_addr = iph->ip_src;
4806 sa_touse = (struct sockaddr *)&sin4;
4809 case IPV6_VERSION >> 4:
4810 /* source addr is IPv6 */
4811 ip6h = mtod(in_initpkt, struct ip6_hdr *);
4812 sin6.sin6_addr = ip6h->ip6_src;
4813 sa_touse = (struct sockaddr *)&sin6;
4821 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4822 sa = (struct sockaddr *)&net->ro._l_addr;
4823 if (sa->sa_family == sa_touse->sa_family) {
4824 if (sa->sa_family == AF_INET) {
4825 sa4 = (struct sockaddr_in *)sa;
4826 if (sa4->sin_addr.s_addr ==
4827 sin4.sin_addr.s_addr) {
4833 if (sa->sa_family == AF_INET6) {
4834 sa6 = (struct sockaddr_in6 *)sa;
4835 if (SCTP6_ARE_ADDR_EQUAL(sa6,
4845 /* New address added! No need to look further. */
4848 /* Ok so far; let's munge through the rest of the packet */
4852 offset += sizeof(struct sctp_init_chunk);
4853 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
4855 ptype = ntohs(phdr->param_type);
4856 plen = ntohs(phdr->param_length);
4857 if (ptype == SCTP_IPV4_ADDRESS) {
4858 struct sctp_ipv4addr_param *p4, p4_buf;
4860 phdr = sctp_get_next_param(mat, offset,
4861 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
4862 if (plen != sizeof(struct sctp_ipv4addr_param) ||
4866 p4 = (struct sctp_ipv4addr_param *)phdr;
4867 sin4.sin_addr.s_addr = p4->addr;
4868 sa_touse = (struct sockaddr *)&sin4;
4869 } else if (ptype == SCTP_IPV6_ADDRESS) {
4870 struct sctp_ipv6addr_param *p6, p6_buf;
4872 phdr = sctp_get_next_param(mat, offset,
4873 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
4874 if (plen != sizeof(struct sctp_ipv6addr_param) ||
4878 p6 = (struct sctp_ipv6addr_param *)phdr;
4880 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
4883 sa_touse = (struct sockaddr *)&sin6;
4886 /* ok, sa_touse points to one to check */
4888 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4889 sa = (struct sockaddr *)&net->ro._l_addr;
4890 if (sa->sa_family != sa_touse->sa_family) {
4893 if (sa->sa_family == AF_INET) {
4894 sa4 = (struct sockaddr_in *)sa;
4895 if (sa4->sin_addr.s_addr ==
4896 sin4.sin_addr.s_addr) {
4902 if (sa->sa_family == AF_INET6) {
4903 sa6 = (struct sockaddr_in6 *)sa;
4904 if (SCTP6_ARE_ADDR_EQUAL(
4913 /* New addr added! No need to look further */
4917 offset += SCTP_SIZE32(plen);
4918 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
4924 * Given an mbuf chain that was sent into us containing an INIT, build an
4925 * INIT-ACK with a COOKIE and send it back. We assume that the in_initpkt
4926 * has done a pullup to include the IPv6/IPv4 header, the SCTP header and
4927 * the initial part of the INIT message (i.e. the struct sctp_init_msg).
4930 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4931 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
4932 struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock)
4934 struct sctp_association *asoc;
4935 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
4936 struct sctp_init_ack_chunk *initack;
4937 struct sctp_adaptation_layer_indication *ali;
4938 struct sctp_ecn_supported_param *ecn;
4939 struct sctp_prsctp_supported_param *prsctp;
4940 struct sctp_ecn_nonce_supported_param *ecn_nonce;
4941 struct sctp_supported_chunk_types_param *pr_supported;
4942 union sctp_sockstore store, store1, *over_addr;
4943 struct sockaddr_in *sin, *to_sin;
4946 struct sockaddr_in6 *sin6, *to_sin6;
4952 struct ip6_hdr *ip6;
4955 struct sockaddr *to;
4956 struct sctp_state_cookie stc;
4957 struct sctp_nets *net = NULL;
4958 uint8_t *signature = NULL;
4959 int cnt_inits_to = 0;
4960 uint16_t his_limit, i_want;
4961 int abort_flag, padval;
4964 int nat_friendly = 0;
4972 if ((asoc != NULL) &&
4973 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
4974 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
4975 /* new addresses, out of here in non-cookie-wait states */
4977 * Send an ABORT; we don't add the new-address error clause,
4978 * though we do set the T bit and copy in the 0 tag. This
4979 * looks no different than if no listener was present.
4981 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port);
4985 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
4986 (offset + sizeof(struct sctp_init_chunk)),
4987 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
4990 sctp_send_abort(init_pkt, iphlen, sh,
4991 init_chk->init.initiate_tag, op_err, vrf_id, port);
4994 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
4996 /* No memory, INIT timer will re-attempt. */
4998 sctp_m_freem(op_err);
5001 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5003 /* the time I built the cookie */
5004 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5006 /* populate any tie tags */
5008 /* unlock before tag selections */
5009 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5010 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5011 stc.cookie_life = asoc->cookie_life;
5012 net = asoc->primary_destination;
5014 stc.tie_tag_my_vtag = 0;
5015 stc.tie_tag_peer_vtag = 0;
5016 /* life I will award this cookie */
5017 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5020 /* copy in the ports for later check */
5021 stc.myport = sh->dest_port;
5022 stc.peerport = sh->src_port;
5025 * If we wanted to honor cookie life extensions, we would add to
5026 * stc.cookie_life. For now we should NOT honor any extensions.
5028 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5029 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5030 struct inpcb *in_inp;
5032 /* It's a V6 socket */
5033 in_inp = (struct inpcb *)inp;
5034 stc.ipv6_addr_legal = 1;
5035 /* Now look at the binding flag to see if V4 will be legal */
5036 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
5037 stc.ipv4_addr_legal = 1;
5039 /* V4 addresses are NOT legal on the association */
5040 stc.ipv4_addr_legal = 0;
5043 /* It's a V4 socket, not V6 */
5044 stc.ipv4_addr_legal = 1;
5045 stc.ipv6_addr_legal = 0;
5048 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5053 /* now for scope setup */
5054 memset((caddr_t)&store, 0, sizeof(store));
5055 memset((caddr_t)&store1, 0, sizeof(store1));
5057 to_sin = &store1.sin;
5060 to_sin6 = &store1.sin6;
5062 iph = mtod(init_pkt, struct ip *);
5063 /* establish the to_addr's */
5064 switch (iph->ip_v) {
5066 to_sin->sin_port = sh->dest_port;
5067 to_sin->sin_family = AF_INET;
5068 to_sin->sin_len = sizeof(struct sockaddr_in);
5069 to_sin->sin_addr = iph->ip_dst;
5072 case IPV6_VERSION >> 4:
5073 ip6 = mtod(init_pkt, struct ip6_hdr *);
5074 to_sin6->sin6_addr = ip6->ip6_dst;
5075 to_sin6->sin6_scope_id = 0;
5076 to_sin6->sin6_port = sh->dest_port;
5077 to_sin6->sin6_family = AF_INET6;
5078 to_sin6->sin6_len = sizeof(struct sockaddr_in6);
5087 to = (struct sockaddr *)&store;
5088 switch (iph->ip_v) {
5091 sin->sin_family = AF_INET;
5092 sin->sin_len = sizeof(struct sockaddr_in);
5093 sin->sin_port = sh->src_port;
5094 sin->sin_addr = iph->ip_src;
5095 /* lookup address */
5096 stc.address[0] = sin->sin_addr.s_addr;
5100 stc.addr_type = SCTP_IPV4_ADDRESS;
5101 /* local from address */
5102 stc.laddress[0] = to_sin->sin_addr.s_addr;
5103 stc.laddress[1] = 0;
5104 stc.laddress[2] = 0;
5105 stc.laddress[3] = 0;
5106 stc.laddr_type = SCTP_IPV4_ADDRESS;
5107 /* scope_id is only for v6 */
5109 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5110 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
5115 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5116 /* Must use the address in this case */
5117 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
5118 stc.loopback_scope = 1;
5121 stc.local_scope = 0;
5126 case IPV6_VERSION >> 4:
5128 ip6 = mtod(init_pkt, struct ip6_hdr *);
5129 sin6->sin6_family = AF_INET6;
5130 sin6->sin6_len = sizeof(struct sockaddr_in6);
5131 sin6->sin6_port = sh->src_port;
5132 sin6->sin6_addr = ip6->ip6_src;
5133 /* lookup address */
5134 memcpy(&stc.address, &sin6->sin6_addr,
5135 sizeof(struct in6_addr));
5136 sin6->sin6_scope_id = 0;
5137 stc.addr_type = SCTP_IPV6_ADDRESS;
5139 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
5141 * FIX ME: does this have scope from rcvmsg?
5144 (void)sa6_recoverscope(sin6);
5145 stc.scope_id = sin6->sin6_scope_id;
5146 sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone));
5147 stc.loopback_scope = 1;
5148 stc.local_scope = 0;
5151 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
5153 * If the new destination is
5154 * LINK_LOCAL we must have both
5155 * site and local scope in common. Don't
5156 * set local scope though, since we
5157 * must depend on the source being
5158 * added implicitly. We cannot
5159 * assume that all links are common
5160 * just because we share one link.
5162 stc.local_scope = 0;
5166 * we start counting the private
5167 * address stuff at 1, since the
5168 * link-local address we source from
5169 * won't show up in our scoped count.
5173 * pull out the scope_id from the incoming pkt
5177 * FIX ME: does this have scope from rcvmsg?
5180 (void)sa6_recoverscope(sin6);
5181 stc.scope_id = sin6->sin6_scope_id;
5182 sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone));
5183 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
5185 * If the new destination is
5186 * SITE_LOCAL then we must have site scope in common.
5191 memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr));
5192 stc.laddr_type = SCTP_IPV6_ADDRESS;
5202 /* set the scope per the existing tcb */
5205 struct sctp_nets *lnet;
5209 stc.loopback_scope = asoc->loopback_scope;
5210 stc.ipv4_scope = asoc->ipv4_local_scope;
5211 stc.site_scope = asoc->site_scope;
5212 stc.local_scope = asoc->local_scope;
5214 /* Why do we not consider IPv4 LL addresses? */
5215 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5216 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5217 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5219 * if we have a LL address, start counting at 1
5227 /* use the net pointer */
5228 to = (struct sockaddr *)&net->ro._l_addr;
5229 switch (to->sa_family) {
5231 sin = (struct sockaddr_in *)to;
5232 stc.address[0] = sin->sin_addr.s_addr;
5236 stc.addr_type = SCTP_IPV4_ADDRESS;
5237 if (net->src_addr_selected == 0) {
5239 * strange case here; the INIT should have
5240 * done the selection.
5242 net->ro._s_addr = sctp_source_address_selection(inp,
5243 stcb, (sctp_route_t *) & net->ro,
5245 if (net->ro._s_addr == NULL)
5248 net->src_addr_selected = 1;
5251 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5252 stc.laddress[1] = 0;
5253 stc.laddress[2] = 0;
5254 stc.laddress[3] = 0;
5255 stc.laddr_type = SCTP_IPV4_ADDRESS;
5259 sin6 = (struct sockaddr_in6 *)to;
5260 memcpy(&stc.address, &sin6->sin6_addr,
5261 sizeof(struct in6_addr));
5262 stc.addr_type = SCTP_IPV6_ADDRESS;
5263 if (net->src_addr_selected == 0) {
5265 * strange case here; the INIT should have
5266 * done the selection.
5268 net->ro._s_addr = sctp_source_address_selection(inp,
5269 stcb, (sctp_route_t *) & net->ro,
5271 if (net->ro._s_addr == NULL)
5274 net->src_addr_selected = 1;
5276 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5277 sizeof(struct in6_addr));
5278 stc.laddr_type = SCTP_IPV6_ADDRESS;
5283 /* Now let's put the SCTP header in place */
5284 initack = mtod(m, struct sctp_init_ack_chunk *);
5285 /* Save it off for quick ref */
5286 stc.peers_vtag = init_chk->init.initiate_tag;
5288 memcpy(stc.identification, SCTP_VERSION_STRING,
5289 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5290 /* now the chunk header */
5291 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5292 initack->ch.chunk_flags = 0;
5293 /* fill in later from mbuf we build */
5294 initack->ch.chunk_length = 0;
5295 /* place in my tag */
5296 if ((asoc != NULL) &&
5297 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5298 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5299 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5300 /* re-use the v-tags and init-seq here */
5301 initack->init.initiate_tag = htonl(asoc->my_vtag);
5302 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5304 uint32_t vtag, itsn;
5306 if (hold_inp_lock) {
5307 SCTP_INP_INCR_REF(inp);
5308 SCTP_INP_RUNLOCK(inp);
5311 atomic_add_int(&asoc->refcnt, 1);
5312 SCTP_TCB_UNLOCK(stcb);
5314 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5315 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5317 * Got a duplicate vtag from some peer behind a
5318 * NAT; make sure we don't use it.
5322 initack->init.initiate_tag = htonl(vtag);
5323 /* get a TSN to use too */
5324 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5325 initack->init.initial_tsn = htonl(itsn);
5326 SCTP_TCB_LOCK(stcb);
5327 atomic_add_int(&asoc->refcnt, -1);
5329 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5330 initack->init.initiate_tag = htonl(vtag);
5331 /* get a TSN to use too */
5332 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5334 if (hold_inp_lock) {
5335 SCTP_INP_RLOCK(inp);
5336 SCTP_INP_DECR_REF(inp);
5339 /* save away my tag too */
5340 stc.my_vtag = initack->init.initiate_tag;
5342 /* set up some of the credits. */
5343 so = inp->sctp_socket;
5345 /* memory problem */
5349 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5351 /* set what I want */
5352 his_limit = ntohs(init_chk->init.num_inbound_streams);
5353 /* choose what I want */
5355 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5356 i_want = asoc->streamoutcnt;
5358 i_want = inp->sctp_ep.pre_open_stream_count;
5361 i_want = inp->sctp_ep.pre_open_stream_count;
5363 if (his_limit < i_want) {
5364 /* I Want more :< */
5365 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5367 /* I can have what I want :> */
5368 initack->init.num_outbound_streams = htons(i_want);
5370 /* tell him his limit. */
5371 initack->init.num_inbound_streams =
5372 htons(inp->sctp_ep.max_open_streams_intome);
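/*
 * Worked example (illustrative, not in the original source): if the
 * INIT advertised num_inbound_streams = 10 (his_limit) and we want 17
 * outbound streams (i_want), we must settle for 10 outbound; had he
 * allowed 32, we would ask for exactly 17. Our own inbound limit is
 * always reported from max_open_streams_intome, independent of his
 * numbers.
 */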
5374 /* adaptation layer indication parameter */
5375 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5376 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5377 ali->ph.param_length = htons(sizeof(*ali));
5378 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5379 SCTP_BUF_LEN(m) += sizeof(*ali);
5380 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5383 if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
5384 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5385 ecn->ph.param_length = htons(sizeof(*ecn));
5386 SCTP_BUF_LEN(m) += sizeof(*ecn);
5388 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5391 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5393 /* And now tell the peer we do pr-sctp */
5394 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5395 prsctp->ph.param_length = htons(sizeof(*prsctp));
5396 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5398 /* Add NAT friendly parameter */
5399 struct sctp_paramhdr *ph;
5401 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5402 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5403 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5404 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5406 /* And now tell the peer we do all the extensions */
5407 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5408 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5410 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5411 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5412 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5413 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5414 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5415 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5416 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5418 * EY: if the sysctl variable is set, tell the assoc initiator that we support NR-SACK
5421 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5422 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5423 p_len = sizeof(*pr_supported) + num_ext;
5424 pr_supported->ph.param_length = htons(p_len);
5425 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5426 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5428 /* ECN nonce: And now tell the peer we support ECN nonce */
5429 if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
5430 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
5431 ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
5432 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
5433 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
5434 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
5436 /* add authentication parameters */
5437 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5438 struct sctp_auth_random *randp;
5439 struct sctp_auth_hmac_algo *hmacs;
5440 struct sctp_auth_chunk_list *chunks;
5441 uint16_t random_len;
5443 /* generate and add RANDOM parameter */
5444 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5445 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5446 randp->ph.param_type = htons(SCTP_RANDOM);
5447 p_len = sizeof(*randp) + random_len;
5448 randp->ph.param_length = htons(p_len);
5449 SCTP_READ_RANDOM(randp->random_data, random_len);
5450 /* zero out any padding required */
5451 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5452 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5454 /* add HMAC_ALGO parameter */
5455 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5456 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5457 (uint8_t *) hmacs->hmac_ids);
5459 p_len += sizeof(*hmacs);
5460 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5461 hmacs->ph.param_length = htons(p_len);
5462 /* zero out any padding required */
5463 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5464 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5466 /* add CHUNKS parameter */
5467 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5468 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5469 chunks->chunk_types);
5471 p_len += sizeof(*chunks);
5472 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5473 chunks->ph.param_length = htons(p_len);
5474 /* zero out any padding required */
5475 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5476 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5480 /* now the addresses */
5482 struct sctp_scoping scp;
5485 * To optimize this we could put the scoping stuff into a
5486 * structure and remove the individual uint8's from the stc
5487 * structure. Then we could just pass in the address within
5488 * the stc, but for now this is a quick hack to get the
5489 * address stuff teased apart.
5491 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5492 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5493 scp.loopback_scope = stc.loopback_scope;
5494 scp.ipv4_local_scope = stc.ipv4_scope;
5495 scp.local_scope = stc.local_scope;
5496 scp.site_scope = stc.site_scope;
5497 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
5500 /* tack on the operational error if present */
5508 llen += SCTP_BUF_LEN(ol);
5509 ol = SCTP_BUF_NEXT(ol);
5512 /* must add a pad to the param */
5513 uint32_t cpthis = 0;
5516 padlen = 4 - (llen % 4);
5517 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
5519 while (SCTP_BUF_NEXT(m_at) != NULL) {
5520 m_at = SCTP_BUF_NEXT(m_at);
5522 SCTP_BUF_NEXT(m_at) = op_err;
5523 while (SCTP_BUF_NEXT(m_at) != NULL) {
5524 m_at = SCTP_BUF_NEXT(m_at);
5527 /* pre-calculate the size and update the pkt header and chunk header */
5529 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5530 p_len += SCTP_BUF_LEN(m_tmp);
5531 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5532 /* m_tmp should now point to last one */
5537 /* Now we must build a cookie */
5538 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m, 0, &stc, &signature);
5539 if (m_cookie == NULL) {
5540 /* memory problem */
5544 /* Now append the cookie to the end and update the space/size */
5545 SCTP_BUF_NEXT(m_tmp) = m_cookie;
5547 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5548 p_len += SCTP_BUF_LEN(m_tmp);
5549 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5550 /* m_tmp should now point to last one */
5556 * Place in the size, but we don't include the last pad (if any) in the chunk size.
5559 initack->ch.chunk_length = htons(p_len);
5562 * Time to sign the cookie. We don't sign over the cookie signature
5563 * itself; thus we set the trailer.
5565 (void)sctp_hmac_m(SCTP_HMAC,
5566 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
5567 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
5568 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
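/*
 * Note (not in the original source): the HMAC is computed over the
 * cookie parameter starting just past its parameter header and
 * excludes the SCTP_SIGNATURE_SIZE trailer where the digest itself is
 * stored; when the COOKIE-ECHO returns, the same keyed computation
 * over the same span must reproduce the stored signature, which is
 * what makes the cookie self-authenticating without per-INIT server
 * state.
 */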
5570 * We pass 0 here so we do NOT set IP_DF if it's IPv4; we ignore the
5571 * return value here since the timer will drive a retransmission.
5574 if ((padval) && (mp_last)) {
5575 /* see my previous comments on mp_last */
5578 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
5580 /* Houston we have a problem, no space */
5586 if (stc.loopback_scope) {
5587 over_addr = &store1;
5592 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
5594 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
5595 port, SCTP_SO_NOT_LOCKED, over_addr);
5596 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5601 sctp_insert_on_wheel(struct sctp_tcb *stcb,
5602 struct sctp_association *asoc,
5603 struct sctp_stream_out *strq, int holds_lock)
5605 struct sctp_stream_out *stre, *strn;
5607 if (holds_lock == 0) {
5608 SCTP_TCB_SEND_LOCK(stcb);
5610 if ((strq->next_spoke.tqe_next) ||
5611 (strq->next_spoke.tqe_prev)) {
5612 /* already on wheel */
5615 stre = TAILQ_FIRST(&asoc->out_wheel);
5617 /* only one on wheel */
5618 TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke);
5621 for (; stre; stre = strn) {
5622 strn = TAILQ_NEXT(stre, next_spoke);
5623 if (stre->stream_no > strq->stream_no) {
5624 TAILQ_INSERT_BEFORE(stre, strq, next_spoke);
5626 } else if (stre->stream_no == strq->stream_no) {
5627 /* huh, should not happen */
5629 } else if (strn == NULL) {
5630 /* next one is null */
5631 TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq,
5636 if (holds_lock == 0) {
5637 SCTP_TCB_SEND_UNLOCK(stcb);
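/*
 * Worked example (illustrative, not in the original source): with
 * streams 1, 3 and 7 already on the wheel, inserting stream 5 walks
 * past 1 and 3 and lands TAILQ_INSERT_BEFORE stream 7, keeping the
 * wheel sorted by stream_no; inserting stream 9 falls off the end of
 * the list and is appended after 7.
 */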
5642 sctp_remove_from_wheel(struct sctp_tcb *stcb,
5643 struct sctp_association *asoc,
5644 struct sctp_stream_out *strq,
5647 /* take it off, then set it up so we know it is not on the wheel */
5648 if (holds_lock == 0)
5649 SCTP_TCB_SEND_LOCK(stcb);
5650 if (TAILQ_FIRST(&strq->outqueue)) {
5651 /* more was added */
5652 if (holds_lock == 0)
5653 SCTP_TCB_SEND_UNLOCK(stcb);
5656 TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
5657 strq->next_spoke.tqe_next = NULL;
5658 strq->next_spoke.tqe_prev = NULL;
5659 if (holds_lock == 0)
5660 SCTP_TCB_SEND_UNLOCK(stcb);
5664 sctp_prune_prsctp(struct sctp_tcb *stcb,
5665 struct sctp_association *asoc,
5666 struct sctp_sndrcvinfo *srcv,
5670 struct sctp_tmit_chunk *chk, *nchk;
5672 SCTP_TCB_LOCK_ASSERT(stcb);
5673 if ((asoc->peer_supports_prsctp) &&
5674 (asoc->sent_queue_cnt_removeable > 0)) {
5675 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
5677 * Look for chunks marked with the PR_SCTP flag AND
5678 * the buffer space flag. If the one being sent is of
5679 * equal or greater priority, then purge the old one
5680 * and free some space.
5682 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
5684 * This one is PR-SCTP AND buffer space limited
5687 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5689 * Lower numbers equate to higher
5690 * priority, so if the one we are
5691 * looking at has a larger or equal
5692 * priority value we want to drop the
5693 * data and NOT retransmit it.
5697 * We release the book_size
5698 * if the mbuf is here
5703 if (chk->sent > SCTP_DATAGRAM_UNSENT)
5704 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
5706 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
5707 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5710 freed_spc += ret_spc;
5711 if (freed_spc >= dataout) {
5714 } /* if chunk was present */
5715 } /* if of sufficient priority */
5716 } /* if chunk has the flag enabled */
5717 } /* tailqforeach */
5719 chk = TAILQ_FIRST(&asoc->send_queue);
5721 nchk = TAILQ_NEXT(chk, sctp_next);
5722 /* Here we must move to the sent queue and mark */
5723 if (PR_SCTP_TTL_ENABLED(chk->flags)) {
5724 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5727 * We release the book_size
5728 * if the mbuf is here
5732 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5733 SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
5736 freed_spc += ret_spc;
5737 if (freed_spc >= dataout) {
5740 } /* end if chk->data */
5741 } /* end if right class */
5742 } /* end if chk pr-sctp */
5744 } /* end while (chk) */
5745 } /* if enabled in asoc */
5749 sctp_get_frag_point(struct sctp_tcb *stcb,
5750 struct sctp_association *asoc)
5755 * For endpoints that have both v6 and v4 addresses we must reserve
5756 * room for the IPv6 header; for those that are only dealing with V4
5757 * we use a larger frag point.
5759 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5760 ovh = SCTP_MED_OVERHEAD;
5762 ovh = SCTP_MED_V4_OVERHEAD;
5765 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
5766 siz = asoc->smallest_mtu - ovh;
5768 siz = (stcb->asoc.sctp_frag_point - ovh);
5770 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
5772 /* A data chunk MUST fit in a cluster */
5773 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
5776 /* adjust for an AUTH chunk if DATA requires auth */
5777 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
5778 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
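/*
 * Worked example (illustrative, not in the original source): with
 * asoc->smallest_mtu = 1500 on a v4-only endpoint, siz starts at 1500
 * minus the v4 overhead constant; if DATA chunks must be
 * authenticated, the AUTH chunk length for the negotiated HMAC is
 * subtracted as well, and the result is then word-aligned (see below)
 * before use as the fragmentation point.
 */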
5781 /* make it an even word boundary please */
5788 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
5792 * We assume that the user wants PR_SCTP_TTL if the user provides a
5793 * positive lifetime but does not specify any PR_SCTP policy. This
5794 * is a BAD assumption and causes problems at least with the
5795 * U-Vancouver MPI folks. I will change this to be: no policy means NONE.
5798 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
5799 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
5804 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
5805 case CHUNK_FLAGS_PR_SCTP_BUF:
5807 * Time to live is a priority stored in tv_sec when doing
5808 * the buffer drop thing.
5810 sp->ts.tv_sec = sp->timetolive;
5813 case CHUNK_FLAGS_PR_SCTP_TTL:
5817 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
5818 tv.tv_sec = sp->timetolive / 1000;
5819 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
5821 * TODO sctp_constants.h needs alternative time
5822 * macros when _KERNEL is undefined.
5824 timevaladd(&sp->ts, &tv);
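/*
 * Worked example (illustrative, not in the original source): a
 * timetolive of 1500 ms converts to tv_sec = 1500 / 1000 = 1 and
 * tv_usec = (1500 * 1000) % 1000000 = 500000, which timevaladd()
 * folds into the current time to give the absolute drop deadline.
 */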
5827 case CHUNK_FLAGS_PR_SCTP_RTX:
5829 * Time to live is the number of retransmissions, stored in tv_sec.
5832 sp->ts.tv_sec = sp->timetolive;
5836 SCTPDBG(SCTP_DEBUG_USRREQ1,
5837 "Unknown PR_SCTP policy %u.\n",
5838 PR_SCTP_POLICY(sp->sinfo_flags));
5844 sctp_msg_append(struct sctp_tcb *stcb,
5845 struct sctp_nets *net,
5847 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
5849 int error = 0, holds_lock;
5851 struct sctp_stream_queue_pending *sp = NULL;
5852 struct sctp_stream_out *strm;
5855 * Given an mbuf chain, put it into the association send queue and
5856 * place it on the wheel
5858 holds_lock = hold_stcb_lock;
5859 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
5860 /* Invalid stream number */
5861 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
5865 if ((stcb->asoc.stream_locked) &&
5866 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
5867 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
5871 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
5872 /* Now can we send this? */
5873 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
5874 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5875 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
5876 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
5877 /* got data while shutting down */
5878 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
5882 sctp_alloc_a_strmoq(stcb, sp);
5884 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
5888 sp->sinfo_flags = srcv->sinfo_flags;
5889 sp->timetolive = srcv->sinfo_timetolive;
5890 sp->ppid = srcv->sinfo_ppid;
5891 sp->context = srcv->sinfo_context;
5893 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
5897 sp->net = stcb->asoc.primary_destination;
5900 atomic_add_int(&sp->net->ref_count, 1);
5901 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
5902 sp->stream = srcv->sinfo_stream;
5903 sp->msg_is_complete = 1;
5904 sp->sender_all_done = 1;
5907 sp->tail_mbuf = NULL;
5910 sctp_set_prsctp_policy(sp);
5912 * We could in theory (for sendall) pass the length in, but we would
5913 * still have to hunt through the chain since we need to set up the
5917 if (SCTP_BUF_NEXT(at) == NULL)
5919 sp->length += SCTP_BUF_LEN(at);
5920 at = SCTP_BUF_NEXT(at);
5922 SCTP_TCB_SEND_LOCK(stcb);
5923 sctp_snd_sb_alloc(stcb, sp->length);
5924 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
5925 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
5926 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
5927 sp->strseq = strm->next_sequence_sent;
5928 strm->next_sequence_sent++;
5930 if ((strm->next_spoke.tqe_next == NULL) &&
5931 (strm->next_spoke.tqe_prev == NULL)) {
5932 /* Not on wheel, insert */
5933 sctp_insert_on_wheel(stcb, &stcb->asoc, strm, 1);
5936 SCTP_TCB_SEND_UNLOCK(stcb);
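/*
 * Sketch of the length/tail walk done above on the user's mbuf chain,
 * using a hypothetical minimal buffer type instead of struct mbuf.
 * Disabled illustrative code.
 */
#if 0
#include <stddef.h>

struct ebuf {
	struct ebuf *next;
	int len;
};

static int
example_chain_length(struct ebuf *chain, struct ebuf **tail)
{
	int total = 0;
	struct ebuf *at;

	for (at = chain; at != NULL; at = at->next) {
		total += at->len;
		if (at->next == NULL)
			*tail = at;	/* remember the tail for later appends */
	}
	return (total);
}
#endif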
5945 static struct mbuf *
5946 sctp_copy_mbufchain(struct mbuf *clonechain,
5947 struct mbuf *outchain,
5948 struct mbuf **endofchain,
5951 uint8_t copy_by_ref)
5954 struct mbuf *appendchain;
5958 if (endofchain == NULL) {
5962 sctp_m_freem(outchain);
5965 if (can_take_mbuf) {
5966 appendchain = clonechain;
5969 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
5971 /* It's not in a cluster */
5972 if (*endofchain == NULL) {
5973 /* let's get an mbuf cluster */
5974 if (outchain == NULL) {
5975 /* This is the general case */
5977 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
5978 if (outchain == NULL) {
5981 SCTP_BUF_LEN(outchain) = 0;
5982 *endofchain = outchain;
5983 /* get the prepend space */
5984 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
5987 * We really should not get a NULL
5993 if (SCTP_BUF_NEXT(m) == NULL) {
5997 m = SCTP_BUF_NEXT(m);
6000 if (*endofchain == NULL) {
6002 * huh, TSNH XXX maybe we
6005 sctp_m_freem(outchain);
6009 /* get the new end of length */
6010 len = M_TRAILINGSPACE(*endofchain);
6012 /* how much is left at the end? */
6013 len = M_TRAILINGSPACE(*endofchain);
6015 /* Find the end of the data, for appending */
6016 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6018 /* Now let's copy it out */
6019 if (len >= sizeofcpy) {
6020 /* It all fits, copy it in */
6021 m_copydata(clonechain, 0, sizeofcpy, cp);
6022 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6024 /* fill up the end of the chain */
6026 m_copydata(clonechain, 0, len, cp);
6027 SCTP_BUF_LEN((*endofchain)) += len;
6028 /* now we need another one */
6031 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6036 SCTP_BUF_NEXT((*endofchain)) = m;
6038 cp = mtod((*endofchain), caddr_t);
6039 m_copydata(clonechain, len, sizeofcpy, cp);
6040 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6044 /* copy the old-fashioned way */
6045 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
6046 #ifdef SCTP_MBUF_LOGGING
6047 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6052 if (SCTP_BUF_IS_EXTENDED(mat)) {
6053 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6055 mat = SCTP_BUF_NEXT(mat);
6061 if (appendchain == NULL) {
6064 sctp_m_freem(outchain);
6068 /* tack on to the end */
6069 if (*endofchain != NULL) {
6070 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6074 if (SCTP_BUF_NEXT(m) == NULL) {
6075 SCTP_BUF_NEXT(m) = appendchain;
6078 m = SCTP_BUF_NEXT(m);
6082 * save off the end and update the end-chain position
6086 if (SCTP_BUF_NEXT(m) == NULL) {
6090 m = SCTP_BUF_NEXT(m);
6094 /* save off the end and update the end-chain position */
6097 if (SCTP_BUF_NEXT(m) == NULL) {
6101 m = SCTP_BUF_NEXT(m);
6103 return (appendchain);
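/*
 * Sketch of the small-copy path above: if the payload fits in the
 * trailing space of the current buffer it is copied in place;
 * otherwise the tail is filled and the remainder goes into a fresh
 * buffer. Types and allocator are hypothetical stand-ins. Disabled.
 */
#if 0
#include <stdlib.h>
#include <string.h>

#define EX_BUF_SIZE 2048

struct flatbuf {
	struct flatbuf *next;
	int len;
	char data[EX_BUF_SIZE];
};

static struct flatbuf *
example_append(struct flatbuf *tail, const char *src, int sizeofcpy)
{
	int space = EX_BUF_SIZE - tail->len;

	if (space >= sizeofcpy) {
		/* It all fits, copy it in. */
		memcpy(tail->data + tail->len, src, sizeofcpy);
		tail->len += sizeofcpy;
		return (tail);
	}
	/* Fill up the end of this buffer ... */
	memcpy(tail->data + tail->len, src, space);
	tail->len = EX_BUF_SIZE;
	src += space;
	sizeofcpy -= space;
	/* ... and put the rest in a new one. */
	tail->next = calloc(1, sizeof(struct flatbuf));
	if (tail->next == NULL)
		return (NULL);
	tail = tail->next;
	memcpy(tail->data, src, sizeofcpy);
	tail->len = sizeofcpy;
	return (tail);
}
#endif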
6108 sctp_med_chunk_output(struct sctp_inpcb *inp,
6109 struct sctp_tcb *stcb,
6110 struct sctp_association *asoc,
6113 int control_only, int from_where,
6114 struct timeval *now, int *now_filled, int frag_point, int so_locked
6115 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6121 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6124 struct sctp_copy_all *ca;
6127 int added_control = 0;
6128 int un_sent, do_chunk_output = 1;
6129 struct sctp_association *asoc;
6131 ca = (struct sctp_copy_all *)ptr;
6132 if (ca->m == NULL) {
6135 if (ca->inp != inp) {
6139 if ((ca->m) && ca->sndlen) {
6140 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
6142 /* can't copy so we are done */
6146 #ifdef SCTP_MBUF_LOGGING
6147 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6152 if (SCTP_BUF_IS_EXTENDED(mat)) {
6153 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6155 mat = SCTP_BUF_NEXT(mat);
6162 SCTP_TCB_LOCK_ASSERT(stcb);
6163 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6164 /* Abort this assoc with m as the user defined reason */
6166 struct sctp_paramhdr *ph;
6168 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
6170 ph = mtod(m, struct sctp_paramhdr *);
6171 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6172 ph->param_length = htons(ca->sndlen);
6175 * We add one here to keep the assoc from
6176 * disappearing on us.
6178 atomic_add_int(&stcb->asoc.refcnt, 1);
6179 sctp_abort_an_association(inp, stcb,
6180 SCTP_RESPONSE_TO_USER_REQ,
6181 m, SCTP_SO_NOT_LOCKED);
6183 * sctp_abort_an_association calls sctp_free_asoc(),
6184 * but free_asoc will NOT free the assoc since we
6185 * incremented the refcnt. We do this to prevent it
6186 * being freed and things getting tricky, since we
6187 * could end up (from free_asoc) calling inpcb_free,
6188 * which would take a recursive lock on the
6189 * iterator lock. As a consequence the stcb is
6190 * returned to us unlocked; since free_asoc returns
6191 * with either no TCB or the TCB unlocked, we must
6192 * relock here, so we can unlock later in the
6193 * iterator timer.
6195 SCTP_TCB_LOCK(stcb);
6196 atomic_add_int(&stcb->asoc.refcnt, -1);
6197 goto no_chunk_output;
6201 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
6205 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6206 /* shutdown this assoc */
6209 cnt = sctp_is_there_unsent_data(stcb);
6211 if (TAILQ_EMPTY(&asoc->send_queue) &&
6212 TAILQ_EMPTY(&asoc->sent_queue) &&
6214 if (asoc->locked_on_sending) {
6218 * there is nothing queued to send, so I'm
6221 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6222 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6223 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6225 * only send SHUTDOWN the first time
6228 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
6229 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6230 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6232 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6233 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6234 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6235 asoc->primary_destination);
6236 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6237 asoc->primary_destination);
6239 do_chunk_output = 0;
6243 * we still got (or just got) data to send,
6244 * so set SHUTDOWN_PENDING
6247 * XXX sockets draft says that SCTP_EOF
6248 * should be sent with no data. currently,
6249 * we will allow user data to be sent first
6250 * and move to SHUTDOWN-PENDING
6252 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6253 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6254 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6255 if (asoc->locked_on_sending) {
6257 * Locked to send out the
6260 struct sctp_stream_queue_pending *sp;
6262 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6264 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6265 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6268 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6269 if (TAILQ_EMPTY(&asoc->send_queue) &&
6270 TAILQ_EMPTY(&asoc->sent_queue) &&
6271 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6273 atomic_add_int(&stcb->asoc.refcnt, 1);
6274 sctp_abort_an_association(stcb->sctp_ep, stcb,
6275 SCTP_RESPONSE_TO_USER_REQ,
6276 NULL, SCTP_SO_NOT_LOCKED);
6277 atomic_add_int(&stcb->asoc.refcnt, -1);
6278 goto no_chunk_output;
6280 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6281 asoc->primary_destination);
6287 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6288 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6290 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6291 (stcb->asoc.total_flight > 0) &&
6292 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
6294 do_chunk_output = 0;
6296 if (do_chunk_output)
6297 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6298 else if (added_control) {
6299 int num_out = 0, reason = 0, now_filled = 0;
6303 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6304 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6305 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
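/*
 * Sketch of the Nagle-style test above: with NODELAY off, data in
 * flight, and less than a full MTU's worth queued, output is held
 * back so small sends can coalesce. Hypothetical names; disabled.
 */
#if 0
#include <stdint.h>

static int
example_hold_output(int nodelay, uint32_t total_flight,
    uint32_t un_sent, uint32_t smallest_mtu, uint32_t overhead)
{
	if (!nodelay && (total_flight > 0) &&
	    (un_sent < smallest_mtu - overhead))
		return (1);	/* defer, let data accumulate */
	return (0);		/* send now */
}
#endif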
6316 sctp_sendall_completes(void *ptr, uint32_t val)
6318 struct sctp_copy_all *ca;
6320 ca = (struct sctp_copy_all *)ptr;
6322 * Do a notify here? Kacheong suggests that the notify be done at
6323 * the send time.. so you would push up a notification if any send
6324 * failed. Don't know if this is feasible since the only failures we
6325 * have are "memory" related and if you cannot get an mbuf to send
6326 * the data you surely can't get an mbuf to send up to notify the
6327 * user you can't send the data :->
6330 /* now free everything */
6331 sctp_m_freem(ca->m);
6332 SCTP_FREE(ca, SCTP_M_COPYAL);
6336 #define MC_ALIGN(m, len) do { \
6337 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
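/*
 * Sketch of the MC_ALIGN arithmetic: reserve leading space so that
 * len bytes sit at the end of a cluster, rounded down to a long
 * boundary. Plain arithmetic only, no mbuf types. Disabled.
 */
#if 0
#include <stdio.h>

#define EX_MCLBYTES 2048

int
main(void)
{
	unsigned long len = 100;
	unsigned long resv;

	/* Same expression the macro hands to SCTP_BUF_RESV_UF(). */
	resv = (EX_MCLBYTES - len) & ~(sizeof(long) - 1);
	printf("reserve %lu bytes, data starts long-aligned\n", resv);
	return (0);
}
#endif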
6342 static struct mbuf *
6343 sctp_copy_out_all(struct uio *uio, int len)
6345 struct mbuf *ret, *at;
6346 int left, willcpy, cancpy, error;
6348 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
6354 SCTP_BUF_LEN(ret) = 0;
6355 /* save space for the data chunk header */
6356 cancpy = M_TRAILINGSPACE(ret);
6357 willcpy = min(cancpy, left);
6360 /* Align data to the end */
6361 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6367 SCTP_BUF_LEN(at) = willcpy;
6368 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6371 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
6372 if (SCTP_BUF_NEXT(at) == NULL) {
6375 at = SCTP_BUF_NEXT(at);
6376 SCTP_BUF_LEN(at) = 0;
6377 cancpy = M_TRAILINGSPACE(at);
6378 willcpy = min(cancpy, left);
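/*
 * Sketch of the copy loop above: pull "left" bytes out of a source in
 * cluster-sized pieces, linking a new buffer whenever the current one
 * fills. fread() from a FILE * stands in for uiomove(); the buffer
 * type is a hypothetical stand-in. Disabled.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define EX_CLUSTER 2048

struct cbuf {
	struct cbuf *next;
	int len;
	char data[EX_CLUSTER];
};

static struct cbuf *
example_copy_out_all(FILE *src, int left)
{
	struct cbuf *ret, *at;
	int cancpy, willcpy;

	ret = at = calloc(1, sizeof(*ret));
	if (ret == NULL)
		return (NULL);
	cancpy = EX_CLUSTER;
	willcpy = cancpy < left ? cancpy : left;
	while (left > 0) {
		if (fread(at->data, 1, willcpy, src) != (size_t)willcpy)
			break;		/* caller frees on error */
		at->len = willcpy;
		left -= willcpy;
		if (left > 0) {
			at->next = calloc(1, sizeof(*at));
			if (at->next == NULL)
				break;
			at = at->next;
			cancpy = EX_CLUSTER;
			willcpy = cancpy < left ? cancpy : left;
		}
	}
	return (ret);
}
#endif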
6385 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6386 struct sctp_sndrcvinfo *srcv)
6389 struct sctp_copy_all *ca;
6391 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6395 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6398 memset(ca, 0, sizeof(struct sctp_copy_all));
6401 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6403 * take off the sendall flag, it would be bad if we failed to do
6406 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6407 /* get length and mbuf chain */
6409 ca->sndlen = uio->uio_resid;
6410 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6411 if (ca->m == NULL) {
6412 SCTP_FREE(ca, SCTP_M_COPYAL);
6413 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6417 /* Gather the length of the send */
6423 ca->sndlen += SCTP_BUF_LEN(m);
6424 m = SCTP_BUF_NEXT(m);
6428 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6429 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6430 SCTP_ASOC_ANY_STATE,
6432 sctp_sendall_completes, inp, 1);
6434 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6435 SCTP_FREE(ca, SCTP_M_COPYAL);
6436 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6444 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6446 struct sctp_tmit_chunk *chk, *nchk;
6448 chk = TAILQ_FIRST(&asoc->control_send_queue);
6450 nchk = TAILQ_NEXT(chk, sctp_next);
6451 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6452 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6454 sctp_m_freem(chk->data);
6457 asoc->ctrl_queue_cnt--;
6458 sctp_free_a_chunk(stcb, chk);
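/*
 * Sketch of the removal idiom above: when deleting while walking a
 * TAILQ, the next pointer is saved before the unlink so the walk can
 * continue safely. Uses the standard <sys/queue.h> macros; the node
 * type is a hypothetical stand-in. Disabled.
 */
#if 0
#include <sys/queue.h>
#include <stdlib.h>

struct node {
	TAILQ_ENTRY(node) link;
	int id;
};
TAILQ_HEAD(nhead, node);

static void
example_toss(struct nhead *head, int victim_id)
{
	struct node *n, *nnext;

	for (n = TAILQ_FIRST(head); n != NULL; n = nnext) {
		nnext = TAILQ_NEXT(n, link);	/* save before unlink */
		if (n->id == victim_id) {
			TAILQ_REMOVE(head, n, link);
			free(n);
		}
	}
}
#endif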
6465 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6467 struct sctp_association *asoc;
6468 struct sctp_tmit_chunk *chk, *chk_tmp;
6469 struct sctp_asconf_chunk *acp;
6472 for (chk = TAILQ_FIRST(&asoc->asconf_send_queue); chk != NULL;
6475 chk_tmp = TAILQ_NEXT(chk, sctp_next);
6476 /* find SCTP_ASCONF chunk in queue */
6477 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6479 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6480 if (compare_with_wrap(ntohl(acp->serial_number), stcb->asoc.asconf_seq_out_acked, MAX_SEQ)) {
6485 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6487 sctp_m_freem(chk->data);
6490 asoc->ctrl_queue_cnt--;
6491 sctp_free_a_chunk(stcb, chk);
6498 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6500 struct sctp_association *asoc,
6501 struct sctp_tmit_chunk **data_list,
6503 struct sctp_nets *net)
6506 struct sctp_tmit_chunk *tp1;
6508 for (i = 0; i < bundle_at; i++) {
6509 /* off of the send queue */
6512 * Any chunk NOT 0 you zap the time; chunk 0 gets
6513 * zapped or set based on if an RTO measurement is
6516 data_list[i]->do_rtt = 0;
6519 data_list[i]->sent_rcv_time = net->last_sent_time;
6520 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6521 TAILQ_REMOVE(&asoc->send_queue,
6524 /* on to the sent queue */
6525 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6526 if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq,
6527 data_list[i]->rec.data.TSN_seq, MAX_TSN))) {
6528 struct sctp_tmit_chunk *tpp;
6530 /* need to move back */
6532 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6534 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6538 if (compare_with_wrap(tp1->rec.data.TSN_seq,
6539 data_list[i]->rec.data.TSN_seq, MAX_TSN)) {
6542 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6544 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6549 /* This does not lower until the cum-ack passes it */
6550 asoc->sent_queue_cnt++;
6551 asoc->send_queue_cnt--;
6552 if ((asoc->peers_rwnd <= 0) &&
6553 (asoc->total_flight == 0) &&
6555 /* Mark the chunk as being a window probe */
6556 SCTP_STAT_INCR(sctps_windowprobed);
6558 #ifdef SCTP_AUDITING_ENABLED
6559 sctp_audit_log(0xC2, 3);
6561 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6562 data_list[i]->snd_count = 1;
6563 data_list[i]->rec.data.chunk_was_revoked = 0;
6564 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6565 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6566 data_list[i]->whoTo->flight_size,
6567 data_list[i]->book_size,
6568 (uintptr_t) data_list[i]->whoTo,
6569 data_list[i]->rec.data.TSN_seq);
6571 sctp_flight_size_increase(data_list[i]);
6572 sctp_total_flight_increase(stcb, data_list[i]);
6573 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6574 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6575 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6577 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6578 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6579 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6580 /* SWS sender side engages */
6581 asoc->peers_rwnd = 0;
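/*
 * Sketch of the wrap-aware TSN comparison used when placing chunks on
 * the sent queue above. This is the usual serial-number test: a is
 * "after" b when (a - b) modulo 2^32 is below half the space and
 * a != b. Hypothetical name; disabled.
 */
#if 0
#include <stdint.h>

static int
example_tsn_after(uint32_t a, uint32_t b)
{
	return ((a != b) && ((uint32_t)(a - b) < ((uint32_t)1 << 31)));
}
#endif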
6587 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
6589 struct sctp_tmit_chunk *chk, *nchk;
6591 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
6593 nchk = TAILQ_NEXT(chk, sctp_next);
6594 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6595 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
6596 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6597 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6598 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
6599 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6600 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6601 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6602 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6603 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
6604 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
6605 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
6606 /* Stray chunks must be cleaned up */
6608 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6610 sctp_m_freem(chk->data);
6613 asoc->ctrl_queue_cnt--;
6614 sctp_free_a_chunk(stcb, chk);
6615 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
6616 /* special handling, we must look into the param */
6617 if (chk != asoc->str_reset) {
6618 goto clean_up_anyway;
6626 sctp_can_we_split_this(struct sctp_tcb *stcb,
6628 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
6631 * Make a decision on whether I should split a msg into multiple parts.
6632 * This is only asked of incomplete messages.
6636 * If we are doing EEOR we need to always send it if it's the
6637 * entire thing, since it might be all the guy is putting in
6640 if (goal_mtu >= length) {
6642 * If we have data outstanding,
6643 * we get another chance when the sack
6644 * arrives to transmit - wait for more data
6646 if (stcb->asoc.total_flight == 0) {
6648 * If nothing is in flight, we zero the
6656 /* You can fill the rest */
6661 * For those strange folk that make the send buffer
6662 * smaller than our fragmentation point, we can't
6663 * get a full msg in so we have to allow splitting.
6665 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
6668 if ((length <= goal_mtu) ||
6669 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
6670 /* Sub-optimal residual, don't split in non-eeor mode. */
6674 * If we reach here, length is larger than the goal_mtu. Do we wish
6675 * to split it for the sake of packing chunks together?
6677 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
6678 /* It's ok to split it */
6679 return (min(goal_mtu, frag_point));
6681 /* Nope, can't split */
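/*
 * Condensed standalone sketch of the split decision above, assuming
 * the same inputs. Returns how many bytes to take now: 0 means "do
 * not split". Names and the flattened control flow are this sketch's
 * own, not the driver's exact shape. Disabled.
 */
#if 0
#include <stdint.h>

static uint32_t
example_split(uint32_t length, uint32_t goal_mtu, uint32_t frag_point,
    uint32_t min_residual, uint32_t min_split_point, int sendbuf_small)
{
	/* Tiny send buffers can never hold a full message: must split. */
	if (!sendbuf_small &&
	    ((length <= goal_mtu) ||
	    ((length - goal_mtu) < min_residual))) {
		/* Sub-optimal residual, don't split. */
		return (0);
	}
	/* Split if packing pays off. */
	if (goal_mtu >= (min_split_point < frag_point ?
	    min_split_point : frag_point)) {
		uint32_t take = goal_mtu < frag_point ? goal_mtu : frag_point;

		return (take < length ? take : length);
	}
	return (0);	/* nope, can't split */
}
#endif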
6687 sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
6688 struct sctp_stream_out *strq,
6690 uint32_t frag_point,
6696 /* Move from the stream to the send_queue keeping track of the total */
6697 struct sctp_association *asoc;
6698 struct sctp_stream_queue_pending *sp;
6699 struct sctp_tmit_chunk *chk;
6700 struct sctp_data_chunk *dchkh;
6701 uint32_t to_move, length;
6702 uint8_t rcv_flags = 0;
6704 uint8_t send_lock_up = 0;
6706 SCTP_TCB_LOCK_ASSERT(stcb);
6709 /* sa_ignore FREED_MEMORY */
6710 sp = TAILQ_FIRST(&strq->outqueue);
6713 if (send_lock_up == 0) {
6714 SCTP_TCB_SEND_LOCK(stcb);
6717 sp = TAILQ_FIRST(&strq->outqueue);
6721 if (strq->last_msg_incomplete) {
6722 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
6724 strq->last_msg_incomplete);
6725 strq->last_msg_incomplete = 0;
6729 SCTP_TCB_SEND_UNLOCK(stcb);
6734 if ((sp->msg_is_complete) && (sp->length == 0)) {
6735 if (sp->sender_all_done) {
6737 * We are doing deferred cleanup. Last time through
6738 * when we took all the data the sender_all_done was
6741 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
6742 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
6743 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
6744 sp->sender_all_done,
6746 sp->msg_is_complete,
6750 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
6751 SCTP_TCB_SEND_LOCK(stcb);
6754 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
6755 TAILQ_REMOVE(&strq->outqueue, sp, next);
6756 sctp_free_remote_addr(sp->net);
6758 sctp_m_freem(sp->data);
6761 sctp_free_a_strmoq(stcb, sp);
6762 /* we can't be locked to it */
6764 stcb->asoc.locked_on_sending = NULL;
6766 SCTP_TCB_SEND_UNLOCK(stcb);
6769 /* back to get the next msg */
6773 * sender just finished this but still holds a
6782 /* is there some to get */
6783 if (sp->length == 0) {
6789 } else if (sp->discard_rest) {
6790 if (send_lock_up == 0) {
6791 SCTP_TCB_SEND_LOCK(stcb);
6794 /* Whack down the size */
6795 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
6796 if ((stcb->sctp_socket != NULL) &&
6797 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
6798 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
6799 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
6802 sctp_m_freem(sp->data);
6804 sp->tail_mbuf = NULL;
6814 some_taken = sp->some_taken;
6815 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6816 sp->msg_is_complete = 1;
6819 length = sp->length;
6820 if (sp->msg_is_complete) {
6821 /* The message is complete */
6822 to_move = min(length, frag_point);
6823 if (to_move == length) {
6824 /* All of it fits in the MTU */
6825 if (sp->some_taken) {
6826 rcv_flags |= SCTP_DATA_LAST_FRAG;
6827 sp->put_last_out = 1;
6829 rcv_flags |= SCTP_DATA_NOT_FRAG;
6830 sp->put_last_out = 1;
6833 /* Not all of it fits, we fragment */
6834 if (sp->some_taken == 0) {
6835 rcv_flags |= SCTP_DATA_FIRST_FRAG;
6840 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
6843 * We use a snapshot of length in case it
6844 * is expanding during the compare.
6849 if (to_move >= llen) {
6851 if (send_lock_up == 0) {
6853 * We are taking all of an incomplete msg
6854 * thus we need a send lock.
6856 SCTP_TCB_SEND_LOCK(stcb);
6858 if (sp->msg_is_complete) {
6860 * the sender finished the
6867 if (sp->some_taken == 0) {
6868 rcv_flags |= SCTP_DATA_FIRST_FRAG;
6872 /* Nothing to take. */
6873 if (sp->some_taken) {
6882 /* If we reach here, we can copy out a chunk */
6883 sctp_alloc_a_chunk(stcb, chk);
6885 /* No chunk memory */
6891 * Setup for unordered if needed by looking at the user sent info
6894 if (sp->sinfo_flags & SCTP_UNORDERED) {
6895 rcv_flags |= SCTP_DATA_UNORDERED;
6897 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
6898 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
6899 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
6901 /* clear out the chunk before setting up */
6902 memset(chk, 0, sizeof(*chk));
6903 chk->rec.data.rcv_flags = rcv_flags;
6905 if (to_move >= length) {
6906 /* we think we can steal the whole thing */
6907 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
6908 SCTP_TCB_SEND_LOCK(stcb);
6911 if (to_move < sp->length) {
6912 /* bail, it changed */
6915 chk->data = sp->data;
6916 chk->last_mbuf = sp->tail_mbuf;
6917 /* register the stealing */
6918 sp->data = sp->tail_mbuf = NULL;
6923 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
6924 chk->last_mbuf = NULL;
6925 if (chk->data == NULL) {
6926 sp->some_taken = some_taken;
6927 sctp_free_a_chunk(stcb, chk);
6932 #ifdef SCTP_MBUF_LOGGING
6933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6938 if (SCTP_BUF_IS_EXTENDED(mat)) {
6939 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6941 mat = SCTP_BUF_NEXT(mat);
6945 /* Pull off the data */
6946 m_adj(sp->data, to_move);
6947 /* Now let's work our way down and compact it */
6949 while (m && (SCTP_BUF_LEN(m) == 0)) {
6950 sp->data = SCTP_BUF_NEXT(m);
6951 SCTP_BUF_NEXT(m) = NULL;
6952 if (sp->tail_mbuf == m) {
6954 * Freeing tail? TSNH since
6955 * we supposedly were taking less
6956 * than the sp->length.
6959 panic("Huh, freing tail? - TSNH");
6961 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
6962 sp->tail_mbuf = sp->data = NULL;
6971 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
6972 chk->copy_by_ref = 1;
6974 chk->copy_by_ref = 0;
6977 * get last_mbuf and counts of mbuf usage. This is ugly but hopefully
6978 * it's only one mbuf.
6980 if (chk->last_mbuf == NULL) {
6981 chk->last_mbuf = chk->data;
6982 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
6983 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
6986 if (to_move > length) {
6987 /*- This should not happen either
6988 * since we always lower to_move to the size
6989 * of sp->length if it's larger.
6992 panic("Huh, how can to_move be larger?");
6994 SCTP_PRINTF("Huh, how can to_move be larger?\n");
6998 atomic_subtract_int(&sp->length, to_move);
7000 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7001 /* Not enough room for a chunk header, get some */
7004 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
7007 * we're in trouble here. _PREPEND below will free
7008 * all the data if there is no leading space, so we
7009 * must put the data back and restore.
7011 if (send_lock_up == 0) {
7012 SCTP_TCB_SEND_LOCK(stcb);
7015 if (chk->data == NULL) {
7016 /* unsteal the data */
7017 sp->data = chk->data;
7018 sp->tail_mbuf = chk->last_mbuf;
7022 /* reassemble the data */
7024 sp->data = chk->data;
7025 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7027 sp->some_taken = some_taken;
7028 atomic_add_int(&sp->length, to_move);
7031 sctp_free_a_chunk(stcb, chk);
7035 SCTP_BUF_LEN(m) = 0;
7036 SCTP_BUF_NEXT(m) = chk->data;
7038 M_ALIGN(chk->data, 4);
7041 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
7042 if (chk->data == NULL) {
7043 /* HELP, TSNH since we assured it would not above? */
7045 panic("prepend failes HELP?");
7047 SCTP_PRINTF("prepend fails HELP?\n");
7048 sctp_free_a_chunk(stcb, chk);
7054 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7055 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7056 chk->book_size_scale = 0;
7057 chk->sent = SCTP_DATAGRAM_UNSENT;
7060 chk->asoc = &stcb->asoc;
7061 chk->pad_inplace = 0;
7062 chk->no_fr_allowed = 0;
7063 chk->rec.data.stream_seq = sp->strseq;
7064 chk->rec.data.stream_number = sp->stream;
7065 chk->rec.data.payloadtype = sp->ppid;
7066 chk->rec.data.context = sp->context;
7067 chk->rec.data.doing_fast_retransmit = 0;
7068 chk->rec.data.ect_nonce = 0; /* ECN Nonce */
7070 chk->rec.data.timetodrop = sp->ts;
7071 chk->flags = sp->act_flags;
7072 chk->addr_over = sp->addr_over;
7075 atomic_add_int(&chk->whoTo->ref_count, 1);
7077 if (sp->holds_key_ref) {
7078 chk->auth_keyid = sp->auth_keyid;
7079 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7080 chk->holds_key_ref = 1;
7082 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7083 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7084 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7085 (uintptr_t) stcb, sp->length,
7086 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7087 chk->rec.data.TSN_seq);
7089 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7091 * Put the rest of the things in place now. Size was done earlier in
7092 * previous loop prior to padding.
7095 #ifdef SCTP_ASOCLOG_OF_TSNS
7096 SCTP_TCB_LOCK_ASSERT(stcb);
7097 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7098 asoc->tsn_out_at = 0;
7099 asoc->tsn_out_wrapped = 1;
7101 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7102 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7103 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7104 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7105 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7106 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7107 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7108 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7112 dchkh->ch.chunk_type = SCTP_DATA;
7113 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7114 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7115 dchkh->dp.stream_id = htons(strq->stream_no);
7116 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7117 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7118 dchkh->ch.chunk_length = htons(chk->send_size);
7119 /* Now advance the chk->send_size by the actual pad needed. */
7120 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7125 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7126 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7127 chk->pad_inplace = 1;
7129 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7130 /* pad added an mbuf */
7131 chk->last_mbuf = lm;
7133 chk->send_size += pads;
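/*
 * Sketch of the pad computation above: SCTP chunks are padded up to a
 * 4-byte boundary, and pads is the round-up minus the unpadded size.
 * EX_SIZE32 is a hypothetical stand-in for the kernel macro. Disabled.
 */
#if 0
#include <stdint.h>

#define EX_SIZE32(x) (((x) + 3) & ~(uint32_t)3)	/* round up to 4 */

static uint32_t
example_pads(uint32_t send_size)
{
	return (EX_SIZE32(send_size) - send_size);	/* 0..3 bytes */
}
#endif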
7135 /* We only re-set the policy if it is on */
7136 if (sp->pr_sctp_on) {
7137 sctp_set_prsctp_policy(sp);
7138 asoc->pr_sctp_cnt++;
7139 chk->pr_sctp_on = 1;
7141 chk->pr_sctp_on = 0;
7143 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7144 /* All done, pull and kill the message */
7145 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7146 if (sp->put_last_out == 0) {
7147 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7148 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7149 sp->sender_all_done,
7151 sp->msg_is_complete,
7155 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7156 SCTP_TCB_SEND_LOCK(stcb);
7159 TAILQ_REMOVE(&strq->outqueue, sp, next);
7160 sctp_free_remote_addr(sp->net);
7162 sctp_m_freem(sp->data);
7165 sctp_free_a_strmoq(stcb, sp);
7167 /* we can't be locked to it */
7169 stcb->asoc.locked_on_sending = NULL;
7171 /* more to go, we are locked */
7174 asoc->chunks_on_out_queue++;
7175 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7176 asoc->send_queue_cnt++;
7179 SCTP_TCB_SEND_UNLOCK(stcb);
7186 static struct sctp_stream_out *
7187 sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc)
7189 struct sctp_stream_out *strq;
7191 /* Find the next stream to use */
7192 if (asoc->last_out_stream == NULL) {
7193 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
7194 if (asoc->last_out_stream == NULL) {
7195 /* huh nothing on the wheel, TSNH */
7200 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
7203 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
7205 /* Save off the last stream */
7206 asoc->last_out_stream = strq;
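/*
 * Sketch of the round-robin pick above, over a plain array: advance
 * past the last stream used and wrap to the front when the end is
 * reached. An index stands in for the wheel's TAILQ links. Disabled.
 */
#if 0
static int
example_select_stream(int last, int nstreams)
{
	if (nstreams <= 0)
		return (-1);		/* nothing on the wheel */
	if (last < 0)
		return (0);		/* no history, start at the front */
	return ((last + 1) % nstreams);	/* next spoke, wrapping */
}
#endif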
7213 sctp_fill_outqueue(struct sctp_tcb *stcb,
7214 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now)
7216 struct sctp_association *asoc;
7217 struct sctp_stream_out *strq, *strqn, *strqt;
7218 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7220 struct sctp_stream_queue_pending *sp;
7222 SCTP_TCB_LOCK_ASSERT(stcb);
7225 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
7226 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7228 /* ?? not sure what else to do */
7229 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7232 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7234 /* Need an allowance for the data chunk header too */
7235 goal_mtu -= sizeof(struct sctp_data_chunk);
7237 /* must make even word boundary */
7238 goal_mtu &= 0xfffffffc;
7239 if (asoc->locked_on_sending) {
7240 /* We are stuck on one stream until the message completes. */
7241 strqn = strq = asoc->locked_on_sending;
7244 strqn = strq = sctp_select_a_stream(stcb, asoc);
7248 while ((goal_mtu > 0) && strq) {
7249 sp = TAILQ_FIRST(&strq->outqueue);
7251 * If CMT is off, we must validate that the stream in
7252 * question has its first item pointed towards the network
7253 * destination requested by the caller. Note that if we
7254 * turn out to be locked to a stream (assigning TSN's), then
7255 * we must stop, since we cannot look for another stream
7256 * with data to send to that destination). In CMT's case, by
7257 * skipping this check, we will send one data packet towards
7258 * the requested net.
7263 if ((sp->net != net) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
7264 /* none for this network */
7268 strq = sctp_select_a_stream(stcb, asoc);
7272 if (strqn == strq) {
7273 /* I have circled */
7281 moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked,
7282 &giveup, eeor_mode, &bail);
7283 asoc->last_out_stream = strq;
7285 asoc->locked_on_sending = strq;
7286 if ((moved_how_much == 0) || (giveup) || bail)
7287 /* no more to move for now */
7290 asoc->locked_on_sending = NULL;
7291 strqt = sctp_select_a_stream(stcb, asoc);
7292 if (TAILQ_FIRST(&strq->outqueue) == NULL) {
7293 if (strq == strqn) {
7294 /* Must move start to next one */
7295 strqn = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
7296 if (strqn == NULL) {
7297 strqn = TAILQ_FIRST(&asoc->out_wheel);
7298 if (strqn == NULL) {
7303 sctp_remove_from_wheel(stcb, asoc, strq, 0);
7305 if ((giveup) || bail) {
7313 total_moved += moved_how_much;
7314 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7315 goal_mtu &= 0xfffffffc;
7320 if (total_moved == 0) {
7321 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) &&
7322 (net == stcb->asoc.primary_destination)) {
7323 /* ran dry for primary network net */
7324 SCTP_STAT_INCR(sctps_primary_randry);
7325 } else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7326 /* ran dry with CMT on */
7327 SCTP_STAT_INCR(sctps_cmt_randry);
7333 sctp_fix_ecn_echo(struct sctp_association *asoc)
7335 struct sctp_tmit_chunk *chk;
7337 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7338 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7339 chk->sent = SCTP_DATAGRAM_UNSENT;
7345 sctp_move_to_an_alt(struct sctp_tcb *stcb,
7346 struct sctp_association *asoc,
7347 struct sctp_nets *net)
7349 struct sctp_tmit_chunk *chk;
7350 struct sctp_nets *a_net;
7352 SCTP_TCB_LOCK_ASSERT(stcb);
7354 * JRS 5/14/07 - If CMT PF is turned on, find an alternate
7355 * destination using the PF algorithm for finding alternate
7358 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
7359 a_net = sctp_find_alternate_net(stcb, net, 2);
7361 a_net = sctp_find_alternate_net(stcb, net, 0);
7363 if ((a_net != net) &&
7364 ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
7366 * We only proceed if a valid alternate is found that is not
7367 * this one and is reachable. Here we must move all chunks
7368 * queued in the send queue off of the destination address
7371 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7372 if (chk->whoTo == net) {
7373 /* Move the chunk to our alternate */
7374 sctp_free_remote_addr(chk->whoTo);
7376 atomic_add_int(&a_net->ref_count, 1);
7383 sctp_med_chunk_output(struct sctp_inpcb *inp,
7384 struct sctp_tcb *stcb,
7385 struct sctp_association *asoc,
7388 int control_only, int from_where,
7389 struct timeval *now, int *now_filled, int frag_point, int so_locked
7390 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7396 * Ok this is the generic chunk service queue. We must do the
7397 * following: - Service the stream queue that is next, moving any
7398 * message (note I must get a complete message i.e. FIRST/MIDDLE and
7399 * LAST to the out queue in one pass) and assigning TSN's - Check to
7400 * see if the cwnd/rwnd allows any output; if so, we go ahead and
7401 * formulate and send the low level chunks, making sure to combine
7402 * any control in the control chunk queue also.
7404 struct sctp_nets *net, *start_at, *old_start_at = NULL;
7405 struct mbuf *outchain, *endoutchain;
7406 struct sctp_tmit_chunk *chk, *nchk;
7408 /* temp arrays for unlinking */
7409 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7410 int no_fragmentflg, error;
7411 unsigned int max_rwnd_per_dest;
7412 int one_chunk, hbflag, skip_data_for_this_net;
7413 int asconf, cookie, no_out_cnt;
7414 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7415 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7417 uint32_t auth_offset = 0;
7418 struct sctp_auth_chunk *auth = NULL;
7419 uint16_t auth_keyid;
7420 int override_ok = 1;
7421 int data_auth_reqd = 0;
7424 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the
7431 auth_keyid = stcb->asoc.authinfo.active_keyid;
7433 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7434 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7435 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7440 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7442 * First let's prime the pump. For each destination, if there is room
7443 * in the flight size, attempt to pull an MTU's worth out of the
7444 * stream queues into the general send_queue
7446 #ifdef SCTP_AUDITING_ENABLED
7447 sctp_audit_log(0xC2, 2);
7449 SCTP_TCB_LOCK_ASSERT(stcb);
7451 if ((control_only) || (asoc->stream_reset_outstanding))
7456 /* Nothing possible to send? */
7457 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7458 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7459 TAILQ_EMPTY(&asoc->send_queue) &&
7460 TAILQ_EMPTY(&asoc->out_wheel)) {
7464 if (asoc->peers_rwnd == 0) {
7465 /* No room in peers rwnd */
7467 if (asoc->total_flight > 0) {
7468 /* we are allowed one chunk in flight */
7472 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
7473 if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
7474 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7476 * This for loop we are in takes in each net; if
7477 * it's got space in cwnd and has data sent to it
7478 * (when CMT is off) then it calls
7479 * sctp_fill_outqueue for the net. This gets data on
7480 * the send queue for that network.
7482 * In sctp_fill_outqueue TSN's are assigned and data is
7483 * copied out of the stream buffers. Note mostly
7484 * copy by reference (we hope).
7486 net->window_probe = 0;
7487 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) || (net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
7488 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7489 sctp_log_cwnd(stcb, net, 1,
7490 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7494 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) && (net->ref_count < 2)) {
7495 /* nothing can be in queue for this guy */
7496 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7497 sctp_log_cwnd(stcb, net, 2,
7498 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7502 if (net->flight_size >= net->cwnd) {
7503 /* skip this network, no room - can't fill */
7504 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7505 sctp_log_cwnd(stcb, net, 3,
7506 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7510 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7511 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7513 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now);
7515 /* memory alloc failure */
7521 /* now service each destination and send out what we can for it */
7522 /* Nothing to send? */
7523 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
7524 (TAILQ_FIRST(&asoc->asconf_send_queue) == NULL) &&
7525 (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
7529 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
7530 /* get the last start point */
7531 start_at = asoc->last_net_cmt_send_started;
7532 if (start_at == NULL) {
7533 /* NULL, so go to the beginning */
7534 start_at = TAILQ_FIRST(&asoc->nets);
7536 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7537 if (start_at == NULL) {
7538 start_at = TAILQ_FIRST(&asoc->nets);
7541 asoc->last_net_cmt_send_started = start_at;
7543 start_at = TAILQ_FIRST(&asoc->nets);
7545 old_start_at = NULL;
7546 again_one_more_time:
7547 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7548 /* how much can we send? */
7549 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7550 if (old_start_at && (old_start_at == net)) {
7551 /* through the list completely. */
7555 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) && (net->ref_count < 2)) {
7557 * Ref-count of 1 so we cannot have data or control
7558 * queued to this address. Skip it (non-CMT).
7562 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
7563 (TAILQ_FIRST(&asoc->asconf_send_queue) == NULL) &&
7564 (net->flight_size >= net->cwnd)) {
7566 * Nothing on control or asconf and flight is full,
7567 * we can skip even in the CMT case.
7571 ctl_cnt = bundle_at = 0;
7572 endoutchain = outchain = NULL;
7575 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7576 skip_data_for_this_net = 1;
7578 skip_data_for_this_net = 0;
7580 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7582 * if we have a route and an ifp check to see if we
7583 * have room to send to this guy
7587 ifp = net->ro.ro_rt->rt_ifp;
7588 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7589 SCTP_STAT_INCR(sctps_ifnomemqueued);
7590 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7591 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7596 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7598 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7602 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7612 if (mtu > asoc->peers_rwnd) {
7613 if (asoc->total_flight > 0) {
7614 /* We have a packet in flight somewhere */
7615 r_mtu = asoc->peers_rwnd;
7617 /* We are always allowed to send one MTU out */
7624 /************************/
7625 /* ASCONF transmission */
7626 /************************/
7627 /* Now first let's go through the asconf queue */
7628 for (chk = TAILQ_FIRST(&asoc->asconf_send_queue);
7630 nchk = TAILQ_NEXT(chk, sctp_next);
7631 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
7634 if (chk->whoTo != net) {
7636 * No, not sent to the network we are
7641 if (chk->data == NULL) {
7644 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
7645 chk->sent != SCTP_DATAGRAM_RESEND) {
7649 * if no AUTH is yet included and this chunk
7650 * requires it, make sure to account for it. We
7651 * don't apply the size until the AUTH chunk is
7652 * actually added below in case there is no room for
7653 * this chunk. NOTE: we overload the use of "omtu"
7656 if ((auth == NULL) &&
7657 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7658 stcb->asoc.peer_auth_chunks)) {
7659 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7662 /* Here we do NOT factor the r_mtu */
7663 if ((chk->send_size < (int)(mtu - omtu)) ||
7664 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7666 * We probably should glom the mbuf chain
7667 * from the chk->data for control but the
7668 * problem is it becomes yet one more level
7669 * of tracking to do if for some reason
7670 * output fails. Then I have got to
7671 * reconstruct the merged control chain.. el
7672 * yucko.. for now we take the easy way and
7676 * Add an AUTH chunk, if chunk requires it
7677 * save the offset into the chain for AUTH
7679 if ((auth == NULL) &&
7680 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7681 stcb->asoc.peer_auth_chunks))) {
7682 outchain = sctp_add_auth_chunk(outchain,
7687 chk->rec.chunk_id.id);
7688 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7690 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
7691 (int)chk->rec.chunk_id.can_take_data,
7692 chk->send_size, chk->copy_by_ref);
7693 if (outchain == NULL) {
7695 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7698 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7699 /* update our MTU size */
7700 if (mtu > (chk->send_size + omtu))
7701 mtu -= (chk->send_size + omtu);
7704 to_out += (chk->send_size + omtu);
7705 /* Do clear IP_DF ? */
7706 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7709 if (chk->rec.chunk_id.can_take_data)
7712 * set hb flag since we can use these for
7718 * should sysctl this: don't bundle data
7719 * with ASCONF since it requires AUTH
7722 chk->sent = SCTP_DATAGRAM_SENT;
7726 * Ok we are out of room but we can
7727 * output without effecting the
7728 * flight size since this little guy
7729 * is a control only packet.
7731 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
7733 * do NOT clear the asconf flag as
7734 * it is used to do appropriate
7735 * source address selection.
7737 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
7738 (struct sockaddr *)&net->ro._l_addr,
7739 outchain, auth_offset, auth,
7740 stcb->asoc.authinfo.active_keyid,
7741 no_fragmentflg, 0, NULL, asconf,
7742 inp->sctp_lport, stcb->rport,
7743 htonl(stcb->asoc.peer_vtag),
7744 net->port, so_locked, NULL))) {
7745 if (error == ENOBUFS) {
7746 asoc->ifp_had_enobuf = 1;
7747 SCTP_STAT_INCR(sctps_lowlevelerr);
7749 if (from_where == 0) {
7750 SCTP_STAT_INCR(sctps_lowlevelerrusr);
7752 if (*now_filled == 0) {
7753 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7755 *now = net->last_sent_time;
7757 net->last_sent_time = *now;
7760 /* error, could not output */
7761 if (error == EHOSTUNREACH) {
7767 sctp_move_to_an_alt(stcb, asoc, net);
7772 asoc->ifp_had_enobuf = 0;
7773 if (*now_filled == 0) {
7774 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7776 *now = net->last_sent_time;
7778 net->last_sent_time = *now;
7782 * increase the number we sent, if a
7783 * cookie is sent we don't tell them
7786 outchain = endoutchain = NULL;
7790 *num_out += ctl_cnt;
7791 /* recalc a clean slate and setup */
7792 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
7793 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
7795 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
7802 /************************/
7803 /* Control transmission */
7804 /************************/
7805 /* Now first let's go through the control queue */
7806 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
7808 nchk = TAILQ_NEXT(chk, sctp_next);
7809 if (chk->whoTo != net) {
7811 * No, not sent to the network we are
7816 if (chk->data == NULL) {
7819 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
7821 * It must be unsent. Cookies and ASCONF's
7822 * hang around, but their timers will force them out
7823 * when marked for resend.
7828 * if no AUTH is yet included and this chunk
7829 * requires it, make sure to account for it. We
7830 * don't apply the size until the AUTH chunk is
7831 * actually added below in case there is no room for
7832 * this chunk. NOTE: we overload the use of "omtu"
7835 if ((auth == NULL) &&
7836 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7837 stcb->asoc.peer_auth_chunks)) {
7838 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7841 /* Here we do NOT factor the r_mtu */
7842 if ((chk->send_size < (int)(mtu - omtu)) ||
7843 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7845 * We probably should glom the mbuf chain
7846 * from the chk->data for control but the
7847 * problem is it becomes yet one more level
7848 * of tracking to do if for some reason
7849 * output fails. Then I have got to
7850 * reconstruct the merged control chain.. el
7851 * yucko.. for now we take the easy way and
7855 * Add an AUTH chunk, if chunk requires it
7856 * save the offset into the chain for AUTH
7858 if ((auth == NULL) &&
7859 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7860 stcb->asoc.peer_auth_chunks))) {
7861 outchain = sctp_add_auth_chunk(outchain,
7866 chk->rec.chunk_id.id);
7867 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7869 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
7870 (int)chk->rec.chunk_id.can_take_data,
7871 chk->send_size, chk->copy_by_ref);
7872 if (outchain == NULL) {
7874 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7877 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7878 /* update our MTU size */
7879 if (mtu > (chk->send_size + omtu))
7880 mtu -= (chk->send_size + omtu);
7883 to_out += (chk->send_size + omtu);
7884 /* Do clear IP_DF ? */
7885 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7888 if (chk->rec.chunk_id.can_take_data)
7890 /* Mark things to be removed, if needed */
7891 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7892 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7893 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7894 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7895 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7896 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7897 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7898 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7899 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7900 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7901 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7903 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
7906 * JRS 5/14/07 - Set the
7907 * flag to say a heartbeat
7912 /* remove these chunks at the end */
7913 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
7914 /* turn off the timer */
7915 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
7916 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7917 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
7921 * EY -Nr-sack version of the above
7924 if ((SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) &&
7925 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { /* EY !?! */
7926 /* turn off the timer */
7927 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
7928 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7929 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
7935 * Other chunks, since they have
7936 * timers running (i.e. COOKIE) we
7937 * just "trust" that it gets sent or
7941 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7945 chk->sent = SCTP_DATAGRAM_SENT;
7950 * Ok we are out of room but we can
7951 * output without affecting the
7952 * flight size since this little guy
7953 * is a control only packet.
7956 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
7958 * do NOT clear the asconf
7959 * flag as it is used to do
7960 * appropriate source
7961 * address selection.
7965 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
7968 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
7969 (struct sockaddr *)&net->ro._l_addr,
7972 stcb->asoc.authinfo.active_keyid,
7973 no_fragmentflg, 0, NULL, asconf,
7974 inp->sctp_lport, stcb->rport,
7975 htonl(stcb->asoc.peer_vtag),
7976 net->port, so_locked, NULL))) {
7977 if (error == ENOBUFS) {
7978 asoc->ifp_had_enobuf = 1;
7979 SCTP_STAT_INCR(sctps_lowlevelerr);
7981 if (from_where == 0) {
7982 SCTP_STAT_INCR(sctps_lowlevelerrusr);
7984 /* error, could not output */
7986 if (*now_filled == 0) {
7987 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7989 *now = net->last_sent_time;
7991 net->last_sent_time = *now;
7995 if (error == EHOSTUNREACH) {
8001 sctp_move_to_an_alt(stcb, asoc, net);
8006 asoc->ifp_had_enobuf = 0;
8007 /* Only HB or ASCONF advances time */
8009 if (*now_filled == 0) {
8010 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8012 *now = net->last_sent_time;
8014 net->last_sent_time = *now;
8019 * increase the number we sent, if a
8020 * cookie is sent we don't tell them
8023 outchain = endoutchain = NULL;
8027 *num_out += ctl_cnt;
8028 /* recalc a clean slate and setup */
8029 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
8030 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
8032 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
8039 /* JRI: if dest is in PF state, do not send data to it */
8040 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
8041 SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
8042 (net->dest_state & SCTP_ADDR_PF)) {
8045 if (net->flight_size >= net->cwnd) {
8048 if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
8049 (net->flight_size > max_rwnd_per_dest)) {
8052 /*********************/
8053 /* Data transmission */
8054 /*********************/
8056 * if AUTH for DATA is required and no AUTH has been added
8057 * yet, account for this in the mtu now... if no data can be
8058 * bundled, this adjustment won't matter anyways since the
8059 * packet will be going out...
8061 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8062 stcb->asoc.peer_auth_chunks);
8063 if (data_auth_reqd && (auth == NULL)) {
8064 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8066 /* now let's add any data within the MTU constraints */
8067 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8069 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8070 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8076 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8077 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8087 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8088 (skip_data_for_this_net == 0)) ||
8090 for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
8091 if (no_data_chunks) {
8092 /* let only control go out */
8096 if (net->flight_size >= net->cwnd) {
8097 /* skip this net, no room for data */
8101 nchk = TAILQ_NEXT(chk, sctp_next);
8102 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
8103 if (chk->whoTo != net) {
8105 * For CMT, steal the data
8106 * to this network if its
8109 sctp_free_remote_addr(chk->whoTo);
8111 atomic_add_int(&chk->whoTo->ref_count, 1);
8113 } else if (chk->whoTo != net) {
8114 /* No, not sent to this net */
8117 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8119 * strange, we have a chunk that is
8120 * too big for its destination and
8121 * yet no fragment ok flag.
8122 * Something went wrong when the
8123 * PMTU changed...we did not mark
8124 * this chunk for some reason?? I
8125 * will fix it here by letting IP
8126 * fragment it for now and printing
8127 * a warning. This really should not
8130 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8131 chk->send_size, mtu);
8132 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8134 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8135 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8136 struct sctp_data_chunk *dchkh;
8138 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8139 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8141 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8142 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8143 /* ok we will add this one */
8146 * Add an AUTH chunk, if chunk
8147 * requires it, save the offset into
8148 * the chain for AUTH
8150 if (data_auth_reqd) {
8152 outchain = sctp_add_auth_chunk(outchain,
8158 auth_keyid = chk->auth_keyid;
8160 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8161 } else if (override_ok) {
8166 auth_keyid = chk->auth_keyid;
8168 } else if (auth_keyid != chk->auth_keyid) {
8176 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8177 chk->send_size, chk->copy_by_ref);
8178 if (outchain == NULL) {
8179 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8180 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8181 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8184 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8187 /* update our MTU size */
8188 /* Do clear IP_DF ? */
8189 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8192 /* unsigned subtraction of mtu */
8193 if (mtu > chk->send_size)
8194 mtu -= chk->send_size;
8197 /* unsigned subtraction of r_mtu */
8198 if (r_mtu > chk->send_size)
8199 r_mtu -= chk->send_size;
8203 to_out += chk->send_size;
8204 if ((to_out > mx_mtu) && no_fragmentflg) {
8206 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8208 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8212 chk->window_probe = 0;
8213 data_list[bundle_at++] = chk;
8214 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8218 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8219 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8220 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8222 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8224 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8225 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8235 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8237 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8238 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8239 data_list[0]->window_probe = 1;
8240 net->window_probe = 1;
8246 * Must be sent in order of the
8247 * TSN's (on a network)
8251 } /* for (chunk gather loop for this net) */
8252 } /* if asoc.state OPEN */
8254 /* Is there something to send for this destination? */
8256 /* We may need to start a control timer or two */
8258 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8261 * do NOT clear the asconf flag as it is
8262 * used to do appropriate source address
8267 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8270 /* must start a send timer if data is being sent */
8271 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8273 * no timer running on this destination
8276 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8277 } else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
8278 SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
8280 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) &&
8281 (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8283 * JRS 5/14/07 - If a HB has been sent to a
8284 * PF destination and no T3 timer is
8285 * currently running, start the T3 timer to
8286 * track the HBs that were sent.
8288 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8290 /* Now send it, if there is anything to send :> */
8291 if ((error = sctp_lowlevel_chunk_output(inp,
8294 (struct sockaddr *)&net->ro._l_addr,
8303 inp->sctp_lport, stcb->rport,
8304 htonl(stcb->asoc.peer_vtag),
8305 net->port, so_locked, NULL))) {
8306 /* error, we could not output */
8307 if (error == ENOBUFS) {
8308 SCTP_STAT_INCR(sctps_lowlevelerr);
8309 asoc->ifp_had_enobuf = 1;
8311 if (from_where == 0) {
8312 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8314 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8316 if (*now_filled == 0) {
8317 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8319 *now = net->last_sent_time;
8321 net->last_sent_time = *now;
8325 if (error == EHOSTUNREACH) {
8327 * Destination went unreachable during this send.
8330 sctp_move_to_an_alt(stcb, asoc, net);
8334 * I add this line to be paranoid. As far as
8335 * I can tell, the continue takes us back to
8336 * the top of the for loop, but just to make
8337 * sure, I will reset these again here.
8339 ctl_cnt = bundle_at = 0;
8340 continue; /* This takes us back to the
8341 * for() for the nets. */
8343 asoc->ifp_had_enobuf = 0;
8345 outchain = endoutchain = NULL;
8348 if (bundle_at || hbflag) {
8349 /* For data/asconf and hb set time */
8350 if (*now_filled == 0) {
8351 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8353 *now = net->last_sent_time;
8355 net->last_sent_time = *now;
8359 *num_out += (ctl_cnt + bundle_at);
8362 /* setup for an RTO measurement */
8363 tsns_sent = data_list[0]->rec.data.TSN_seq;
8364 /* fill time if not already filled */
8365 if (*now_filled == 0) {
8366 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8368 *now = asoc->time_last_sent;
8370 asoc->time_last_sent = *now;
8372 data_list[0]->do_rtt = 1;
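/*
 * Illustrative note: only data_list[0], the first chunk of the bundle,
 * is flagged for RTT measurement; one timed TSN per packet is all the
 * RTO estimator needs.
 */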
8373 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8374 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8375 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8376 if (net->flight_size < net->cwnd) {
8377 /* start or restart it */
8378 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8379 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
8380 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
8382 SCTP_STAT_INCR(sctps_earlyfrstrout);
8383 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
8385 /* stop it if it's running */
8386 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8387 SCTP_STAT_INCR(sctps_earlyfrstpout);
8388 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
8389 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
8398 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8399 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8402 if (old_start_at == NULL) {
8403 old_start_at = start_at;
8404 start_at = TAILQ_FIRST(&asoc->nets);
8406 goto again_one_more_time;
8409 * At the end there should be no NON timed chunks hanging on this queue.
8412 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8413 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8415 if ((*num_out == 0) && (*reason_code == 0)) {
8420 sctp_clean_up_ctl(stcb, asoc);
8425 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8428 * Prepend an OPERATIONAL_ERROR chunk header and put it on the end of
8429 * the control chunk queue.
8431 struct sctp_chunkhdr *hdr;
8432 struct sctp_tmit_chunk *chk;
8435 SCTP_TCB_LOCK_ASSERT(stcb);
8436 sctp_alloc_a_chunk(stcb, chk);
8439 sctp_m_freem(op_err);
8442 chk->copy_by_ref = 0;
8443 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
8444 if (op_err == NULL) {
8445 sctp_free_a_chunk(stcb, chk);
8450 while (mat != NULL) {
8451 chk->send_size += SCTP_BUF_LEN(mat);
8452 mat = SCTP_BUF_NEXT(mat);
8454 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8455 chk->rec.chunk_id.can_take_data = 1;
8456 chk->sent = SCTP_DATAGRAM_UNSENT;
8459 chk->asoc = &stcb->asoc;
8461 chk->whoTo = chk->asoc->primary_destination;
8462 atomic_add_int(&chk->whoTo->ref_count, 1);
8463 hdr = mtod(op_err, struct sctp_chunkhdr *);
8464 hdr->chunk_type = SCTP_OPERATION_ERROR;
8465 hdr->chunk_flags = 0;
8466 hdr->chunk_length = htons(chk->send_size);
8467 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8470 chk->asoc->ctrl_queue_cnt++;
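/*
 * For reference: after sctp_queue_op_err() the mbuf chain is
 * [sctp_chunkhdr: type SCTP_OPERATION_ERROR, flags 0, length] followed
 * by the caller-supplied error cause(s); the chunk then sits on the
 * control_send_queue until the next output pass bundles it.
 */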
8474 sctp_send_cookie_echo(struct mbuf *m,
8476 struct sctp_tcb *stcb,
8477 struct sctp_nets *net)
8480 * pull out the cookie and put it at the front of the control chunk queue.
8484 struct mbuf *cookie;
8485 struct sctp_paramhdr parm, *phdr;
8486 struct sctp_chunkhdr *hdr;
8487 struct sctp_tmit_chunk *chk;
8488 uint16_t ptype, plen;
8490 /* First find the cookie in the param area */
8492 at = offset + sizeof(struct sctp_init_chunk);
8494 SCTP_TCB_LOCK_ASSERT(stcb);
8496 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8500 ptype = ntohs(phdr->param_type);
8501 plen = ntohs(phdr->param_length);
8502 if (ptype == SCTP_STATE_COOKIE) {
8505 /* found the cookie */
8506 if ((pad = (plen % 4))) {
8509 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
8510 if (cookie == NULL) {
8514 #ifdef SCTP_MBUF_LOGGING
8515 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8520 if (SCTP_BUF_IS_EXTENDED(mat)) {
8521 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8523 mat = SCTP_BUF_NEXT(mat);
8529 at += SCTP_SIZE32(plen);
8531 if (cookie == NULL) {
8532 /* Did not find the cookie */
8535 /* ok, we got the cookie, let's change it into a cookie echo chunk */
8537 /* first the change from param to cookie */
8538 hdr = mtod(cookie, struct sctp_chunkhdr *);
8539 hdr->chunk_type = SCTP_COOKIE_ECHO;
8540 hdr->chunk_flags = 0;
8541 /* get the chunk stuff now and place it in the FRONT of the queue */
8542 sctp_alloc_a_chunk(stcb, chk);
8545 sctp_m_freem(cookie);
8548 chk->copy_by_ref = 0;
8549 chk->send_size = plen;
8550 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8551 chk->rec.chunk_id.can_take_data = 0;
8552 chk->sent = SCTP_DATAGRAM_UNSENT;
8554 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8555 chk->asoc = &stcb->asoc;
8557 chk->whoTo = chk->asoc->primary_destination;
8558 atomic_add_int(&chk->whoTo->ref_count, 1);
8559 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
8560 chk->asoc->ctrl_queue_cnt++;
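/*
 * For reference: unlike most control chunks, the COOKIE-ECHO is put at
 * the HEAD of the control queue (TAILQ_INSERT_HEAD above) so it becomes
 * the first chunk in the packet, which RFC 4960 requires when DATA is
 * bundled behind it.
 */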
8565 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
8569 struct sctp_nets *net)
8572 * take a HB request and make it into a HB ack and send it.
8574 struct mbuf *outchain;
8575 struct sctp_chunkhdr *chdr;
8576 struct sctp_tmit_chunk *chk;
8580 /* must have a net pointer */
8583 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
8584 if (outchain == NULL) {
8585 /* gak out of memory */
8588 #ifdef SCTP_MBUF_LOGGING
8589 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8594 if (SCTP_BUF_IS_EXTENDED(mat)) {
8595 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8597 mat = SCTP_BUF_NEXT(mat);
8601 chdr = mtod(outchain, struct sctp_chunkhdr *);
8602 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
8603 chdr->chunk_flags = 0;
8604 if (chk_length % 4) {
8606 uint32_t cpthis = 0;
8609 padlen = 4 - (chk_length % 4);
8610 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
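/*
 * Illustrative numbers: chunks must end on a 32-bit boundary, so a
 * HEARTBEAT of length 13 gets padlen = 4 - (13 % 4) = 3 zero bytes
 * appended by m_copyback() above, while the chunk length field still
 * advertises the unpadded 13.
 */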
8612 sctp_alloc_a_chunk(stcb, chk);
8615 sctp_m_freem(outchain);
8618 chk->copy_by_ref = 0;
8619 chk->send_size = chk_length;
8620 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
8621 chk->rec.chunk_id.can_take_data = 1;
8622 chk->sent = SCTP_DATAGRAM_UNSENT;
8625 chk->asoc = &stcb->asoc;
8626 chk->data = outchain;
8628 atomic_add_int(&chk->whoTo->ref_count, 1);
8629 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8630 chk->asoc->ctrl_queue_cnt++;
8634 sctp_send_cookie_ack(struct sctp_tcb *stcb)
8636 /* formulate and queue a cookie-ack back to sender */
8637 struct mbuf *cookie_ack;
8638 struct sctp_chunkhdr *hdr;
8639 struct sctp_tmit_chunk *chk;
8642 SCTP_TCB_LOCK_ASSERT(stcb);
8644 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
8645 if (cookie_ack == NULL) {
8649 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
8650 sctp_alloc_a_chunk(stcb, chk);
8653 sctp_m_freem(cookie_ack);
8656 chk->copy_by_ref = 0;
8657 chk->send_size = sizeof(struct sctp_chunkhdr);
8658 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
8659 chk->rec.chunk_id.can_take_data = 1;
8660 chk->sent = SCTP_DATAGRAM_UNSENT;
8663 chk->asoc = &stcb->asoc;
8664 chk->data = cookie_ack;
8665 if (chk->asoc->last_control_chunk_from != NULL) {
8666 chk->whoTo = chk->asoc->last_control_chunk_from;
8668 chk->whoTo = chk->asoc->primary_destination;
8670 atomic_add_int(&chk->whoTo->ref_count, 1);
8671 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
8672 hdr->chunk_type = SCTP_COOKIE_ACK;
8673 hdr->chunk_flags = 0;
8674 hdr->chunk_length = htons(chk->send_size);
8675 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
8676 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8677 chk->asoc->ctrl_queue_cnt++;
8683 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
8685 /* formulate and queue a SHUTDOWN-ACK back to the sender */
8686 struct mbuf *m_shutdown_ack;
8687 struct sctp_shutdown_ack_chunk *ack_cp;
8688 struct sctp_tmit_chunk *chk;
8690 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
8691 if (m_shutdown_ack == NULL) {
8695 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
8696 sctp_alloc_a_chunk(stcb, chk);
8699 sctp_m_freem(m_shutdown_ack);
8702 chk->copy_by_ref = 0;
8703 chk->send_size = sizeof(struct sctp_chunkhdr);
8704 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
8705 chk->rec.chunk_id.can_take_data = 1;
8706 chk->sent = SCTP_DATAGRAM_UNSENT;
8709 chk->asoc = &stcb->asoc;
8710 chk->data = m_shutdown_ack;
8712 atomic_add_int(&net->ref_count, 1);
8714 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
8715 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
8716 ack_cp->ch.chunk_flags = 0;
8717 ack_cp->ch.chunk_length = htons(chk->send_size);
8718 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
8719 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8720 chk->asoc->ctrl_queue_cnt++;
8725 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
8727 /* formulate and queue a SHUTDOWN to the sender */
8728 struct mbuf *m_shutdown;
8729 struct sctp_shutdown_chunk *shutdown_cp;
8730 struct sctp_tmit_chunk *chk;
8732 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
8733 if (m_shutdown == NULL) {
8737 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
8738 sctp_alloc_a_chunk(stcb, chk);
8741 sctp_m_freem(m_shutdown);
8744 chk->copy_by_ref = 0;
8745 chk->send_size = sizeof(struct sctp_shutdown_chunk);
8746 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
8747 chk->rec.chunk_id.can_take_data = 1;
8748 chk->sent = SCTP_DATAGRAM_UNSENT;
8751 chk->asoc = &stcb->asoc;
8752 chk->data = m_shutdown;
8754 atomic_add_int(&net->ref_count, 1);
8756 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
8757 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
8758 shutdown_cp->ch.chunk_flags = 0;
8759 shutdown_cp->ch.chunk_length = htons(chk->send_size);
8760 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
8761 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
8762 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8763 chk->asoc->ctrl_queue_cnt++;
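/*
 * For reference (RFC 4960): the SHUTDOWN chunk built above is 8 bytes,
 * a 4-byte chunk header plus the 4-byte Cumulative TSN Ack taken from
 * asoc->cumulative_tsn.
 */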
8768 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
8771 * formulate and queue an ASCONF to the peer. ASCONF parameters
8772 * should be queued on the assoc queue.
8774 struct sctp_tmit_chunk *chk;
8775 struct mbuf *m_asconf;
8778 SCTP_TCB_LOCK_ASSERT(stcb);
8780 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
8781 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
8782 /* can't send a new one if there is one in flight already */
8785 /* compose an ASCONF chunk, maximum length is PMTU */
8786 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
8787 if (m_asconf == NULL) {
8790 sctp_alloc_a_chunk(stcb, chk);
8793 sctp_m_freem(m_asconf);
8796 chk->copy_by_ref = 0;
8797 chk->data = m_asconf;
8798 chk->send_size = len;
8799 chk->rec.chunk_id.id = SCTP_ASCONF;
8800 chk->rec.chunk_id.can_take_data = 0;
8801 chk->sent = SCTP_DATAGRAM_UNSENT;
8803 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8804 chk->asoc = &stcb->asoc;
8806 atomic_add_int(&chk->whoTo->ref_count, 1);
8807 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
8808 chk->asoc->ctrl_queue_cnt++;
8813 sctp_send_asconf_ack(struct sctp_tcb *stcb)
8816 * formulate and queue an asconf-ack back to the sender. The asconf-ack
8817 * must be stored in the tcb.
8819 struct sctp_tmit_chunk *chk;
8820 struct sctp_asconf_ack *ack, *latest_ack;
8821 struct mbuf *m_ack, *m;
8822 struct sctp_nets *net = NULL;
8824 SCTP_TCB_LOCK_ASSERT(stcb);
8825 /* Get the latest ASCONF-ACK */
8826 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
8827 if (latest_ack == NULL) {
8830 if (latest_ack->last_sent_to != NULL &&
8831 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
8832 /* we're doing a retransmission */
8833 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
8836 if (stcb->asoc.last_control_chunk_from == NULL)
8837 net = stcb->asoc.primary_destination;
8839 net = stcb->asoc.last_control_chunk_from;
8843 if (stcb->asoc.last_control_chunk_from == NULL)
8844 net = stcb->asoc.primary_destination;
8846 net = stcb->asoc.last_control_chunk_from;
8848 latest_ack->last_sent_to = net;
8850 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
8851 if (ack->data == NULL) {
8854 /* copy the asconf_ack */
8855 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
8856 if (m_ack == NULL) {
8857 /* couldn't copy it */
8860 #ifdef SCTP_MBUF_LOGGING
8861 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8866 if (SCTP_BUF_IS_EXTENDED(mat)) {
8867 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8869 mat = SCTP_BUF_NEXT(mat);
8874 sctp_alloc_a_chunk(stcb, chk);
8878 sctp_m_freem(m_ack);
8881 chk->copy_by_ref = 0;
8888 chk->send_size = ack->len;
8889 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
8890 chk->rec.chunk_id.can_take_data = 1;
8891 chk->sent = SCTP_DATAGRAM_UNSENT;
8893 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
8894 chk->asoc = &stcb->asoc;
8895 atomic_add_int(&chk->whoTo->ref_count, 1);
8897 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8898 chk->asoc->ctrl_queue_cnt++;
8905 sctp_chunk_retransmission(struct sctp_inpcb *inp,
8906 struct sctp_tcb *stcb,
8907 struct sctp_association *asoc,
8908 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
8909 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8915 * send out one MTU of retransmission. If fast_retransmit is
8916 * happening we ignore the cwnd. Otherwise we obey the cwnd and
8917 * rwnd. For a Cookie or Asconf in the control chunk queue we
8918 * retransmit them by themselves.
8920 * For data chunks we will pick out the lowest TSN's in the sent_queue
8921 * marked for resend and bundle them all together (up to an MTU of
8922 * destination). The address to send to should have been
8923 * selected/changed where the retransmission was marked (i.e. in FR
8924 * or t3-timeout routines).
8926 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8927 struct sctp_tmit_chunk *chk, *fwd;
8928 struct mbuf *m, *endofchain;
8929 struct sctp_nets *net = NULL;
8930 uint32_t tsns_sent = 0;
8931 int no_fragmentflg, bundle_at, cnt_thru;
8933 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
8934 struct sctp_auth_chunk *auth = NULL;
8935 uint32_t auth_offset = 0;
8936 uint16_t auth_keyid;
8937 int override_ok = 1;
8938 int data_auth_reqd = 0;
8941 SCTP_TCB_LOCK_ASSERT(stcb);
8942 tmr_started = ctl_cnt = bundle_at = error = 0;
8947 endofchain = m = NULL;
8948 auth_keyid = stcb->asoc.authinfo.active_keyid;
8949 #ifdef SCTP_AUDITING_ENABLED
8950 sctp_audit_log(0xC3, 1);
8952 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
8953 (TAILQ_EMPTY(&asoc->control_send_queue))) {
8954 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
8955 asoc->sent_queue_retran_cnt);
8956 asoc->sent_queue_cnt = 0;
8957 asoc->sent_queue_cnt_removeable = 0;
8958 /* send back 0/0 so we enter normal transmission */
8962 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8963 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
8964 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
8965 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
8966 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
8967 if (chk != asoc->str_reset) {
8969 * not eligible for retran if it's not ours
8976 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
8981 * Add an AUTH chunk, if chunk requires it, save the
8982 * offset into the chain for AUTH
8984 if ((auth == NULL) &&
8985 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8986 stcb->asoc.peer_auth_chunks))) {
8987 m = sctp_add_auth_chunk(m, &endofchain,
8988 &auth, &auth_offset,
8990 chk->rec.chunk_id.id);
8991 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8993 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
8999 /* do we have control chunks to retransmit? */
9001 /* Start a timer no matter if we succeed or fail */
9002 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
9003 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9004 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9005 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9006 chk->snd_count++; /* update our count */
9007 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9008 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9009 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9010 no_fragmentflg, 0, NULL, 0,
9011 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9012 chk->whoTo->port, so_locked, NULL))) {
9013 SCTP_STAT_INCR(sctps_lowlevelerr);
9016 m = endofchain = NULL;
9020 * We don't want to mark the net->sent time here, since
9021 * we use that for HBs, and a retransmission cannot measure RTT
9023 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9025 chk->sent = SCTP_DATAGRAM_SENT;
9026 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9030 /* Clean up the fwd-tsn list */
9031 sctp_clean_up_ctl(stcb, asoc);
9036 * Ok, it is just data retransmission we need to do, or that and a
9037 * fwd-tsn with it all.
9039 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9040 return (SCTP_RETRAN_DONE);
9042 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9043 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9044 /* not yet open, resend the cookie and that is it */
9047 #ifdef SCTP_AUDITING_ENABLED
9048 sctp_auditing(20, inp, stcb, NULL);
9050 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9051 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9052 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9053 /* No, not sent to this net or not ready for rtx */
9056 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9057 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9058 /* Gak, we have exceeded max unlucky retran, abort! */
9059 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9061 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9062 atomic_add_int(&stcb->asoc.refcnt, 1);
9063 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
9064 SCTP_TCB_LOCK(stcb);
9065 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9066 return (SCTP_RETRAN_EXIT);
9068 /* pick up the net */
9070 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
9071 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
9073 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
9076 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9077 /* No room in peers rwnd */
9080 tsn = asoc->last_acked_seq + 1;
9081 if (tsn == chk->rec.data.TSN_seq) {
9083 * we make a special exception for this
9084 * case. The peer has no rwnd but is missing
9085 * the lowest chunk.. which is probably what
9086 * is holding up the rwnd.
9088 goto one_chunk_around;
9093 if (asoc->peers_rwnd < mtu) {
9095 if ((asoc->peers_rwnd == 0) &&
9096 (asoc->total_flight == 0)) {
9097 chk->window_probe = 1;
9098 chk->whoTo->window_probe = 1;
9101 #ifdef SCTP_AUDITING_ENABLED
9102 sctp_audit_log(0xC3, 2);
9106 net->fast_retran_ip = 0;
9107 if (chk->rec.data.doing_fast_retransmit == 0) {
9109 * if no FR is in progress, skip destinations that have
9110 * flight_size > cwnd.
9112 if (net->flight_size >= net->cwnd) {
9117 * Mark the destination net to have FR recovery
9121 net->fast_retran_ip = 1;
9125 * if no AUTH is yet included and this chunk requires it,
9126 * make sure to account for it. We don't apply the size
9127 * until the AUTH chunk is actually added below in case
9128 * there is no room for this chunk.
9130 if (data_auth_reqd && (auth == NULL)) {
9131 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9135 if ((chk->send_size <= (mtu - dmtu)) ||
9136 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9137 /* ok we will add this one */
9138 if (data_auth_reqd) {
9140 m = sctp_add_auth_chunk(m,
9146 auth_keyid = chk->auth_keyid;
9148 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9149 } else if (override_ok) {
9150 auth_keyid = chk->auth_keyid;
9152 } else if (chk->auth_keyid != auth_keyid) {
9153 /* different keyid, so done bundling */
9157 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9159 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9162 /* Do clear IP_DF ? */
9163 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9166 /* update our MTU size */
9167 if (mtu > (chk->send_size + dmtu))
9168 mtu -= (chk->send_size + dmtu);
9171 data_list[bundle_at++] = chk;
9172 if (one_chunk && (asoc->total_flight <= 0)) {
9173 SCTP_STAT_INCR(sctps_windowprobed);
9176 if (one_chunk == 0) {
9178 * now, are there any more forward from chk to pick up?
9181 fwd = TAILQ_NEXT(chk, sctp_next);
9183 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9184 /* Nope, not for retran */
9185 fwd = TAILQ_NEXT(fwd, sctp_next);
9188 if (fwd->whoTo != net) {
9189 /* Nope, not the net in question */
9190 fwd = TAILQ_NEXT(fwd, sctp_next);
9193 if (data_auth_reqd && (auth == NULL)) {
9194 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9197 if (fwd->send_size <= (mtu - dmtu)) {
9198 if (data_auth_reqd) {
9200 m = sctp_add_auth_chunk(m,
9206 auth_keyid = fwd->auth_keyid;
9208 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9209 } else if (override_ok) {
9210 auth_keyid = fwd->auth_keyid;
9212 } else if (fwd->auth_keyid != auth_keyid) {
9220 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9222 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9225 /* Do clear IP_DF ? */
9226 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9229 /* update our MTU size */
9230 if (mtu > (fwd->send_size + dmtu))
9231 mtu -= (fwd->send_size + dmtu);
9234 data_list[bundle_at++] = fwd;
9235 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9238 fwd = TAILQ_NEXT(fwd, sctp_next);
9240 /* can't fit so we are done */
9245 /* Is there something to send for this destination? */
9248 * No matter if we fail or succeed, we should start a
9249 * timer. A failure is like a lost IP packet :-)
9251 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9253 * no timer running on this destination
9256 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9259 /* Now let's send it, if there is anything to send :> */
9260 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9261 (struct sockaddr *)&net->ro._l_addr, m,
9262 auth_offset, auth, auth_keyid,
9263 no_fragmentflg, 0, NULL, 0,
9264 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9265 net->port, so_locked, NULL))) {
9266 /* error, we could not output */
9267 SCTP_STAT_INCR(sctps_lowlevelerr);
9270 m = endofchain = NULL;
9275 * We don't want to mark the net->sent time here
9276 * since we use that for HBs, and retrans cannot measure RTT
9279 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9281 /* For auto-close */
9283 if (*now_filled == 0) {
9284 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9285 *now = asoc->time_last_sent;
9288 asoc->time_last_sent = *now;
9290 *cnt_out += bundle_at;
9291 #ifdef SCTP_AUDITING_ENABLED
9292 sctp_audit_log(0xC4, bundle_at);
9295 tsns_sent = data_list[0]->rec.data.TSN_seq;
9297 for (i = 0; i < bundle_at; i++) {
9298 SCTP_STAT_INCR(sctps_sendretransdata);
9299 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9301 * When we have revoked data and we
9302 * retransmit it, then we clear the revoked
9303 * flag since this flag dictates if we
9304 * subtracted from the fs
9306 if (data_list[i]->rec.data.chunk_was_revoked) {
9307 /* Deflate the cwnd */
9308 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9309 data_list[i]->rec.data.chunk_was_revoked = 0;
9311 data_list[i]->snd_count++;
9312 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9313 /* record the time */
9314 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9315 if (data_list[i]->book_size_scale) {
9317 * need to double the book size on this chunk.
9320 data_list[i]->book_size_scale = 0;
9322 * Since we double the booksize, we
9323 * must also double the output queue
9324 * size, since it gets shrunk when
9325 * we free by this amount.
9327 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9328 data_list[i]->book_size *= 2;
9332 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9333 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9334 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9336 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9337 (uint32_t) (data_list[i]->send_size +
9338 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9340 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9341 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9342 data_list[i]->whoTo->flight_size,
9343 data_list[i]->book_size,
9344 (uintptr_t) data_list[i]->whoTo,
9345 data_list[i]->rec.data.TSN_seq);
9347 sctp_flight_size_increase(data_list[i]);
9348 sctp_total_flight_increase(stcb, data_list[i]);
9349 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9350 /* SWS sender side engages */
9351 asoc->peers_rwnd = 0;
9354 (data_list[i]->rec.data.doing_fast_retransmit)) {
9355 SCTP_STAT_INCR(sctps_sendfastretrans);
9356 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9357 (tmr_started == 0)) {
9359 * ok we just fast-retrans'd
9360 * the lowest TSN, i.e. the
9361 * first on the list. In
9362 * this case we want to give
9363 * some more time to get a
9364 * SACK back without the t3 timer expiring.
9367 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9368 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9369 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9373 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9374 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9376 #ifdef SCTP_AUDITING_ENABLED
9377 sctp_auditing(21, inp, stcb, NULL);
9383 if (asoc->sent_queue_retran_cnt <= 0) {
9384 /* all done we have no more to retran */
9385 asoc->sent_queue_retran_cnt = 0;
9389 /* No more room in rwnd */
9392 /* stop the for loop here. we sent out a packet */
9400 sctp_timer_validation(struct sctp_inpcb *inp,
9401 struct sctp_tcb *stcb,
9402 struct sctp_association *asoc,
9405 struct sctp_nets *net;
9407 /* Validate that a timer is running somewhere */
9408 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9409 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9410 /* Here is a timer */
9414 SCTP_TCB_LOCK_ASSERT(stcb);
9415 /* Gak, we did not have a timer somewhere */
9416 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9417 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9422 sctp_chunk_output(struct sctp_inpcb *inp,
9423 struct sctp_tcb *stcb,
9426 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9432 * Ok this is the generic chunk service queue. We must do the
9434 * - See if there are retransmits pending, if so we must
9436 * - Service the stream queue that is next, moving any
9437 * message (note I must get a complete message i.e.
9438 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning TSN's.
9440 * - Check to see if the cwnd/rwnd allows any output, if so we
9441 * go ahead and formulate and send the low level chunks. Making sure
9442 * to combine any control in the control chunk queue also.
9444 struct sctp_association *asoc;
9445 struct sctp_nets *net;
9446 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
9447 burst_cnt = 0, burst_limit = 0;
9451 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9453 int fr_done, tot_frs = 0;
9456 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9457 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9463 SCTP_TCB_LOCK_ASSERT(stcb);
9465 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9467 if ((un_sent <= 0) &&
9468 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9469 (asoc->sent_queue_retran_cnt == 0)) {
9470 /* Nothing to do unless there is something left to be sent */
9474 * Do we have something to send, data or control AND a sack timer
9475 * running? If so, piggy-back the sack.
9477 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9479 * EY: if nr_sacks are in use then send an nr-sack, otherwise a sack
9482 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
9483 sctp_send_nr_sack(stcb);
9485 sctp_send_sack(stcb);
9486 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
9488 while (asoc->sent_queue_retran_cnt) {
9490 * Ok, it is retransmission time only, we send out only ONE
9491 * packet with a single call off to the retran code.
9493 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9495 * Special hook for handling cookies discarded
9496 * by the peer that carried data. Send cookie-ack only
9497 * and then the next call will get the retrans.
9499 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9501 &now, &now_filled, frag_point, so_locked);
9503 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9504 /* if it's not from a HB then do it */
9506 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9512 * it's from any other place, we don't allow retran
9513 * output (only control)
9518 /* Can't send anymore */
9520 * now let's push out control by calling med-level
9521 * output once. This assures that we WILL send HB's
9524 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9526 &now, &now_filled, frag_point, so_locked);
9527 #ifdef SCTP_AUDITING_ENABLED
9528 sctp_auditing(8, inp, stcb, NULL);
9530 (void)sctp_timer_validation(inp, stcb, asoc, ret);
9535 * The count was off.. retran is not happening so do
9536 * the normal retransmission.
9538 #ifdef SCTP_AUDITING_ENABLED
9539 sctp_auditing(9, inp, stcb, NULL);
9541 if (ret == SCTP_RETRAN_EXIT) {
9546 if (from_where == SCTP_OUTPUT_FROM_T3) {
9547 /* Only one transmission allowed out of a timeout */
9548 #ifdef SCTP_AUDITING_ENABLED
9549 sctp_auditing(10, inp, stcb, NULL);
9551 /* Push out any control */
9552 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
9553 &now, &now_filled, frag_point, so_locked);
9556 if (tot_frs > asoc->max_burst) {
9557 /* Hit FR burst limit */
9560 if ((num_out == 0) && (ret == 0)) {
9562 /* No more retrans to send */
9566 #ifdef SCTP_AUDITING_ENABLED
9567 sctp_auditing(12, inp, stcb, NULL);
9569 /* Check for bad destinations, if they exist move chunks around. */
9570 burst_limit = asoc->max_burst;
9571 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9572 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
9573 SCTP_ADDR_NOT_REACHABLE) {
9575 * if possible, move things off of this address. We
9576 * may still send below due to the dormant state, but
9577 * we try to find an alternate address to send to,
9578 * and if we have one we move all queued data on the
9579 * out wheel to this alternate address.
9581 if (net->ref_count > 1)
9582 sctp_move_to_an_alt(stcb, asoc, net);
9583 } else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
9584 SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
9585 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
9587 * JRS 5/14/07 - If CMT PF is on and the current
9588 * destination is in PF state, move all queued data
9589 * to an alternate destination.
9591 if (net->ref_count > 1)
9592 sctp_move_to_an_alt(stcb, asoc, net);
9595 * if ((asoc->sat_network) || (net->addr_is_local))
9596 * { burst_limit = asoc->max_burst *
9597 * SCTP_SAT_NETWORK_BURST_INCR; }
9599 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
9600 if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
9602 * JRS - Use the congestion control
9603 * given in the congestion control
9606 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, burst_limit);
9607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9608 sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
9610 SCTP_STAT_INCR(sctps_maxburstqueued);
9612 net->fast_retran_ip = 0;
9614 if (net->flight_size == 0) {
9615 /* Should be decaying the cwnd here */
9624 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
9625 &reason_code, 0, from_where,
9626 &now, &now_filled, frag_point, so_locked);
9628 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
9629 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9630 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
9632 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9633 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
9634 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
9638 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
9642 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9643 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
9645 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
9650 * When nagle is on, we look at how much is un_sent, then
9651 * if it's smaller than an MTU and we have data in flight, we stop.
9654 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
9655 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
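/*
 * Illustrative numbers: with smallest_mtu = 1500, anything under
 * roughly 1.4 KB of un_sent data while flight is non-zero makes us
 * hold off and wait for a SACK, so small writes coalesce into fuller
 * packets (classic Nagle behavior). Note un_sent also charges one
 * sctp_data_chunk header per queued stream message.
 */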
9656 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
9657 (stcb->asoc.total_flight > 0)) {
9661 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
9662 TAILQ_EMPTY(&asoc->send_queue) &&
9663 TAILQ_EMPTY(&asoc->out_wheel)) {
9664 /* Nothing left to send */
9667 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
9668 /* Nothing left to send */
9671 } while (num_out && (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
9672 (burst_cnt < burst_limit)));
9674 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
9675 if (burst_cnt >= burst_limit) {
9676 SCTP_STAT_INCR(sctps_maxburstqueued);
9677 asoc->burst_limit_applied = 1;
9678 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9679 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
9682 asoc->burst_limit_applied = 0;
9685 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9686 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
9688 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
9692 * Now we need to clean up the control chunk chain if an ECNE is on
9693 * it. It must be marked as UNSENT again so the next call will continue
9694 * to send it until such time that we get a CWR, to remove it.
9696 if (stcb->asoc.ecn_echo_cnt_onq)
9697 sctp_fix_ecn_echo(asoc);
9703 sctp_output(inp, m, addr, control, p, flags)
9704 struct sctp_inpcb *inp;
9706 struct sockaddr *addr;
9707 struct mbuf *control;
9712 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
9715 if (inp->sctp_socket == NULL) {
9716 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
9719 return (sctp_sosend(inp->sctp_socket,
9729 send_forward_tsn(struct sctp_tcb *stcb,
9730 struct sctp_association *asoc)
9732 struct sctp_tmit_chunk *chk;
9733 struct sctp_forward_tsn_chunk *fwdtsn;
9734 uint32_t advance_peer_ack_point;
9736 SCTP_TCB_LOCK_ASSERT(stcb);
9737 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9738 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9739 /* mark it as unsent */
9740 chk->sent = SCTP_DATAGRAM_UNSENT;
9742 /* Do we correct its output location? */
9743 if (chk->whoTo != asoc->primary_destination) {
9744 sctp_free_remote_addr(chk->whoTo);
9745 chk->whoTo = asoc->primary_destination;
9746 atomic_add_int(&chk->whoTo->ref_count, 1);
9748 goto sctp_fill_in_rest;
9751 /* Ok if we reach here we must build one */
9752 sctp_alloc_a_chunk(stcb, chk);
9756 chk->copy_by_ref = 0;
9757 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
9758 chk->rec.chunk_id.can_take_data = 0;
9762 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
9763 if (chk->data == NULL) {
9764 sctp_free_a_chunk(stcb, chk);
9767 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
9768 chk->sent = SCTP_DATAGRAM_UNSENT;
9770 chk->whoTo = asoc->primary_destination;
9771 atomic_add_int(&chk->whoTo->ref_count, 1);
9772 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
9773 asoc->ctrl_queue_cnt++;
9776 * Here we go through and fill out the part that deals with
9777 * stream/seq of the ones we skip.
9779 SCTP_BUF_LEN(chk->data) = 0;
9781 struct sctp_tmit_chunk *at, *tp1, *last;
9782 struct sctp_strseq *strseq;
9783 unsigned int cnt_of_space, i, ovh;
9784 unsigned int space_needed;
9785 unsigned int cnt_of_skipped = 0;
9787 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
9788 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
9789 /* no more to look at */
9792 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
9793 /* We don't report these */
9798 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
9799 (cnt_of_skipped * sizeof(struct sctp_strseq)));
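/*
 * Worked example: sizeof(struct sctp_forward_tsn_chunk) is 8 (chunk
 * header plus new cumulative TSN) and each sctp_strseq is 4 (16-bit
 * stream, 16-bit sequence), so skipping 10 chunks needs
 * 8 + 10 * 4 = 48 bytes.
 */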
9801 cnt_of_space = M_TRAILINGSPACE(chk->data);
9803 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
9804 ovh = SCTP_MIN_OVERHEAD;
9806 ovh = SCTP_MIN_V4_OVERHEAD;
9808 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
9809 /* trim to an mtu size */
9810 cnt_of_space = asoc->smallest_mtu - ovh;
9812 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
9813 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
9814 0xff, 0, cnt_of_skipped,
9815 asoc->advanced_peer_ack_point);
9818 advance_peer_ack_point = asoc->advanced_peer_ack_point;
9819 if (cnt_of_space < space_needed) {
9821 * ok we must trim down the chunk by lowering the
9822 * advance peer ack point.
9824 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
9825 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
9826 0xff, 0xff, cnt_of_space,
9829 cnt_of_skipped = (cnt_of_space -
9830 ((sizeof(struct sctp_forward_tsn_chunk)) /
9831 sizeof(struct sctp_strseq)));
9833 * Go through and find the TSN that will be the one we report.
9836 at = TAILQ_FIRST(&asoc->sent_queue);
9837 for (i = 0; i < cnt_of_skipped; i++) {
9838 tp1 = TAILQ_NEXT(at, sctp_next);
9841 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
9842 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
9843 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
9844 asoc->advanced_peer_ack_point);
9848 * last now points to the last one I can report, update the peer ack point.
9851 advance_peer_ack_point = last->rec.data.TSN_seq;
9852 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq));
9854 chk->send_size = space_needed;
9855 /* Setup the chunk */
9856 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
9857 fwdtsn->ch.chunk_length = htons(chk->send_size);
9858 fwdtsn->ch.chunk_flags = 0;
9859 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
9860 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
9861 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) +
9862 (cnt_of_skipped * sizeof(struct sctp_strseq)));
9863 SCTP_BUF_LEN(chk->data) = chk->send_size;
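/*
 * For reference (RFC 3758): the FORWARD-TSN wire format built here is
 * the new cumulative TSN followed by one (stream, stream-sequence)
 * pair per skipped ordered chunk.
 */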
9866 * Move pointer to after the fwdtsn and transfer to the strseq pointer.
9869 strseq = (struct sctp_strseq *)fwdtsn;
9871 * Now populate the strseq list. This is done blindly
9872 * without pulling out duplicate stream info. This is
9873 * inefficient but won't harm the process since the peer will
9874 * look at these in sequence and will thus release anything.
9875 * It could mean we exceed the PMTU and chop off some that
9876 * we could have included.. but this is unlikely (aka 1432/4
9877 * would mean 300+ stream seq's would have to be reported in
9878 * one FWD-TSN). With a bit of work we can later FIX this to
9879 * optimize and pull out duplicates.. but it does add more
9880 * overhead. So for now... not!
9882 at = TAILQ_FIRST(&asoc->sent_queue);
9883 for (i = 0; i < cnt_of_skipped; i++) {
9884 tp1 = TAILQ_NEXT(at, sctp_next);
9885 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
9886 /* We don't report these */
9891 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
9892 at->rec.data.fwd_tsn_cnt = 0;
9894 strseq->stream = ntohs(at->rec.data.stream_number);
9895 strseq->sequence = ntohs(at->rec.data.stream_seq);
9905 sctp_send_sack(struct sctp_tcb *stcb)
9908 * Queue up a SACK in the control queue. We must first check to see
9909 * if a SACK is somehow on the control queue. If so, we will take
9910 * and remove the old one.
9912 struct sctp_association *asoc;
9913 struct sctp_tmit_chunk *chk, *a_chk;
9914 struct sctp_sack_chunk *sack;
9915 struct sctp_gap_ack_block *gap_descriptor;
9916 struct sack_track *selector;
9921 int limit_reached = 0;
9922 unsigned int i, jstart, siz, j;
9923 unsigned int num_gap_blocks = 0, space;
9929 SCTP_TCB_LOCK_ASSERT(stcb);
9930 if (asoc->last_data_chunk_from == NULL) {
9931 /* Hmm we never received anything */
9934 sctp_set_rwnd(stcb, asoc);
9935 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9936 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
9937 /* Hmm, found a sack already on queue, remove it */
9938 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
9939 asoc->ctrl_queue_cnt--; /* it was removed from the queue above */
9942 sctp_m_freem(a_chk->data);
9945 sctp_free_remote_addr(a_chk->whoTo);
9946 a_chk->whoTo = NULL;
9950 if (a_chk == NULL) {
9951 sctp_alloc_a_chunk(stcb, a_chk);
9952 if (a_chk == NULL) {
9953 /* No memory so we drop the idea, and set a timer */
9954 if (stcb->asoc.delayed_ack) {
9955 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
9956 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
9957 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
9958 stcb->sctp_ep, stcb, NULL);
9960 stcb->asoc.send_sack = 1;
9964 a_chk->copy_by_ref = 0;
9965 /* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */
9966 a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK;
9967 a_chk->rec.chunk_id.can_take_data = 1;
9969 /* Clear our pkt counts */
9970 asoc->data_pkts_seen = 0;
9973 a_chk->snd_count = 0;
9974 a_chk->send_size = 0; /* fill in later */
9975 a_chk->sent = SCTP_DATAGRAM_UNSENT;
9976 a_chk->whoTo = NULL;
9978 if ((asoc->numduptsns) ||
9979 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
9982 * Ok, we have some duplicates or the destination for the
9983 * sack is unreachable, let's see if we can select an
9984 * alternate to asoc->last_data_chunk_from
9986 if ((!(asoc->last_data_chunk_from->dest_state &
9987 SCTP_ADDR_NOT_REACHABLE)) &&
9988 (asoc->used_alt_onsack > asoc->numnets)) {
9989 /* We used an alt last time, don't this time */
9990 a_chk->whoTo = NULL;
9992 asoc->used_alt_onsack++;
9993 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
9995 if (a_chk->whoTo == NULL) {
9996 /* Nope, no alternate */
9997 a_chk->whoTo = asoc->last_data_chunk_from;
9998 asoc->used_alt_onsack = 0;
10002 * No duplicates so we use the last place we received data from.
10005 asoc->used_alt_onsack = 0;
10006 a_chk->whoTo = asoc->last_data_chunk_from;
10008 if (a_chk->whoTo) {
10009 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10011 if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
10013 space_req = sizeof(struct sctp_sack_chunk);
10015 /* gaps get a cluster */
10016 space_req = MCLBYTES;
10018 /* Ok now let's formulate an MBUF with our sack */
10019 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
10020 if ((a_chk->data == NULL) ||
10021 (a_chk->whoTo == NULL)) {
10022 /* rats, no mbuf memory */
10024 /* was a problem with the destination */
10025 sctp_m_freem(a_chk->data);
10026 a_chk->data = NULL;
10028 sctp_free_a_chunk(stcb, a_chk);
10029 /* sa_ignore NO_NULL_CHK */
10030 if (stcb->asoc.delayed_ack) {
10031 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10032 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10033 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10034 stcb->sctp_ep, stcb, NULL);
10036 stcb->asoc.send_sack = 1;
10040 /* ok, let's go through and fill it in */
10041 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10042 space = M_TRAILINGSPACE(a_chk->data);
10043 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10044 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10046 limit = mtod(a_chk->data, caddr_t);
10049 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10050 sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
10051 /* 0x01 is used by nonce for ecn */
10052 if ((SCTP_BASE_SYSCTL(sctp_ecn_enable)) &&
10053 (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) &&
10054 (asoc->peer_supports_ecn_nonce))
10055 sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
10057 sack->ch.chunk_flags = 0;
10059 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10061 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
10062 * received, then set high bit to 1, else 0. Reset pkts_rcvd.
10065 sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10066 asoc->cmt_dac_pkts_rcvd = 0;
10068 #ifdef SCTP_ASOCLOG_OF_TSNS
10069 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10070 stcb->asoc.cumack_log_atsnt++;
10071 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10072 stcb->asoc.cumack_log_atsnt = 0;
10075 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10076 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10077 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10079 /* reset the reader's interpretation */
10080 stcb->freed_by_sorcv_sincelast = 0;
10082 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10084 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn)
10085 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10087 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
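/*
 * Illustrative numbers: siz is how many mapping-array bytes to scan.
 * With base_tsn = 100 and highest_tsn_inside_map = 131 that is
 * ((131 - 100) + 1 + 7) / 8 = 4 bytes; the second form handles a TSN
 * wrap past MAX_TSN.
 */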
10089 if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
10092 * cum-ack behind the mapping array, so we start and use all entries.
10097 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10099 * we skip the first one when the cum-ack is at or above the
10100 * mapping array base. Note this only works if the mapping array base is 0.
10104 if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
10105 /* we have a gap .. maybe */
10106 for (i = 0; i < siz; i++) {
10107 selector = &sack_array[asoc->mapping_array[i]];
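/*
 * For reference: sack_array[] is a 256-entry table indexed by one byte
 * of the mapping array; each entry pre-computes the gap runs inside
 * that byte and whether a run touches the byte's left or right edge,
 * which is what lets runs spanning byte boundaries be merged below.
 */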
10108 if (mergeable && selector->right_edge) {
10110 * Backup, left and right edges were ok to merge.
10116 if (selector->num_entries == 0)
10119 for (j = jstart; j < selector->num_entries; j++) {
10120 if (mergeable && selector->right_edge) {
10122 * do a merge by NOT setting the left corner.
10128 * no merge, set the left corner.
10132 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10134 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10137 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10143 if (selector->left_edge) {
10147 if (limit_reached) {
10148 /* Reached the limit, stop */
10154 if (num_gap_blocks == 0) {
10156 * slide not yet happened, and somehow we got called
10157 * to send a sack. Cumack needs to move up.
10159 int abort_flag = 0;
10161 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
10162 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10163 sctp_sack_check(stcb, 0, 0, &abort_flag);
10166 /* now we must add any dups we are going to report. */
10167 if ((limit_reached == 0) && (asoc->numduptsns)) {
10168 dup = (uint32_t *) gap_descriptor;
10169 for (i = 0; i < asoc->numduptsns; i++) {
10170 *dup = htonl(asoc->dup_tsns[i]);
10173 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10178 asoc->numduptsns = 0;
10181 * now that the chunk is prepared, queue it to the control chunk queue.
10184 a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
10185 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
10186 (num_dups * sizeof(int32_t)));
10187 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10188 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10189 sack->sack.num_dup_tsns = htons(num_dups);
10190 sack->ch.chunk_length = htons(a_chk->send_size);
10191 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10192 asoc->ctrl_queue_cnt++;
10193 asoc->send_sack = 0;
10194 SCTP_STAT_INCR(sctps_sendsacks);
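/*
 * For reference (RFC 4960): the finished SACK carries the cumulative
 * TSN ack, a_rwnd, the gap-block and dup-TSN counts, then each gap-ack
 * block as 16-bit start/end offsets relative to the cumulative TSN,
 * and finally the 32-bit duplicate TSNs.
 */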
10198 /* EY - This method replaces the sctp_send_sack method if nr_sacks are negotiated */
10200 sctp_send_nr_sack(struct sctp_tcb *stcb)
10203 * Queue up an NR-SACK in the control queue. We must first check to see
10204 * if an NR-SACK is somehow on the control queue. If so, we will take
10205 * and remove the old one.
10207 struct sctp_association *asoc;
10208 struct sctp_tmit_chunk *chk, *a_chk;
10210 struct sctp_nr_sack_chunk *nr_sack;
10212 struct sctp_gap_ack_block *gap_descriptor;
10213 struct sctp_nr_gap_ack_block *nr_gap_descriptor;
10215 struct sack_track *selector;
10216 struct sack_track *nr_selector;
10218 /* EY do we need nr_mergeable, NO */
10223 int limit_reached = 0;
10224 unsigned int i, jstart, siz, j;
10225 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
10228 unsigned int reserved = 0;
10231 asoc = &stcb->asoc;
10232 SCTP_TCB_LOCK_ASSERT(stcb);
10233 if (asoc->last_data_chunk_from == NULL) {
10234 /* Hmm we never received anything */
10237 sctp_set_rwnd(stcb, asoc);
10238 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10239 if (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) {
10240 /* Hmm, found a sack already on queue, remove it */
10241 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
10242 asoc->ctrl_queue_cnt--; /* it was removed from the queue above */
10245 sctp_m_freem(a_chk->data);
10246 a_chk->data = NULL;
10248 sctp_free_remote_addr(a_chk->whoTo);
10249 a_chk->whoTo = NULL;
10253 if (a_chk == NULL) {
10254 sctp_alloc_a_chunk(stcb, a_chk);
10255 if (a_chk == NULL) {
10256 /* No memory so we drop the idea, and set a timer */
10257 if (stcb->asoc.delayed_ack) {
10258 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10259 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
10260 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10261 stcb->sctp_ep, stcb, NULL);
10263 stcb->asoc.send_sack = 1;
10267 a_chk->copy_by_ref = 0;
10268 /* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */
10269 a_chk->rec.chunk_id.id = SCTP_NR_SELECTIVE_ACK;
10270 a_chk->rec.chunk_id.can_take_data = 1;
10272 /* Clear our pkt counts */
10273 asoc->data_pkts_seen = 0;
10275 a_chk->asoc = asoc;
10276 a_chk->snd_count = 0;
10277 a_chk->send_size = 0; /* fill in later */
10278 a_chk->sent = SCTP_DATAGRAM_UNSENT;
10279 a_chk->whoTo = NULL;
10281 if ((asoc->numduptsns) ||
10282 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
10285 * Ok, we have some duplicates or the destination for the
10286 * sack is unreachable, let's see if we can select an
10287 * alternate to asoc->last_data_chunk_from
10289 if ((!(asoc->last_data_chunk_from->dest_state &
10290 SCTP_ADDR_NOT_REACHABLE)) &&
10291 (asoc->used_alt_onsack > asoc->numnets)) {
10292 /* We used an alt last time, don't this time */
10293 a_chk->whoTo = NULL;
10295 asoc->used_alt_onsack++;
10296 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
10298 if (a_chk->whoTo == NULL) {
10299 /* Nope, no alternate */
10300 a_chk->whoTo = asoc->last_data_chunk_from;
10301 asoc->used_alt_onsack = 0;
10305 * No duplicates so we use the last place we received data from.
10308 asoc->used_alt_onsack = 0;
10309 a_chk->whoTo = asoc->last_data_chunk_from;
10311 if (a_chk->whoTo) {
10312 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10314 if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
10316 space_req = sizeof(struct sctp_nr_sack_chunk);
10318 /* EY - what is this about? */
10319 /* gaps get a cluster */
10320 space_req = MCLBYTES;
10322 /* Ok now let's formulate an MBUF with our sack */
10323 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
10324 if ((a_chk->data == NULL) ||
10325 (a_chk->whoTo == NULL)) {
10326 /* rats, no mbuf memory */
10328 /* was a problem with the destination */
10329 sctp_m_freem(a_chk->data);
10330 a_chk->data = NULL;
10332 sctp_free_a_chunk(stcb, a_chk);
10333 /* sa_ignore NO_NULL_CHK */
10334 if (stcb->asoc.delayed_ack) {
10335 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10336 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10337 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10338 stcb->sctp_ep, stcb, NULL);
10340 stcb->asoc.send_sack = 1;
10344 /* ok, let's go through and fill it in */
10345 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10346 space = M_TRAILINGSPACE(a_chk->data);
10347 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10348 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10350 limit = mtod(a_chk->data, caddr_t);
10353 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10354 nr_sack->ch.chunk_type = SCTP_NR_SELECTIVE_ACK;
10356 /* 0x01 is used by nonce for ecn */
10357 if ((SCTP_BASE_SYSCTL(sctp_ecn_enable)) &&
10358 (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) &&
10359 (asoc->peer_supports_ecn_nonce))
10360 nr_sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
10362 nr_sack->ch.chunk_flags = 0;
10364 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10366 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
10367 * received, then set high bit to 1, else 0. Reset pkts_rcvd.
10370 /* EY - TODO: which chunk flag is used in here? -The LSB */
10371 nr_sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10372 asoc->cmt_dac_pkts_rcvd = 0;
10374 #ifdef SCTP_ASOCLOG_OF_TSNS
10375 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10376 stcb->asoc.cumack_log_atsnt++;
10377 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10378 stcb->asoc.cumack_log_atsnt = 0;
10381 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10382 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10383 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10386 /* reset the reader's interpretation */
10386 stcb->freed_by_sorcv_sincelast = 0;
10388 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10390 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn)
10391 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10393 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10395 if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
10398 * cum-ack behind the mapping array, so we start and use all entries.
10403 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10405 * we skip the first one when the cum-ack is at or above the
10406 * mapping array base. Note this only works if the mapping array base is 0.
10410 if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
10411 /* we have a gap .. maybe */
10412 for (i = 0; i < siz; i++) {
10413 selector = &sack_array[asoc->mapping_array[i]];
10414 if (mergeable && selector->right_edge) {
10416 * Backup, left and right edges were ok to merge.
10422 if (selector->num_entries == 0)
10425 for (j = jstart; j < selector->num_entries; j++) {
10426 if (mergeable && selector->right_edge) {
10428 * do a merge by NOT setting the left corner.
10434 * no merge, set the left corner.
10438 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10440 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10443 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10449 if (selector->left_edge) {
10453 if (limit_reached) {
10454 /* Reached the limit, stop */
10460 if (num_gap_blocks == 0) {
10462 * slide not yet happened, and somehow we got called
10463 * to send a sack. Cumack needs to move up.
10465 int abort_flag = 0;
10467 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
10468 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10469 sctp_sack_check(stcb, 0, 0, &abort_flag);
10472 /*---------------------------------------------------------filling the nr_gap_ack blocks----------------------------------------------------*/
10474 nr_gap_descriptor = (struct sctp_nr_gap_ack_block *)gap_descriptor;
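/*
 * For reference: the nr gap-ack blocks are appended in the same buffer
 * immediately after the regular gap-ack blocks; the cast above simply
 * re-types the running output pointer.
 */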
10476 /* EY - there will be gaps + nr_gaps if draining is possible */
10477 if ((SCTP_BASE_SYSCTL(sctp_do_drain)) && (limit_reached == 0)) {
10481 if (asoc->highest_tsn_inside_nr_map > asoc->nr_mapping_array_base_tsn)
10482 siz = (((asoc->highest_tsn_inside_nr_map - asoc->nr_mapping_array_base_tsn) + 1) + 7) / 8;
10484 siz = (((MAX_TSN - asoc->nr_mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10486 if (compare_with_wrap(asoc->nr_mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
10489 * cum-ack behind the mapping array, so we start and use all
10494 offset = asoc->nr_mapping_array_base_tsn - asoc->cumulative_tsn;
10496 * we skip the first one when the cum-ack is at or above the
10497 * mapping array base. Note this only works if
10501 if (compare_with_wrap(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn, MAX_TSN)) {
10502 /* we have a gap .. maybe */
10503 for (i = 0; i < siz; i++) {
10504 nr_selector = &sack_array[asoc->nr_mapping_array[i]];
10505 if (mergeable && nr_selector->right_edge) {
10507  * Backup, left and right edges were ok to merge.
10510 num_nr_gap_blocks--;
10511 nr_gap_descriptor--;
10513 if (nr_selector->num_entries == 0)
10516 for (j = jstart; j < nr_selector->num_entries; j++) {
10517 if (mergeable && nr_selector->right_edge) {
10519  * do a merge by NOT setting the left side (start).
10526  * no merge, set the left side (start).
10530 nr_gap_descriptor->start = htons((nr_selector->gaps[j].start + offset));
10532 nr_gap_descriptor->end = htons((nr_selector->gaps[j].end + offset));
10533 num_nr_gap_blocks++;
10534 nr_gap_descriptor++;
10535 if (((caddr_t)nr_gap_descriptor + sizeof(struct sctp_nr_gap_ack_block)) > limit) {
10541 if (nr_selector->left_edge) {
10545 if (limit_reached) {
10546 /* Reached the limit stop */
10554 /*---------------------------------------------End of---filling the nr_gap_ack blocks----------------------------------------------------*/
10556 /* now we must add any dups we are going to report. */
10557 if ((limit_reached == 0) && (asoc->numduptsns)) {
10558 dup = (uint32_t *) nr_gap_descriptor;
10559 for (i = 0; i < asoc->numduptsns; i++) {
10560 *dup = htonl(asoc->dup_tsns[i]);
10563 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10568 asoc->numduptsns = 0;
10571  * now that the chunk is prepared, queue it to the control chunk queue.
10574 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
10575 num_nr_gap_blocks = num_gap_blocks;
10576 num_gap_blocks = 0;
10578 a_chk->send_size = (sizeof(struct sctp_nr_sack_chunk) +
10579 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
10580 (num_nr_gap_blocks * sizeof(struct sctp_nr_gap_ack_block)) +
10581 (num_dups * sizeof(int32_t)));
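/*-
 * A rough size example for the expression above, assuming a 20-byte
 * nr_sack header and 4 bytes per gap block, nr-gap block and dup TSN:
 * 2 gap blocks, 1 nr-gap block and 3 dups give 20 + 8 + 4 + 12 = 44
 * bytes, the value stored in SCTP_BUF_LEN() and chunk_length below.
 */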
10583 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10584 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10585 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10586 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10587 nr_sack->nr_sack.reserved = htons(reserved);
10588 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10589 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10590 asoc->ctrl_queue_cnt++;
10591 asoc->send_sack = 0;
10592 SCTP_STAT_INCR(sctps_sendsacks);
10597 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10598 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10603 struct mbuf *m_abort;
10604 struct mbuf *m_out = NULL, *m_end = NULL;
10605 struct sctp_abort_chunk *abort = NULL;
10607 uint32_t auth_offset = 0;
10608 struct sctp_auth_chunk *auth = NULL;
10611 * Add an AUTH chunk, if chunk requires it and save the offset into
10612 * the chain for AUTH
10614 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10615 stcb->asoc.peer_auth_chunks)) {
10616 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
10617 stcb, SCTP_ABORT_ASSOCIATION);
10618 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10620 SCTP_TCB_LOCK_ASSERT(stcb);
10621 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
10622 if (m_abort == NULL) {
10625 sctp_m_freem(m_out);
10628 /* link in any error */
10629 SCTP_BUF_NEXT(m_abort) = operr;
10636 sz += SCTP_BUF_LEN(n);
10637 n = SCTP_BUF_NEXT(n);
10640 SCTP_BUF_LEN(m_abort) = sizeof(*abort);
10641 if (m_out == NULL) {
10642 /* NO Auth chunk prepended, so reserve space in front */
10643 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10646 /* Put AUTH chunk at the front of the chain */
10647 SCTP_BUF_NEXT(m_end) = m_abort;
10650 /* fill in the ABORT chunk */
10651 abort = mtod(m_abort, struct sctp_abort_chunk *);
10652 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10653 abort->ch.chunk_flags = 0;
10654 abort->ch.chunk_length = htons(sizeof(*abort) + sz);
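/*
 * The chunk length set here covers the ABORT header plus the chained
 * error-cause data (sz) linked in via SCTP_BUF_NEXT above; a
 * prepended AUTH chunk, if any, carries its own length and is not
 * counted.
 */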
10656 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
10657 stcb->asoc.primary_destination,
10658 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
10659 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, NULL, 0,
10660 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10661 stcb->asoc.primary_destination->port, so_locked, NULL);
10662 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10666 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10667 struct sctp_nets *net)
10669 /* formulate and SEND a SHUTDOWN-COMPLETE */
10670 struct mbuf *m_shutdown_comp;
10671 struct sctp_shutdown_complete_chunk *shutdown_complete;
10673 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
10674 if (m_shutdown_comp == NULL) {
10678 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10679 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10680 shutdown_complete->ch.chunk_flags = 0;
10681 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10682 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10683 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10684 (struct sockaddr *)&net->ro._l_addr,
10685 m_shutdown_comp, 0, NULL, 0, 1, 0, NULL, 0,
10686 stcb->sctp_ep->sctp_lport, stcb->rport,
10687 htonl(stcb->asoc.peer_vtag),
10688 net->port, SCTP_SO_NOT_LOCKED, NULL);
10689 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10694 sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
10695 uint32_t vrf_id, uint16_t port)
10697 /* formulate and SEND a SHUTDOWN-COMPLETE */
10698 struct mbuf *o_pak;
10700 struct ip *iph, *iph_out;
10701 struct udphdr *udp = NULL;
10704 struct ip6_hdr *ip6, *ip6_out;
10707 int offset_out, len, mlen;
10708 struct sctp_shutdown_complete_msg *comp_cp;
10710 iph = mtod(m, struct ip *);
10711 switch (iph->ip_v) {
10713 len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
10716 case IPV6_VERSION >> 4:
10717 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
10724 len += sizeof(struct udphdr);
10726 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
10727 if (mout == NULL) {
10730 SCTP_BUF_RESV_UF(mout, max_linkhdr);
10731 SCTP_BUF_LEN(mout) = len;
10732 SCTP_BUF_NEXT(mout) = NULL;
10739 switch (iph->ip_v) {
10741 iph_out = mtod(mout, struct ip *);
10743 /* Fill in the IP header for the ABORT */
10744 iph_out->ip_v = IPVERSION;
10745 iph_out->ip_hl = (sizeof(struct ip) / 4);
10746 iph_out->ip_tos = (u_char)0;
10747 iph_out->ip_id = 0;
10748 iph_out->ip_off = 0;
10749 iph_out->ip_ttl = MAXTTL;
10751 iph_out->ip_p = IPPROTO_UDP;
10753 iph_out->ip_p = IPPROTO_SCTP;
10755 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
10756 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
10758 /* let IP layer calculate this */
10759 iph_out->ip_sum = 0;
10760 offset_out += sizeof(*iph_out);
10761 comp_cp = (struct sctp_shutdown_complete_msg *)(
10762 (caddr_t)iph_out + offset_out);
10765 case IPV6_VERSION >> 4:
10766 ip6 = (struct ip6_hdr *)iph;
10767 ip6_out = mtod(mout, struct ip6_hdr *);
10769 /* Fill in the IPv6 header for the ABORT */
10770 ip6_out->ip6_flow = ip6->ip6_flow;
10771 ip6_out->ip6_hlim = MODULE_GLOBAL(MOD_INET6, ip6_defhlim);
10773 ip6_out->ip6_nxt = IPPROTO_UDP;
10775 ip6_out->ip6_nxt = IPPROTO_SCTP;
10777 ip6_out->ip6_src = ip6->ip6_dst;
10778 ip6_out->ip6_dst = ip6->ip6_src;
10780 * ?? The old code had both the iph len + payload, I think
10781 * this is wrong and would never have worked
10783 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
10784 offset_out += sizeof(*ip6_out);
10785 comp_cp = (struct sctp_shutdown_complete_msg *)(
10786 (caddr_t)ip6_out + offset_out);
10790 /* Currently not supported. */
10791 sctp_m_freem(mout);
10795 udp = (struct udphdr *)comp_cp;
10796 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
10797 udp->uh_dport = port;
10798 udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
10799 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
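/*
 * in_pseudo() only seeds uh_sum with the IPv4 pseudo-header
 * (addresses, length and protocol); the checksum over the UDP payload
 * itself is completed later, e.g. when SCTP_ENABLE_UDP_CSUM() is
 * applied before the packet is handed to IP.
 */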
10800 offset_out += sizeof(struct udphdr);
10801 comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
10803 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
10805 sctp_m_freem(mout);
10808 /* Now copy in and fill in the ABORT tags etc. */
10809 comp_cp->sh.src_port = sh->dest_port;
10810 comp_cp->sh.dest_port = sh->src_port;
10811 comp_cp->sh.checksum = 0;
10812 comp_cp->sh.v_tag = sh->v_tag;
10813 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
10814 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10815 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
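/*
 * SCTP_HAD_NO_TCB sets the T bit in the chunk flags: with no
 * association to consult, the verification tag from the incoming
 * header is simply reflected back (sh->v_tag above).
 */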
10817 if (iph_out != NULL) {
10820 struct sctp_tcb *stcb = NULL;
10822 mlen = SCTP_BUF_LEN(mout);
10823 bzero(&ro, sizeof ro);
10824 /* set IPv4 length */
10825 iph_out->ip_len = mlen;
10826 #ifdef SCTP_PACKET_LOGGING
10827 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
10828 sctp_packet_log(mout, mlen);
10831 comp_cp->sh.checksum = sctp_calculate_cksum(mout, offset_out);
10832 SCTP_STAT_INCR(sctps_sendswcrc);
10833 SCTP_ENABLE_UDP_CSUM(mout);
10835 mout->m_pkthdr.csum_flags = CSUM_SCTP;
10836 mout->m_pkthdr.csum_data = 0; /* FIXME MT */
10837 SCTP_STAT_INCR(sctps_sendhwcrc);
10839 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
10841 SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
10843 /* Free the route if we got one back */
10848 if (ip6_out != NULL) {
10849 struct route_in6 ro;
10851 struct sctp_tcb *stcb = NULL;
10852 struct ifnet *ifp = NULL;
10854 bzero(&ro, sizeof(ro));
10855 mlen = SCTP_BUF_LEN(mout);
10856 #ifdef SCTP_PACKET_LOGGING
10857 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
10858 sctp_packet_log(mout, mlen);
10860 comp_cp->sh.checksum = sctp_calculate_cksum(mout, offset_out);
10861 SCTP_STAT_INCR(sctps_sendswcrc);
10862 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
10864 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr),
10865 sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr))) == 0) {
10866 udp->uh_sum = 0xffff;
10869 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
10871 /* Free the route if we got one back */
10876 SCTP_STAT_INCR(sctps_sendpackets);
10877 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
10878 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10883 static struct sctp_nets *
10884 sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
10886 struct sctp_nets *net, *hnet;
10887 int ms_goneby, highest_ms, state_overide = 0;
10889 (void)SCTP_GETTIME_TIMEVAL(now);
10892 SCTP_TCB_LOCK_ASSERT(stcb);
10893 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
10895 ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
10896 (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
10899  * Skip this guy from consideration if HB is off AND the address is confirmed, or if it is out of scope.
10904 if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
10905 /* skip this dest net from consideration */
10908 if (net->last_sent_time.tv_sec) {
10909 /* Sent to so we subtract */
10910 ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
10912 /* Never been sent to */
10913 ms_goneby = 0x7fffffff;
10915 * When the address state is unconfirmed but still
10916 * considered reachable, we HB at a higher rate. Once it
10917  * goes confirmed OR reaches the "unreachable" state, then
10918 * we cut it back to HB at a more normal pace.
10920 if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
10926 if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
10927 (ms_goneby > highest_ms)) {
10928 highest_ms = ms_goneby;
10933 ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
10939 if (hnet && highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
10941 * Found the one with longest delay bounds OR it is
10942 * unconfirmed and still not marked unreachable.
10944 SCTPDBG(SCTP_DEBUG_OUTPUT4, "net:%p is the hb winner -", hnet);
10947 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT4,
10948 (struct sockaddr *)&hnet->ro._l_addr);
10950 SCTPDBG(SCTP_DEBUG_OUTPUT4, " none\n");
10953 /* update the timer now */
10954 hnet->last_sent_time = *now;
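/*-
 * In short, the search above picks the address that has gone longest
 * without a send (ms_goneby), provided at least an RTO has elapsed;
 * unconfirmed-but-reachable addresses can override that so they get
 * heartbeated sooner.
 */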
10957 /* Nothing to HB */
10962 sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
10964 struct sctp_tmit_chunk *chk;
10965 struct sctp_nets *net;
10966 struct sctp_heartbeat_chunk *hb;
10967 struct timeval now;
10968 struct sockaddr_in *sin;
10969 struct sockaddr_in6 *sin6;
10971 SCTP_TCB_LOCK_ASSERT(stcb);
10972 if (user_req == 0) {
10973 net = sctp_select_hb_destination(stcb, &now);
10976  * All are busy or there is none to send to, just start the timer again.
10979 if (stcb->asoc.state == 0) {
10982 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
10993 (void)SCTP_GETTIME_TIMEVAL(&now);
10995 sin = (struct sockaddr_in *)&net->ro._l_addr;
10996 if (sin->sin_family != AF_INET) {
10997 if (sin->sin_family != AF_INET6) {
11002 sctp_alloc_a_chunk(stcb, chk);
11004 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
11007 chk->copy_by_ref = 0;
11008 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
11009 chk->rec.chunk_id.can_take_data = 1;
11010 chk->asoc = &stcb->asoc;
11011 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
11013 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11014 if (chk->data == NULL) {
11015 sctp_free_a_chunk(stcb, chk);
11018 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11019 SCTP_BUF_LEN(chk->data) = chk->send_size;
11020 chk->sent = SCTP_DATAGRAM_UNSENT;
11021 chk->snd_count = 0;
11023 atomic_add_int(&chk->whoTo->ref_count, 1);
11024 /* Now we have a mbuf that we can fill in with the details */
11025 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
11026 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
11027 /* fill out chunk header */
11028 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
11029 hb->ch.chunk_flags = 0;
11030 hb->ch.chunk_length = htons(chk->send_size);
11031 /* Fill out hb parameter */
11032 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
11033 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
11034 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
11035 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
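/*
 * The send time stored in time_value_1/2 is echoed back unchanged in
 * the HEARTBEAT-ACK, which is how the RTT of this path can be sampled
 * without keeping extra per-HB state.
 */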
11036 /* Did our user request this one, put it in */
11037 hb->heartbeat.hb_info.user_req = user_req;
11038 hb->heartbeat.hb_info.addr_family = sin->sin_family;
11039 hb->heartbeat.hb_info.addr_len = sin->sin_len;
11040 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
11042  * we only take from the entropy pool if the address is not confirmed.
11045 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11046 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
11048 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
11049 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
11051 if (sin->sin_family == AF_INET) {
11052 memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
11053 } else if (sin->sin_family == AF_INET6) {
11054 /* We leave the scope the way it is in our lookup table. */
11055 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
11056 memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
11058 /* huh compiler bug */
11063 * JRS 5/14/07 - In CMT PF, the T3 timer is used to track
11064 * PF-heartbeats. Because of this, threshold management is done by
11065 * the t3 timer handler, and does not need to be done upon the send
11066 * of a PF-heartbeat. If CMT PF is on and the destination to which a
11067 * heartbeat is being sent is in PF state, do NOT do threshold
11070 if ((SCTP_BASE_SYSCTL(sctp_cmt_pf) == 0) || ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) {
11071 /* ok we have a destination that needs a beat */
11072 /* let's do the threshold management Qiaobing style */
11073 if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
11074 stcb->asoc.max_send_times)) {
11076  * we have lost the association; in a way this is
11077  * quite bad since we are charged one more failure than
11078  * we should be, as we never actually got to send. This is the
11079  * down side to Q's style as defined in the RFC, as opposed to
11080  * my alternate style.
11082 if (chk->data != NULL) {
11083 sctp_m_freem(chk->data);
11087 * Here we do NOT use the macro since the
11088 * association is now gone.
11091 sctp_free_remote_addr(chk->whoTo);
11094 sctp_free_a_chunk((struct sctp_tcb *)NULL, chk);
11098 net->hb_responded = 0;
11099 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11100 stcb->asoc.ctrl_queue_cnt++;
11101 SCTP_STAT_INCR(sctps_sendheartbeat);
11103  * Call the mid-level routine directly to put out the chunk. It will
11104  * always tumble out control chunks (aka HB) but it may even tumble out data too.
11111 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
11114 struct sctp_association *asoc;
11115 struct sctp_ecne_chunk *ecne;
11116 struct sctp_tmit_chunk *chk;
11118 asoc = &stcb->asoc;
11119 SCTP_TCB_LOCK_ASSERT(stcb);
11120 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11121 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
11122 /* found a previous ECN_ECHO update it if needed */
11123 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11124 ecne->tsn = htonl(high_tsn);
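/*
 * An ECN-ECHO already sitting on the control queue is updated in
 * place here, so repeated CE marks collapse into a single outstanding
 * ECNE rather than queueing duplicates.
 */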
11128 /* nope could not find one to update so we must build one */
11129 sctp_alloc_a_chunk(stcb, chk);
11133 chk->copy_by_ref = 0;
11134 SCTP_STAT_INCR(sctps_sendecne);
11135 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
11136 chk->rec.chunk_id.can_take_data = 0;
11137 chk->asoc = &stcb->asoc;
11138 chk->send_size = sizeof(struct sctp_ecne_chunk);
11139 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11140 if (chk->data == NULL) {
11141 sctp_free_a_chunk(stcb, chk);
11144 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11145 SCTP_BUF_LEN(chk->data) = chk->send_size;
11146 chk->sent = SCTP_DATAGRAM_UNSENT;
11147 chk->snd_count = 0;
11149 atomic_add_int(&chk->whoTo->ref_count, 1);
11150 stcb->asoc.ecn_echo_cnt_onq++;
11151 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
11152 ecne->ch.chunk_type = SCTP_ECN_ECHO;
11153 ecne->ch.chunk_flags = 0;
11154 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
11155 ecne->tsn = htonl(high_tsn);
11156 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11157 asoc->ctrl_queue_cnt++;
11161 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
11162 struct mbuf *m, int iphlen, int bad_crc)
11164 struct sctp_association *asoc;
11165 struct sctp_pktdrop_chunk *drp;
11166 struct sctp_tmit_chunk *chk;
11173 struct ip6_hdr *ip6h;
11176 int fullsz = 0, extra = 0;
11179 struct sctp_chunkhdr *ch, chunk_buf;
11180 unsigned int chk_length;
11185 asoc = &stcb->asoc;
11186 SCTP_TCB_LOCK_ASSERT(stcb);
11187 if (asoc->peer_supports_pktdrop == 0) {
11189 * peer must declare support before I send one.
11193 if (stcb->sctp_socket == NULL) {
11196 sctp_alloc_a_chunk(stcb, chk);
11200 chk->copy_by_ref = 0;
11201 iph = mtod(m, struct ip *);
11203 sctp_free_a_chunk(stcb, chk);
11206 switch (iph->ip_v) {
11209 len = chk->send_size = iph->ip_len;
11212 case IPV6_VERSION >> 4:
11214 ip6h = mtod(m, struct ip6_hdr *);
11215 len = chk->send_size = htons(ip6h->ip6_plen);
11221 /* Validate that we do not have an ABORT in here. */
11222 offset = iphlen + sizeof(struct sctphdr);
11223 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11224 sizeof(*ch), (uint8_t *) & chunk_buf);
11225 while (ch != NULL) {
11226 chk_length = ntohs(ch->chunk_length);
11227 if (chk_length < sizeof(*ch)) {
11228 /* break to abort land */
11231 switch (ch->chunk_type) {
11232 case SCTP_PACKET_DROPPED:
11233 case SCTP_ABORT_ASSOCIATION:
11235  * we don't respond with a PKT-DROP to an ABORT or a PKT-DROP
11238 sctp_free_a_chunk(stcb, chk);
11243 offset += SCTP_SIZE32(chk_length);
11244 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11245 sizeof(*ch), (uint8_t *) & chunk_buf);
11248 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11249 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11251 * only send 1 mtu worth, trim off the excess on the end.
11253 fullsz = len - extra;
11254 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11257 chk->asoc = &stcb->asoc;
11258 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11259 if (chk->data == NULL) {
11261 sctp_free_a_chunk(stcb, chk);
11264 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11265 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11267 sctp_m_freem(chk->data);
11271 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11272 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11273 chk->book_size_scale = 0;
11275 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11276 drp->trunc_len = htons(fullsz);
11278  * Len is already adjusted to size minus overhead above; take
11279  * the pkt_drop chunk itself out of it as well.
11281 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
11282 len = chk->send_size;
11284 /* no truncation needed */
11285 drp->ch.chunk_flags = 0;
11286 drp->trunc_len = htons(0);
11289 drp->ch.chunk_flags |= SCTP_BADCRC;
11291 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11292 SCTP_BUF_LEN(chk->data) = chk->send_size;
11293 chk->sent = SCTP_DATAGRAM_UNSENT;
11294 chk->snd_count = 0;
11296 /* we should hit here */
11299 chk->whoTo = asoc->primary_destination;
11301 atomic_add_int(&chk->whoTo->ref_count, 1);
11302 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11303 chk->rec.chunk_id.can_take_data = 1;
11304 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11305 drp->ch.chunk_length = htons(chk->send_size);
11306 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11310 drp->bottle_bw = htonl(spc);
11311 if (asoc->my_rwnd) {
11312 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11313 asoc->size_on_all_streams +
11314 asoc->my_rwnd_control_len +
11315 stcb->sctp_socket->so_rcv.sb_cc);
11318 * If my rwnd is 0, possibly from mbuf depletion as well as
11319 * space used, tell the peer there is NO space aka onq == bw
11321 drp->current_onq = htonl(spc);
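/*-
 * bottle_bw and current_onq give the peer a picture of our receive
 * side: the socket buffer limit versus what is already queued
 * (reassembly, per-stream data, control overhead and the socket
 * buffer itself), so it can judge how constrained we are.
 */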
11325 m_copydata(m, iphlen, len, (caddr_t)datap);
11326 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11327 asoc->ctrl_queue_cnt++;
11331 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
11333 struct sctp_association *asoc;
11334 struct sctp_cwr_chunk *cwr;
11335 struct sctp_tmit_chunk *chk;
11337 asoc = &stcb->asoc;
11338 SCTP_TCB_LOCK_ASSERT(stcb);
11339 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11340 if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
11341 /* found a previous ECN_CWR update it if needed */
11342 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11343 if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
11345 cwr->tsn = htonl(high_tsn);
11350 /* nope could not find one to update so we must build one */
11351 sctp_alloc_a_chunk(stcb, chk);
11355 chk->copy_by_ref = 0;
11356 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11357 chk->rec.chunk_id.can_take_data = 1;
11358 chk->asoc = &stcb->asoc;
11359 chk->send_size = sizeof(struct sctp_cwr_chunk);
11360 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11361 if (chk->data == NULL) {
11362 sctp_free_a_chunk(stcb, chk);
11365 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11366 SCTP_BUF_LEN(chk->data) = chk->send_size;
11367 chk->sent = SCTP_DATAGRAM_UNSENT;
11368 chk->snd_count = 0;
11370 atomic_add_int(&chk->whoTo->ref_count, 1);
11371 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11372 cwr->ch.chunk_type = SCTP_ECN_CWR;
11373 cwr->ch.chunk_flags = 0;
11374 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11375 cwr->tsn = htonl(high_tsn);
11376 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11377 asoc->ctrl_queue_cnt++;
11381 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11382 int number_entries, uint16_t * list,
11383 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11385 int len, old_len, i;
11386 struct sctp_stream_reset_out_request *req_out;
11387 struct sctp_chunkhdr *ch;
11389 ch = mtod(chk->data, struct sctp_chunkhdr *);
11392 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11394 /* get to new offset for the param. */
11395 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11396 /* now how long will this param be? */
11397 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11398 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11399 req_out->ph.param_length = htons(len);
11400 req_out->request_seq = htonl(seq);
11401 req_out->response_seq = htonl(resp_seq);
11402 req_out->send_reset_at_tsn = htonl(last_sent);
11403 if (number_entries) {
11404 for (i = 0; i < number_entries; i++) {
11405 req_out->list_of_streams[i] = htons(list[i]);
11408 if (SCTP_SIZE32(len) > len) {
11410 * Need to worry about the pad we may end up adding to the
11411 * end. This is easy since the struct is either aligned to 4
11412 * bytes or 2 bytes off.
11414 req_out->list_of_streams[number_entries] = 0;
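/*-
 * Padding example: assuming the fixed part of the request is 4-byte
 * aligned, 3 listed streams add 3 * 2 = 6 bytes, leaving the
 * parameter 2 bytes short of a 4-byte boundary, so SCTP_SIZE32(len)
 * exceeds len and the extra zero uint16_t above is the pad; an even
 * stream count needs no pad.
 */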
11416 /* now fix the chunk length */
11417 ch->chunk_length = htons(len + old_len);
11418 chk->book_size = len + old_len;
11419 chk->book_size_scale = 0;
11420 chk->send_size = SCTP_SIZE32(chk->book_size);
11421 SCTP_BUF_LEN(chk->data) = chk->send_size;
11427 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11428 int number_entries, uint16_t * list,
11431 int len, old_len, i;
11432 struct sctp_stream_reset_in_request *req_in;
11433 struct sctp_chunkhdr *ch;
11435 ch = mtod(chk->data, struct sctp_chunkhdr *);
11438 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11440 /* get to new offset for the param. */
11441 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11442 /* now how long will this param be? */
11443 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11444 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11445 req_in->ph.param_length = htons(len);
11446 req_in->request_seq = htonl(seq);
11447 if (number_entries) {
11448 for (i = 0; i < number_entries; i++) {
11449 req_in->list_of_streams[i] = htons(list[i]);
11452 if (SCTP_SIZE32(len) > len) {
11454 * Need to worry about the pad we may end up adding to the
11455 * end. This is easy since the struct is either aligned to 4
11456 * bytes or 2 bytes off.
11458 req_in->list_of_streams[number_entries] = 0;
11460 /* now fix the chunk length */
11461 ch->chunk_length = htons(len + old_len);
11462 chk->book_size = len + old_len;
11463 chk->book_size_scale = 0;
11464 chk->send_size = SCTP_SIZE32(chk->book_size);
11465 SCTP_BUF_LEN(chk->data) = chk->send_size;
11471 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11475 struct sctp_stream_reset_tsn_request *req_tsn;
11476 struct sctp_chunkhdr *ch;
11478 ch = mtod(chk->data, struct sctp_chunkhdr *);
11481 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11483 /* get to new offset for the param. */
11484 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11485 /* now how long will this param be? */
11486 len = sizeof(struct sctp_stream_reset_tsn_request);
11487 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11488 req_tsn->ph.param_length = htons(len);
11489 req_tsn->request_seq = htonl(seq);
11491 /* now fix the chunk length */
11492 ch->chunk_length = htons(len + old_len);
11493 chk->send_size = len + old_len;
11494 chk->book_size = SCTP_SIZE32(chk->send_size);
11495 chk->book_size_scale = 0;
11496 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11501 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11502 uint32_t resp_seq, uint32_t result)
11505 struct sctp_stream_reset_response *resp;
11506 struct sctp_chunkhdr *ch;
11508 ch = mtod(chk->data, struct sctp_chunkhdr *);
11511 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11513 /* get to new offset for the param. */
11514 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11515 /* now how long will this param be? */
11516 len = sizeof(struct sctp_stream_reset_response);
11517 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11518 resp->ph.param_length = htons(len);
11519 resp->response_seq = htonl(resp_seq);
11520 resp->result = htonl(result);
11522 /* now fix the chunk length */
11523 ch->chunk_length = htons(len + old_len);
11524 chk->book_size = len + old_len;
11525 chk->book_size_scale = 0;
11526 chk->send_size = SCTP_SIZE32(chk->book_size);
11527 SCTP_BUF_LEN(chk->data) = chk->send_size;
11534 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11535 uint32_t resp_seq, uint32_t result,
11536 uint32_t send_una, uint32_t recv_next)
11539 struct sctp_stream_reset_response_tsn *resp;
11540 struct sctp_chunkhdr *ch;
11542 ch = mtod(chk->data, struct sctp_chunkhdr *);
11545 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11547 /* get to new offset for the param. */
11548 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11549 /* now how long will this param be? */
11550 len = sizeof(struct sctp_stream_reset_response_tsn);
11551 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11552 resp->ph.param_length = htons(len);
11553 resp->response_seq = htonl(resp_seq);
11554 resp->result = htonl(result);
11555 resp->senders_next_tsn = htonl(send_una);
11556 resp->receivers_next_tsn = htonl(recv_next);
11558 /* now fix the chunk length */
11559 ch->chunk_length = htons(len + old_len);
11560 chk->book_size = len + old_len;
11561 chk->send_size = SCTP_SIZE32(chk->book_size);
11562 chk->book_size_scale = 0;
11563 SCTP_BUF_LEN(chk->data) = chk->send_size;
11568 sctp_add_a_stream(struct sctp_tmit_chunk *chk,
11573 struct sctp_chunkhdr *ch;
11574 struct sctp_stream_reset_add_strm *addstr;
11576 ch = mtod(chk->data, struct sctp_chunkhdr *);
11577 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11579 /* get to new offset for the param. */
11580 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11581 /* now how long will this param be? */
11582 len = sizeof(struct sctp_stream_reset_add_strm);
11585 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_STREAMS);
11586 addstr->ph.param_length = htons(len);
11587 addstr->request_seq = htonl(seq);
11588 addstr->number_of_streams = htons(adding);
11589 addstr->reserved = 0;
11591 /* now fix the chunk length */
11592 ch->chunk_length = htons(len + old_len);
11593 chk->send_size = len + old_len;
11594 chk->book_size = SCTP_SIZE32(chk->send_size);
11595 chk->book_size_scale = 0;
11596 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11601 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11602 int number_entries, uint16_t * list,
11603 uint8_t send_out_req,
11605 uint8_t send_in_req,
11606 uint8_t send_tsn_req,
11607 uint8_t add_stream,
11612 struct sctp_association *asoc;
11613 struct sctp_tmit_chunk *chk;
11614 struct sctp_chunkhdr *ch;
11617 asoc = &stcb->asoc;
11618 if (asoc->stream_reset_outstanding) {
11620 * Already one pending, must get ACK back to clear the flag.
11622 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11625 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11626 (add_stream == 0)) {
11627 /* nothing to do */
11628 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11631 if (send_tsn_req && (send_out_req || send_in_req)) {
11632 /* error, can't do that */
11633 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11636 sctp_alloc_a_chunk(stcb, chk);
11638 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11641 chk->copy_by_ref = 0;
11642 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11643 chk->rec.chunk_id.can_take_data = 0;
11644 chk->asoc = &stcb->asoc;
11645 chk->book_size = sizeof(struct sctp_chunkhdr);
11646 chk->send_size = SCTP_SIZE32(chk->book_size);
11647 chk->book_size_scale = 0;
11649 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11650 if (chk->data == NULL) {
11651 sctp_free_a_chunk(stcb, chk);
11652 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11655 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11657 /* setup chunk parameters */
11658 chk->sent = SCTP_DATAGRAM_UNSENT;
11659 chk->snd_count = 0;
11660 chk->whoTo = asoc->primary_destination;
11661 atomic_add_int(&chk->whoTo->ref_count, 1);
11663 ch = mtod(chk->data, struct sctp_chunkhdr *);
11664 ch->chunk_type = SCTP_STREAM_RESET;
11665 ch->chunk_flags = 0;
11666 ch->chunk_length = htons(chk->book_size);
11667 SCTP_BUF_LEN(chk->data) = chk->send_size;
11669 seq = stcb->asoc.str_reset_seq_out;
11670 if (send_out_req) {
11671 sctp_add_stream_reset_out(chk, number_entries, list,
11672 seq, resp_seq, (stcb->asoc.sending_seq - 1));
11673 asoc->stream_reset_out_is_outstanding = 1;
11675 asoc->stream_reset_outstanding++;
11678 sctp_add_a_stream(chk, seq, adding);
11680 asoc->stream_reset_outstanding++;
11683 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11684 asoc->stream_reset_outstanding++;
11686 if (send_tsn_req) {
11687 sctp_add_stream_reset_tsn(chk, seq);
11688 asoc->stream_reset_outstanding++;
11690 asoc->str_reset = chk;
11692 /* insert the chunk for sending */
11693 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11696 asoc->ctrl_queue_cnt++;
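/*
 * To sum up: the chunk starts as a bare STREAM_RESET header and grows
 * one parameter per requested operation (out, in, tsn, add-stream);
 * stream_reset_outstanding counts them so the response can be
 * matched, and the STRRESET timer started below drives retransmission
 * until the peer answers.
 */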
11697 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
11702 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
11703 struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
11706 * Formulate the abort message, and send it back down.
11708 struct mbuf *o_pak;
11710 struct sctp_abort_msg *abm;
11711 struct ip *iph, *iph_out;
11712 struct udphdr *udp;
11715 struct ip6_hdr *ip6, *ip6_out;
11718 int iphlen_out, len;
11720 /* don't respond to ABORT with ABORT */
11721 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11723 sctp_m_freem(err_cause);
11726 iph = mtod(m, struct ip *);
11727 switch (iph->ip_v) {
11729 len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
11732 case IPV6_VERSION >> 4:
11733 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
11738 sctp_m_freem(err_cause);
11743 len += sizeof(struct udphdr);
11745 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
11746 if (mout == NULL) {
11748 sctp_m_freem(err_cause);
11752 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11753 SCTP_BUF_LEN(mout) = len;
11754 SCTP_BUF_NEXT(mout) = err_cause;
11759 switch (iph->ip_v) {
11761 iph_out = mtod(mout, struct ip *);
11763 /* Fill in the IP header for the ABORT */
11764 iph_out->ip_v = IPVERSION;
11765 iph_out->ip_hl = (sizeof(struct ip) / 4);
11766 iph_out->ip_tos = (u_char)0;
11767 iph_out->ip_id = 0;
11768 iph_out->ip_off = 0;
11769 iph_out->ip_ttl = MAXTTL;
11771 iph_out->ip_p = IPPROTO_UDP;
11773 iph_out->ip_p = IPPROTO_SCTP;
11775 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
11776 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
11777 /* let IP layer calculate this */
11778 iph_out->ip_sum = 0;
11780 iphlen_out = sizeof(*iph_out);
11781 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
11784 case IPV6_VERSION >> 4:
11785 ip6 = (struct ip6_hdr *)iph;
11786 ip6_out = mtod(mout, struct ip6_hdr *);
11788 /* Fill in the IP6 header for the ABORT */
11789 ip6_out->ip6_flow = ip6->ip6_flow;
11790 ip6_out->ip6_hlim = MODULE_GLOBAL(MOD_INET6, ip6_defhlim);
11792 ip6_out->ip6_nxt = IPPROTO_UDP;
11794 ip6_out->ip6_nxt = IPPROTO_SCTP;
11796 ip6_out->ip6_src = ip6->ip6_dst;
11797 ip6_out->ip6_dst = ip6->ip6_src;
11799 iphlen_out = sizeof(*ip6_out);
11800 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
11804 /* Currently not supported */
11805 sctp_m_freem(mout);
11809 udp = (struct udphdr *)abm;
11811 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11812 udp->uh_dport = port;
11813 /* set udp->uh_ulen later */
11815 iphlen_out += sizeof(struct udphdr);
11816 abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
11818 abm->sh.src_port = sh->dest_port;
11819 abm->sh.dest_port = sh->src_port;
11820 abm->sh.checksum = 0;
11822 abm->sh.v_tag = sh->v_tag;
11823 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
11825 abm->sh.v_tag = htonl(vtag);
11826 abm->msg.ch.chunk_flags = 0;
11828 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11831 struct mbuf *m_tmp = err_cause;
11834 /* get length of the err_cause chain */
11835 while (m_tmp != NULL) {
11836 err_len += SCTP_BUF_LEN(m_tmp);
11837 m_tmp = SCTP_BUF_NEXT(m_tmp);
11839 len = SCTP_BUF_LEN(mout) + err_len;
11841 /* need pad at end of chunk */
11842 uint32_t cpthis = 0;
11845 padlen = 4 - (len % 4);
11846 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
11849 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
11851 len = SCTP_BUF_LEN(mout);
11852 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
11855 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11857 sctp_m_freem(mout);
11860 if (iph_out != NULL) {
11862 struct sctp_tcb *stcb = NULL;
11865 /* zap the stack pointer to the route */
11866 bzero(&ro, sizeof ro);
11868 udp->uh_ulen = htons(len - sizeof(struct ip));
11869 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11871 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
11872 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
11873 /* set IPv4 length */
11874 iph_out->ip_len = len;
11876 #ifdef SCTP_PACKET_LOGGING
11877 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11878 sctp_packet_log(mout, len);
11880 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11882 abm->sh.checksum = sctp_calculate_cksum(mout, iphlen_out);
11883 SCTP_STAT_INCR(sctps_sendswcrc);
11884 SCTP_ENABLE_UDP_CSUM(o_pak);
11886 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11887 mout->m_pkthdr.csum_data = 0; /* FIXME MT */
11888 SCTP_STAT_INCR(sctps_sendhwcrc);
11890 SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
11892 /* Free the route if we got one back */
11897 if (ip6_out != NULL) {
11898 struct route_in6 ro;
11900 struct sctp_tcb *stcb = NULL;
11901 struct ifnet *ifp = NULL;
11903 /* zap the stack pointer to the route */
11904 bzero(&ro, sizeof(ro));
11906 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
11908 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
11909 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
11910 ip6_out->ip6_plen = len - sizeof(*ip6_out);
11911 #ifdef SCTP_PACKET_LOGGING
11912 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11913 sctp_packet_log(mout, len);
11915 abm->sh.checksum = sctp_calculate_cksum(mout, iphlen_out);
11916 SCTP_STAT_INCR(sctps_sendswcrc);
11917 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11919 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11920 udp->uh_sum = 0xffff;
11923 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
11925 /* Free the route if we got one back */
11930 SCTP_STAT_INCR(sctps_sendpackets);
11931 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11932 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
11936 sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
11937 uint32_t vrf_id, uint16_t port)
11939 struct mbuf *o_pak;
11940 struct sctphdr *sh, *sh_out;
11941 struct sctp_chunkhdr *ch;
11942 struct ip *iph, *iph_out;
11943 struct udphdr *udp = NULL;
11947 struct ip6_hdr *ip6, *ip6_out;
11950 int iphlen_out, len;
11952 iph = mtod(m, struct ip *);
11953 sh = (struct sctphdr *)((caddr_t)iph + iphlen);
11954 switch (iph->ip_v) {
11956 len = (sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
11959 case IPV6_VERSION >> 4:
11960 len = (sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
11970 len += sizeof(struct udphdr);
11972 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
11973 if (mout == NULL) {
11979 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11980 SCTP_BUF_LEN(mout) = len;
11981 SCTP_BUF_NEXT(mout) = scm;
11986 switch (iph->ip_v) {
11988 iph_out = mtod(mout, struct ip *);
11990 /* Fill in the IP header for the ABORT */
11991 iph_out->ip_v = IPVERSION;
11992 iph_out->ip_hl = (sizeof(struct ip) / 4);
11993 iph_out->ip_tos = (u_char)0;
11994 iph_out->ip_id = 0;
11995 iph_out->ip_off = 0;
11996 iph_out->ip_ttl = MAXTTL;
11998 iph_out->ip_p = IPPROTO_UDP;
12000 iph_out->ip_p = IPPROTO_SCTP;
12002 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
12003 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
12004 /* let IP layer calculate this */
12005 iph_out->ip_sum = 0;
12007 iphlen_out = sizeof(struct ip);
12008 sh_out = (struct sctphdr *)((caddr_t)iph_out + iphlen_out);
12011 case IPV6_VERSION >> 4:
12012 ip6 = (struct ip6_hdr *)iph;
12013 ip6_out = mtod(mout, struct ip6_hdr *);
12015 /* Fill in the IP6 header for the ABORT */
12016 ip6_out->ip6_flow = ip6->ip6_flow;
12017 ip6_out->ip6_hlim = MODULE_GLOBAL(MOD_INET6, ip6_defhlim);
12019 ip6_out->ip6_nxt = IPPROTO_UDP;
12021 ip6_out->ip6_nxt = IPPROTO_SCTP;
12023 ip6_out->ip6_src = ip6->ip6_dst;
12024 ip6_out->ip6_dst = ip6->ip6_src;
12026 iphlen_out = sizeof(struct ip6_hdr);
12027 sh_out = (struct sctphdr *)((caddr_t)ip6_out + iphlen_out);
12031 /* Currently not supported */
12032 sctp_m_freem(mout);
12036 udp = (struct udphdr *)sh_out;
12038 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
12039 udp->uh_dport = port;
12040 /* set udp->uh_ulen later */
12042 iphlen_out += sizeof(struct udphdr);
12043 sh_out = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
12045 sh_out->src_port = sh->dest_port;
12046 sh_out->dest_port = sh->src_port;
12047 sh_out->v_tag = vtag;
12048 sh_out->checksum = 0;
12050 ch = (struct sctp_chunkhdr *)((caddr_t)sh_out + sizeof(struct sctphdr));
12051 ch->chunk_type = SCTP_OPERATION_ERROR;
12052 ch->chunk_flags = 0;
12055 struct mbuf *m_tmp = scm;
12058 /* get length of the err_cause chain */
12059 while (m_tmp != NULL) {
12060 cause_len += SCTP_BUF_LEN(m_tmp);
12061 m_tmp = SCTP_BUF_NEXT(m_tmp);
12063 len = SCTP_BUF_LEN(mout) + cause_len;
12064 if (cause_len % 4) {
12065 /* need pad at end of chunk */
12066 uint32_t cpthis = 0;
12069 padlen = 4 - (len % 4);
12070 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
12073 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
12075 len = SCTP_BUF_LEN(mout);
12076 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr));
12079 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
12081 sctp_m_freem(mout);
12084 if (iph_out != NULL) {
12086 struct sctp_tcb *stcb = NULL;
12089 /* zap the stack pointer to the route */
12090 bzero(&ro, sizeof ro);
12092 udp->uh_ulen = htons(len - sizeof(struct ip));
12093 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
12095 /* set IPv4 length */
12096 iph_out->ip_len = len;
12098 #ifdef SCTP_PACKET_LOGGING
12099 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12100 sctp_packet_log(mout, len);
12102 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12104 sh_out->checksum = sctp_calculate_cksum(mout, iphlen_out);
12105 SCTP_STAT_INCR(sctps_sendswcrc);
12106 SCTP_ENABLE_UDP_CSUM(o_pak);
12108 mout->m_pkthdr.csum_flags = CSUM_SCTP;
12109 mout->m_pkthdr.csum_data = 0; /* FIXME MT */
12110 SCTP_STAT_INCR(sctps_sendhwcrc);
12112 SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
12114 /* Free the route if we got one back */
12119 if (ip6_out != NULL) {
12120 struct route_in6 ro;
12122 struct sctp_tcb *stcb = NULL;
12123 struct ifnet *ifp = NULL;
12125 /* zap the stack pointer to the route */
12126 bzero(&ro, sizeof(ro));
12128 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
12130 ip6_out->ip6_plen = len - sizeof(*ip6_out);
12131 #ifdef SCTP_PACKET_LOGGING
12132 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
12133 sctp_packet_log(mout, len);
12135 sh_out->checksum = sctp_calculate_cksum(mout, iphlen_out);
12136 SCTP_STAT_INCR(sctps_sendswcrc);
12137 SCTP_ATTACH_CHAIN(o_pak, mout, len);
12139 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
12140 udp->uh_sum = 0xffff;
12143 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
12145 /* Free the route if we got one back */
12150 SCTP_STAT_INCR(sctps_sendpackets);
12151 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
12152 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
12155 static struct mbuf *
12156 sctp_copy_resume(struct sctp_stream_queue_pending *sp,
12158 struct sctp_sndrcvinfo *srcv,
12160 int user_marks_eor,
12163 struct mbuf **new_tail)
12167 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
12168 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
12170 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12173 *sndout = m_length(m, NULL);
12174 *new_tail = m_last(m);
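/*
 * m_uiotombuf() above copies at most max_send_len bytes of the user's
 * uio into a fresh mbuf chain; *sndout reports how much was actually
 * taken and *new_tail points at the last mbuf so the caller can keep
 * appending to the pending queue entry.
 */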
12180 sctp_copy_one(struct sctp_stream_queue_pending *sp,
12187 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
12189 if (sp->data == NULL) {
12190 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12193 sp->tail_mbuf = m_last(sp->data);
12199 static struct sctp_stream_queue_pending *
12200 sctp_copy_it_in(struct sctp_tcb *stcb,
12201 struct sctp_association *asoc,
12202 struct sctp_sndrcvinfo *srcv,
12204 struct sctp_nets *net,
12206 int user_marks_eor,
12211 * This routine must be very careful in its work. Protocol
12212 * processing is up and running so care must be taken to spl...()
12213  * when you need to do something that may affect the stcb/asoc. The
12214 * sb is locked however. When data is copied the protocol processing
12215 * should be enabled since this is a slower operation...
12217 struct sctp_stream_queue_pending *sp = NULL;
12221 /* Now can we send this? */
12222 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12223 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12224 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12225 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12226 /* got data while shutting down */
12227 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12228 *error = ECONNRESET;
12231 sctp_alloc_a_strmoq(stcb, sp);
12233 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12238 sp->sender_all_done = 0;
12239 sp->sinfo_flags = srcv->sinfo_flags;
12240 sp->timetolive = srcv->sinfo_timetolive;
12241 sp->ppid = srcv->sinfo_ppid;
12242 sp->context = srcv->sinfo_context;
12244 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12246 sp->stream = srcv->sinfo_stream;
12247 sp->length = min(uio->uio_resid, max_send_len);
12248 if ((sp->length == (uint32_t) uio->uio_resid) &&
12249 ((user_marks_eor == 0) ||
12250 (srcv->sinfo_flags & SCTP_EOF) ||
12251 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12252 sp->msg_is_complete = 1;
12254 sp->msg_is_complete = 0;
12256 sp->sender_all_done = 0;
12257 sp->some_taken = 0;
12258 sp->put_last_out = 0;
12259 resv_in_first = sizeof(struct sctp_data_chunk);
12260 sp->data = sp->tail_mbuf = NULL;
12261 if (sp->length == 0) {
12265 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12266 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12267 sctp_auth_key_acquire(stcb, stcb->asoc.authinfo.active_keyid);
12268 sp->holds_key_ref = 1;
12270 *error = sctp_copy_one(sp, uio, resv_in_first);
12273 sctp_free_a_strmoq(stcb, sp);
12276 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12280 sp->net = asoc->primary_destination;
12283 atomic_add_int(&sp->net->ref_count, 1);
12284 sctp_set_prsctp_policy(sp);
12292 sctp_sosend(struct socket *so,
12293 struct sockaddr *addr,
12296 struct mbuf *control,
12301 struct sctp_inpcb *inp;
12302 int error, use_rcvinfo = 0;
12303 struct sctp_sndrcvinfo srcv;
12304 struct sockaddr *addr_to_use;
12307 struct sockaddr_in sin;
12311 inp = (struct sctp_inpcb *)so->so_pcb;
12313 /* process cmsg snd/rcv info (maybe a assoc-id) */
12314 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
12320 addr_to_use = addr;
12321 #if defined(INET6) && !defined(__Userspace__) /* TODO port in6_sin6_2_sin */
12322 if ((addr) && (addr->sa_family == AF_INET6)) {
12323 struct sockaddr_in6 *sin6;
12325 sin6 = (struct sockaddr_in6 *)addr;
12326 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12327 in6_sin6_2_sin(&sin, sin6);
12328 addr_to_use = (struct sockaddr *)&sin;
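/*
 * An IPv4-mapped IPv6 destination (::ffff:a.b.c.d) is converted to a
 * plain sockaddr_in here so the rest of the send path can treat it as
 * IPv4; only addr_to_use is redirected at the local sin copy, the
 * caller's addr is left untouched.
 */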
12332 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12343 sctp_lower_sosend(struct socket *so,
12344 struct sockaddr *addr,
12346 struct mbuf *i_pak,
12347 struct mbuf *control,
12350 struct sctp_sndrcvinfo *srcv
12355 unsigned int sndlen = 0, max_len;
12357 struct mbuf *top = NULL;
12358 int queue_only = 0, queue_only_for_init = 0;
12359 int free_cnt_applied = 0;
12361 int now_filled = 0;
12362 unsigned int inqueue_bytes = 0;
12363 struct sctp_block_entry be;
12364 struct sctp_inpcb *inp;
12365 struct sctp_tcb *stcb = NULL;
12366 struct timeval now;
12367 struct sctp_nets *net;
12368 struct sctp_association *asoc;
12369 struct sctp_inpcb *t_inp;
12370 int user_marks_eor;
12371 int create_lock_applied = 0;
12372 int nagle_applies = 0;
12373 int some_on_control = 0;
12374 int got_all_of_the_send = 0;
12375 int hold_tcblock = 0;
12376 int non_blocking = 0;
12377 int temp_flags = 0;
12378 uint32_t local_add_more, local_soresv = 0;
12385 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12387 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12390 SCTP_RELEASE_PKT(i_pak);
12394 if ((uio == NULL) && (i_pak == NULL)) {
12395 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12398 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12399 atomic_add_int(&inp->total_sends, 1);
12401 if (uio->uio_resid < 0) {
12402 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12405 sndlen = uio->uio_resid;
12407 top = SCTP_HEADER_TO_CHAIN(i_pak);
12408 sndlen = SCTP_HEADER_LEN(i_pak);
12410 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12414  * Pre-screen address; if one is given, the sin-len
12415  * must be set correctly!
12418 if ((addr->sa_family == AF_INET) &&
12419 (addr->sa_len != sizeof(struct sockaddr_in))) {
12420 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12423 } else if ((addr->sa_family == AF_INET6) &&
12424 (addr->sa_len != sizeof(struct sockaddr_in6))) {
12425 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12432 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12433 (inp->sctp_socket->so_qlimit)) {
12434 /* The listener can NOT send */
12435 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12439 if ((use_rcvinfo) && srcv) {
12440 if (INVALID_SINFO_FLAG(srcv->sinfo_flags) ||
12441 PR_SCTP_INVALID_POLICY(srcv->sinfo_flags)) {
12442 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12446 if (srcv->sinfo_flags)
12447 SCTP_STAT_INCR(sctps_sends_with_flags);
12449 if (srcv->sinfo_flags & SCTP_SENDALL) {
12450 /* its a sendall */
12451 error = sctp_sendall(inp, uio, top, srcv);
12456 /* now we must find the assoc */
12457 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12458 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12459 SCTP_INP_RLOCK(inp);
12460 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12461 if (stcb == NULL) {
12462 SCTP_INP_RUNLOCK(inp);
12463 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12467 SCTP_TCB_LOCK(stcb);
12469 SCTP_INP_RUNLOCK(inp);
12471 /* Must locate the net structure if addr given */
12472 net = sctp_findnet(stcb, addr);
12474 /* validate port was 0 or correct */
12475 struct sockaddr_in *sin;
12477 sin = (struct sockaddr_in *)addr;
12478 if ((sin->sin_port != 0) &&
12479 (sin->sin_port != stcb->rport)) {
12483 temp_flags |= SCTP_ADDR_OVER;
12485 net = stcb->asoc.primary_destination;
12486 if (addr && (net == NULL)) {
12487 /* Could not find address, was it legal */
12488 if (addr->sa_family == AF_INET) {
12489 struct sockaddr_in *sin;
12491 sin = (struct sockaddr_in *)addr;
12492 if (sin->sin_addr.s_addr == 0) {
12493 if ((sin->sin_port == 0) ||
12494 (sin->sin_port == stcb->rport)) {
12495 net = stcb->asoc.primary_destination;
12499 struct sockaddr_in6 *sin6;
12501 sin6 = (struct sockaddr_in6 *)addr;
12502 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
12503 if ((sin6->sin6_port == 0) ||
12504 (sin6->sin6_port == stcb->rport)) {
12505 net = stcb->asoc.primary_destination;
12511 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12515 } else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) {
12516 stcb = sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0);
12520  * Must locate the net structure if addr given
12523 net = sctp_findnet(stcb, addr);
12525 net = stcb->asoc.primary_destination;
12526 if ((srcv->sinfo_flags & SCTP_ADDR_OVER) &&
12527 ((net == NULL) || (addr == NULL))) {
12528 struct sockaddr_in *sin;
12530 if (addr == NULL) {
12531 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12535 sin = (struct sockaddr_in *)addr;
12536 /* Validate port is 0 or correct */
12537 if ((sin->sin_port != 0) &&
12538 (sin->sin_port != stcb->rport)) {
12546 * Since we did not use findep we must
12547  * increment it, and if we don't find a tcb decrement it.
12550 SCTP_INP_WLOCK(inp);
12551 SCTP_INP_INCR_REF(inp);
12552 SCTP_INP_WUNLOCK(inp);
12553 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12554 if (stcb == NULL) {
12555 SCTP_INP_WLOCK(inp);
12556 SCTP_INP_DECR_REF(inp);
12557 SCTP_INP_WUNLOCK(inp);
12562 if ((stcb == NULL) && (addr)) {
12563 /* Possible implicit send? */
12564 SCTP_ASOC_CREATE_LOCK(inp);
12565 create_lock_applied = 1;
12566 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12567 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12568 /* Should I really unlock ? */
12569 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12574 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12575 (addr->sa_family == AF_INET6)) {
12576 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12580 SCTP_INP_WLOCK(inp);
12581 SCTP_INP_INCR_REF(inp);
12582 SCTP_INP_WUNLOCK(inp);
12583 /* With the lock applied look again */
12584 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12585 if (stcb == NULL) {
12586 SCTP_INP_WLOCK(inp);
12587 SCTP_INP_DECR_REF(inp);
12588 SCTP_INP_WUNLOCK(inp);
12592 if (t_inp != inp) {
12593 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12598 if (stcb == NULL) {
12599 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
12600 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12601 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12605 if (addr == NULL) {
12606 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12611  * UDP style, we must go ahead and start the INIT process
12616 if ((use_rcvinfo) && (srcv) &&
12617 ((srcv->sinfo_flags & SCTP_ABORT) ||
12618 ((srcv->sinfo_flags & SCTP_EOF) &&
12621  * User asks to abort a non-existent assoc,
12622  * or EOF a non-existent assoc with no data
12624 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12628 /* get an asoc/stcb struct */
12629 vrf_id = inp->def_vrf_id;
12631 if (create_lock_applied == 0) {
12632 panic("Error, should hold create lock and I don't?");
12635 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id,
12638 if (stcb == NULL) {
12639 /* Error is setup for us in the call */
12642 if (create_lock_applied) {
12643 SCTP_ASOC_CREATE_UNLOCK(inp);
12644 create_lock_applied = 0;
12646 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12649  * Turn on queue only flag to prevent data from being sent
12653 asoc = &stcb->asoc;
12654 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12655 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12657 /* initialize authentication params for the assoc */
12658 sctp_initialize_auth_params(inp, stcb);
12662 * see if a init structure exists in cmsg
12665 struct sctp_initmsg initm;
12668 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
12671 * we have an INIT override of the
12674 if (initm.sinit_max_attempts)
12675 asoc->max_init_times = initm.sinit_max_attempts;
12676 if (initm.sinit_num_ostreams)
12677 asoc->pre_open_streams = initm.sinit_num_ostreams;
12678 if (initm.sinit_max_instreams)
12679 asoc->max_inbound_streams = initm.sinit_max_instreams;
12680 if (initm.sinit_max_init_timeo)
12681 asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
12682 if (asoc->streamoutcnt < asoc->pre_open_streams) {
12683 struct sctp_stream_out *tmp_str;
12686 /* Default is NOT correct */
12687 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, defout:%d pre_open:%d\n",
12688 asoc->streamoutcnt, asoc->pre_open_streams);
12690 * What happens if this
12691 * fails? we panic ...
12694 if (hold_tcblock) {
12696 SCTP_TCB_UNLOCK(stcb);
12698 SCTP_MALLOC(tmp_str,
12699 struct sctp_stream_out *,
12700 (asoc->pre_open_streams *
12701 sizeof(struct sctp_stream_out)),
12704 SCTP_TCB_LOCK(stcb);
12706 if (tmp_str != NULL) {
12707 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
12708 asoc->strmout = tmp_str;
12709 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;
12711 asoc->pre_open_streams = asoc->streamoutcnt;
12713 for (i = 0; i < asoc->streamoutcnt; i++) {
12715 * inbound side must be set
12716 * to 0xffff, also NOTE when
12717 * we get the INIT-ACK back
12718 * (for INIT sender) we MUST
12720 * (streamoutcnt) but first
12721 * check if we sent to any
12722 * of the upper streams that
12723 * were dropped (if some
12724 * were). Those that were
12725 * dropped must be notified
12726 * to the upper layer as
12729 asoc->strmout[i].next_sequence_sent = 0x0;
12730 TAILQ_INIT(&asoc->strmout[i].outqueue);
12731 asoc->strmout[i].stream_no = i;
12732 asoc->strmout[i].last_msg_incomplete = 0;
12733 asoc->strmout[i].next_spoke.tqe_next = 0;
12734 asoc->strmout[i].next_spoke.tqe_prev = 0;
12740 /* out with the INIT */
12741 queue_only_for_init = 1;
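/*-
 * queue_only_for_init keeps the user data queued until the INIT
 * has gone out and the handshake completes; the INIT itself is
 * pushed from the send path further below, once the locking has
 * settled.
 */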
/*-
 * We may want to dig in after this call and adjust the MTU
 * value. It defaulted to 1500 (constant) but the ro
 * structure may now have an update and thus we may need to
 * change it BEFORE we append the message.
 */
net = stcb->asoc.primary_destination;
asoc = &stcb->asoc;
if ((SCTP_SO_IS_NBIO(so)
|| (flags & MSG_NBIO)
)) {
non_blocking = 1;
}
asoc = &stcb->asoc;
atomic_add_int(&stcb->total_sends, 1);
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
if (sndlen > asoc->smallest_mtu) {
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
/* would we block? */
if (non_blocking) {
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
(stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
if (sndlen > SCTP_SB_LIMIT_SND(so))
error = EMSGSIZE;
else
error = EWOULDBLOCK;
stcb->asoc.sb_send_resv += sndlen;
SCTP_TCB_UNLOCK(stcb);
atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
local_soresv = sndlen;
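/*-
 * sb_send_resv, charged above, is a reservation against the
 * so_snd limit so that concurrent senders cannot collectively
 * oversubscribe the socket buffer; it is released again in the
 * exit path (see the atomic_subtract_int() near the out labels).
 */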
/* Keep the stcb from being freed under our feet */
if (free_cnt_applied) {
panic("refcnt already incremented");
printf("refcnt:1 already incremented?\n");
atomic_add_int(&stcb->asoc.refcnt, 1);
free_cnt_applied = 1;
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
error = ECONNRESET;
if (create_lock_applied) {
SCTP_ASOC_CREATE_UNLOCK(inp);
create_lock_applied = 0;
if (asoc->stream_reset_outstanding) {
/* Can't queue any data while a stream reset is underway. */
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
(SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
queue_only = 1;
if ((use_rcvinfo == 0) || (srcv == NULL)) {
/* Grab the default stuff from the asoc */
srcv = (struct sctp_sndrcvinfo *)&stcb->asoc.def_send;
/* we are now done with all control */
sctp_m_freem(control);
if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
(SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
(SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
(asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
if ((use_rcvinfo) &&
(srcv->sinfo_flags & SCTP_ABORT)) {
SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
error = ECONNRESET;
/* Ok, we will attempt a msgsnd :> */
p->td_ru.ru_msgsnd++;
if (((srcv->sinfo_flags | temp_flags) & SCTP_ADDR_OVER) == 0) {
net = stcb->asoc.primary_destination;
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
if ((net->flight_size > net->cwnd) && (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
/*-
 * CMT: Added check for CMT above. net above is the primary
 * dest. If CMT is ON, the sender should always attempt to
 * send with the output routine sctp_fill_outqueue() that
 * loops through all destination addresses. Therefore, if
 * CMT is ON, queue_only is NOT set to 1 here, so that
 * sctp_chunk_output() can be called below.
 */
queue_only = 1;
} else if (asoc->ifp_had_enobuf) {
SCTP_STAT_INCR(sctps_ifnomemqueued);
if (net->flight_size > (net->mtu * 2))
queue_only = 1;
asoc->ifp_had_enobuf = 0;
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
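/*-
 * un_sent is the queued-but-not-in-flight backlog: everything on
 * the output queues minus what is in flight, plus one DATA chunk
 * header of overhead per queued message. For example, two
 * 1000-byte messages still on the stream queues with nothing in
 * flight give un_sent = 2000 + 2 * sizeof(struct sctp_data_chunk).
 */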
/* Are we aborting? */
if (srcv->sinfo_flags & SCTP_ABORT) {
struct mbuf *mm;
int tot_demand, tot_out = 0, max_out;

SCTP_STAT_INCR(sctps_sends_with_abort);
if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
(SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
/* It has to be up before we abort */
/* how big is the user-initiated abort? */
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
if (hold_tcblock) {
SCTP_TCB_UNLOCK(stcb);
struct mbuf *cntm = NULL;

mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);
tot_out += SCTP_BUF_LEN(cntm);
cntm = SCTP_BUF_NEXT(cntm);
tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
/* Must fit in an MTU */
tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
max_out -= sizeof(struct sctp_abort_msg);
if (tot_out > max_out) {
tot_out = max_out;
struct sctp_paramhdr *ph;

/* now move forward the data pointer */
ph = mtod(mm, struct sctp_paramhdr *);
ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
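/*-
 * Layout of the user-initiated abort cause built above: a
 * struct sctp_paramhdr (cause code SCTP_CAUSE_USER_INITIATED_ABT
 * plus length) followed immediately by up to tot_out bytes of the
 * user's data, clamped so the whole ABORT still fits in one MTU.
 */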
error = uiomove((caddr_t)ph, (int)tot_out, uio);
/*-
 * Here, even if we can't get the user's data, we still
 * abort; we just don't get to send the user's note. :-0
 */
SCTP_BUF_NEXT(mm) = top;
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
atomic_add_int(&stcb->asoc.refcnt, -1);
free_cnt_applied = 0;
/* release this lock, otherwise we hang on ourselves */
sctp_abort_an_association(stcb->sctp_ep, stcb,
SCTP_RESPONSE_TO_USER_REQ,
mm, SCTP_SO_LOCKED);
/* now relock the stcb so everything is sane */
/*-
 * In this case top is already chained to mm, so avoid a
 * double free: we free it below if top != NULL, and the
 * driver would free it after sending the packet out.
 */
/* Calculate the maximum we can send */
inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
if (non_blocking) {
/* we already checked for non-blocking above. */
max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
if (hold_tcblock) {
SCTP_TCB_UNLOCK(stcb);
/* Is the stream no. valid? */
if (srcv->sinfo_stream >= asoc->streamoutcnt) {
/* Invalid stream number */
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
if (asoc->strmout == NULL) {
/* huh? software error */
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
/* Unless E_EOR mode is on, we must make a send FIT in one call. */
if ((user_marks_eor == 0) &&
(sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
/* It will NEVER fit */
SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
if ((uio == NULL) && user_marks_eor) {
/*-
 * We do not support eeor mode for
 * sending with mbuf chains (like sendfile).
 */
SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
if (user_marks_eor) {
local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
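/*-
 * In explicit-EOR mode a message may be handed down in pieces, so
 * we only require room for the add-more threshold (or the whole
 * send buffer, whichever is smaller) instead of the full message.
 */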
/*-
 * For non-eeor the whole message must fit in
 * the socket send buffer.
 */
local_add_more = sndlen;
if (non_blocking) {
goto skip_preblock;
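/*-
 * Pre-block: if the send cannot currently fit (or too many chunks
 * are already queued), sleep on the send buffer via sbwait() until
 * SACK processing frees enough room; non-blocking sockets skipped
 * this through skip_preblock above.
 */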
if (((max_len <= local_add_more) &&
(SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
/* No room right now! */
SOCKBUF_LOCK(&so->so_snd);
inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
(unsigned int)SCTP_SB_LIMIT_SND(so),
stcb->asoc.stream_queue_cnt,
stcb->asoc.chunks_on_out_queue,
SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, so, asoc, sndlen);
stcb->block_entry = &be;
error = sbwait(&so->so_snd);
stcb->block_entry = NULL;
if (error || so->so_error || be.error) {
error = so->so_error;
SOCKBUF_UNLOCK(&so->so_snd);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
so, asoc, stcb->asoc.total_output_queue_size);
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
SOCKBUF_UNLOCK(&so->so_snd);
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
/*-
 * sndlen covers the mbuf case; uio_resid covers the non-mbuf
 * case. NOTE: uio will be NULL when top (an mbuf chain) is passed.
 */
if (srcv->sinfo_flags & SCTP_EOF) {
got_all_of_the_send = 1;
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
struct sctp_stream_queue_pending *sp;
struct sctp_stream_out *strm;
uint32_t sndout, initial_out;

initial_out = uio->uio_resid;
SCTP_TCB_SEND_LOCK(stcb);
if ((asoc->stream_locked) &&
(asoc->stream_locked_on != srcv->sinfo_stream)) {
SCTP_TCB_SEND_UNLOCK(stcb);
SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
SCTP_TCB_SEND_UNLOCK(stcb);
strm = &stcb->asoc.strmout[srcv->sinfo_stream];
if (strm->last_msg_incomplete == 0) {
sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
if ((sp == NULL) || (error)) {
SCTP_TCB_SEND_LOCK(stcb);
if (sp->msg_is_complete) {
strm->last_msg_incomplete = 0;
asoc->stream_locked = 0;
/* Just got locked to this guy in case of an interrupt. */
strm->last_msg_incomplete = 1;
asoc->stream_locked = 1;
asoc->stream_locked_on = srcv->sinfo_stream;
sp->sender_all_done = 0;
sctp_snd_sb_alloc(stcb, sp->length);
atomic_add_int(&asoc->stream_queue_cnt, 1);
if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
sp->strseq = strm->next_sequence_sent;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
(uintptr_t) stcb, sp->length,
(uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
strm->next_sequence_sent++;
SCTP_STAT_INCR(sctps_sends_with_unord);
TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
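/*-
 * Streams holding queued data hang off the output "wheel", which
 * the chunk-output path services in turn; a stream is inserted
 * the first time data lands on its otherwise empty queue.
 */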
if ((strm->next_spoke.tqe_next == NULL) &&
(strm->next_spoke.tqe_prev == NULL)) {
/* Not on wheel, insert */
sctp_insert_on_wheel(stcb, asoc, strm, 1);
SCTP_TCB_SEND_UNLOCK(stcb);
SCTP_TCB_SEND_LOCK(stcb);
sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
SCTP_TCB_SEND_UNLOCK(stcb);
/* ???? Huh ??? last msg is gone */
panic("Warning: Last msg marked incomplete, yet nothing left?");
SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
strm->last_msg_incomplete = 0;
while (uio->uio_resid > 0) {
/* How much room do we have? */
struct mbuf *new_tail, *mm;

if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
else
max_len = 0;
if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
(max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
(uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
if (hold_tcblock) {
SCTP_TCB_UNLOCK(stcb);
mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
if ((mm == NULL) || error) {
/* Update the mbuf and count */
SCTP_TCB_SEND_LOCK(stcb);
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
/* we need to get out. Peer probably aborted. */
if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
error = ECONNRESET;
SCTP_TCB_SEND_UNLOCK(stcb);
if (sp->tail_mbuf) {
/* tack it to the end */
SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
sp->tail_mbuf = new_tail;
/* A stolen mbuf */
sp->data = mm;
sp->tail_mbuf = new_tail;
sctp_snd_sb_alloc(stcb, sndout);
atomic_add_int(&sp->length, sndout);
/* Did we reach EOR? */
if ((uio->uio_resid == 0) &&
((user_marks_eor == 0) ||
(srcv->sinfo_flags & SCTP_EOF) ||
(user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
sp->msg_is_complete = 1;
sp->msg_is_complete = 0;
SCTP_TCB_SEND_UNLOCK(stcb);
if (uio->uio_resid == 0) {
if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
/* This is ugly, but we must assure locking order */
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
else
max_len = 0;
SCTP_TCB_UNLOCK(stcb);
/* wait for space now */
if (non_blocking) {
/* Non-blocking I/O: get out */
if ((net->flight_size > net->cwnd) &&
(SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
queue_only = 1;
} else if (asoc->ifp_had_enobuf) {
SCTP_STAT_INCR(sctps_ifnomemqueued);
if (net->flight_size > (net->mtu * 2)) {
queue_only = 1;
asoc->ifp_had_enobuf = 0;
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
if (net->flight_size > net->cwnd) {
queue_only = 1;
SCTP_STAT_INCR(sctps_send_cwnd_avoid);
if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
(stcb->asoc.total_flight > 0) &&
(stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
(un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
/*-
 * Ok, Nagle is set on and we have data outstanding.
 * Don't send anything and let SACKs drive out the
 * data, unless we have a "full" segment to send.
 */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
SCTP_STAT_INCR(sctps_naglequeued);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
SCTP_STAT_INCR(sctps_naglesent);
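/*-
 * The Nagle test above mirrors TCP: with data still in flight and
 * less than a full segment unsent (un_sent below the smallest MTU
 * minus overhead), hold the data and let an arriving SACK trigger
 * the actual send.
 */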
/* What about the INIT? Send it maybe. */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
nagle_applies, un_sent);
sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
stcb->asoc.total_flight,
stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
if (queue_only_for_init) {
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
/* a collision took us forward? */
queue_only_for_init = 0;
queue_only = 0;
} else {
sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
queue_only_for_init = 0;
queue_only = 1;
if ((queue_only == 0) && (nagle_applies == 0)) {
/*-
 * We need to start chunk output before blocking. Note that
 * if a lock is already applied, then the input via the net
 * is happening and I don't need to start output :-D
 */
if (hold_tcblock == 0) {
if (SCTP_TCB_TRYLOCK(stcb)) {
sctp_chunk_output(inp, stcb,
SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
sctp_chunk_output(inp, stcb,
SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
if (hold_tcblock == 1) {
SCTP_TCB_UNLOCK(stcb);
SOCKBUF_LOCK(&so->so_snd);
/*-
 * This is a bit strange, but I think it will work. The
 * total_output_queue_size is locked and protected by the
 * TCB_LOCK, which we just released. There is a race that can
 * occur between releasing it above and me getting the socket
 * lock, where sacks come in but we have not put the SB_WAIT on
 * the so_snd buffer to get the wakeup. After the LOCK is
 * applied the sack_processing will also need to LOCK the
 * so->so_snd to do the actual sowwakeup(). So once we have the
 * socket buffer lock, if we recheck the size we KNOW we will
 * get to sleep safely with the wakeup flag in place.
 */
if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
so, asoc, uio->uio_resid);
stcb->block_entry = &be;
error = sbwait(&so->so_snd);
stcb->block_entry = NULL;
if (error || so->so_error || be.error) {
error = so->so_error;
SOCKBUF_UNLOCK(&so->so_snd);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
so, asoc, stcb->asoc.total_output_queue_size);
SOCKBUF_UNLOCK(&so->so_snd);
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
SCTP_TCB_SEND_LOCK(stcb);
if (sp->msg_is_complete == 0) {
strm->last_msg_incomplete = 1;
asoc->stream_locked = 1;
asoc->stream_locked_on = srcv->sinfo_stream;
sp->sender_all_done = 1;
strm->last_msg_incomplete = 0;
asoc->stream_locked = 0;
SCTP_PRINTF("Huh no sp TSNH?\n");
strm->last_msg_incomplete = 0;
asoc->stream_locked = 0;
SCTP_TCB_SEND_UNLOCK(stcb);
if (uio->uio_resid == 0) {
got_all_of_the_send = 1;
/* We send in a 0, since we do NOT have any locks */
error = sctp_msg_append(stcb, net, top, srcv, 0);
if (srcv->sinfo_flags & SCTP_EOF) {
/*-
 * This should only happen for Panda for the mbuf
 * send case, which does NOT yet support EEOR mode.
 * Thus, we can just set this flag to do the proper
 * EOF handling.
 */
got_all_of_the_send = 1;
if ((srcv->sinfo_flags & SCTP_EOF) &&
(got_all_of_the_send == 1) &&
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
int cnt;

SCTP_STAT_INCR(sctps_sends_with_eof);
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
cnt = sctp_is_there_unsent_data(stcb);
if (TAILQ_EMPTY(&asoc->send_queue) &&
TAILQ_EMPTY(&asoc->sent_queue) &&
(cnt == 0)) {
if (asoc->locked_on_sending) {
goto abort_anyway;
/* there is nothing queued to send, so I'm done... */
if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
/* only send SHUTDOWN the first time through */
sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
SCTP_STAT_DECR_GAUGE32(sctps_currestab);
SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
asoc->primary_destination);
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
asoc->primary_destination);
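/*-
 * Graceful shutdown thus arms two timers: the SHUTDOWN
 * retransmission timer and the guard timer, which bounds how long
 * the whole shutdown sequence may take.
 */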
/*-
 * We still got (or just got) data to send, so set
 * SHUTDOWN_PENDING.
 */
/*-
 * XXX: The sockets draft says that SCTP_EOF should be sent
 * with no data. Currently, we will allow user data to be
 * sent first and move to SHUTDOWN-PENDING.
 */
if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
if (asoc->locked_on_sending) {
/* Locked to send out the data */
struct sctp_stream_queue_pending *sp;

sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
if ((sp->length == 0) && (sp->msg_is_complete == 0))
asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
if (TAILQ_EMPTY(&asoc->send_queue) &&
TAILQ_EMPTY(&asoc->sent_queue) &&
(asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
abort_anyway:
if (free_cnt_applied) {
atomic_add_int(&stcb->asoc.refcnt, -1);
free_cnt_applied = 0;
sctp_abort_an_association(stcb->sctp_ep, stcb,
SCTP_RESPONSE_TO_USER_REQ,
NULL, SCTP_SO_LOCKED);
/* now relock the stcb so everything is sane */
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
asoc->primary_destination);
sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
some_on_control = 1;
if ((net->flight_size > net->cwnd) &&
(SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0)) {
queue_only = 1;
} else if (asoc->ifp_had_enobuf) {
SCTP_STAT_INCR(sctps_ifnomemqueued);
if (net->flight_size > (net->mtu * 2)) {
queue_only = 1;
asoc->ifp_had_enobuf = 0;
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
(stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
if (net->flight_size > net->cwnd) {
queue_only = 1;
SCTP_STAT_INCR(sctps_send_cwnd_avoid);
if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
(stcb->asoc.total_flight > 0) &&
(stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
(un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
/*-
 * Ok, Nagle is set on and we have data outstanding.
 * Don't send anything and let SACKs drive out the
 * data, unless we have a "full" segment to send.
 */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
SCTP_STAT_INCR(sctps_naglequeued);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
SCTP_STAT_INCR(sctps_naglesent);
if (queue_only_for_init) {
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
/* a collision took us forward? */
queue_only_for_init = 0;
queue_only = 0;
} else {
sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
queue_only_for_init = 0;
queue_only = 1;
if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
/* we can attempt to send too. */
if (hold_tcblock == 0) {
/*-
 * If there is activity recv'ing sacks, no need
 * to send out more.
 */
if (SCTP_TCB_TRYLOCK(stcb)) {
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
} else if ((queue_only == 0) &&
(stcb->asoc.peers_rwnd == 0) &&
(stcb->asoc.total_flight == 0)) {
/* We get to have a probe outstanding */
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
} else if (some_on_control) {
int num_out, reason, frag_point;

/* Here we do control only */
if (hold_tcblock == 0) {
SCTP_TCB_LOCK(stcb);
frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
&reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
queue_only, stcb->asoc.peers_rwnd, un_sent,
stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
stcb->asoc.total_output_queue_size, error);
if (local_soresv && stcb) {
atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
if (create_lock_applied) {
SCTP_ASOC_CREATE_UNLOCK(inp);
create_lock_applied = 0;
if ((stcb) && hold_tcblock) {
SCTP_TCB_UNLOCK(stcb);
if (stcb && free_cnt_applied) {
atomic_add_int(&stcb->asoc.refcnt, -1);
if (mtx_owned(&stcb->tcb_mtx)) {
panic("Leaving with tcb mtx owned?");
if (mtx_owned(&stcb->tcb_send_mtx)) {
panic("Leaving with tcb send mtx owned?");
sctp_m_freem(control);
/*
 * generate an AUTHentication chunk, if required
 */
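/*-
 * Per RFC 4895 the AUTH chunk built below carries the chunk header
 * and the HMAC identifier; the shared key id and the digest bytes
 * that follow are left zeroed here and filled in over the finished
 * packet at send time. A caller uses it roughly like this
 * (schematic; names follow the signature below):
 *
 *	m = sctp_add_auth_chunk(m, &m_end, &auth, &auth_offset,
 *	    stcb, SCTP_DATA);
 *
 * after which auth and auth_offset let the send path locate the
 * chunk again to complete it.
 */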
static struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
	struct mbuf *m_auth;
	struct sctp_auth_chunk *auth;
	int chunk_len;

	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
	    (stcb == NULL))
		return (m);

	/* sysctl disabled auth? */
	if (SCTP_BASE_SYSCTL(sctp_auth_disable))
		return (m);

	/* peer doesn't do auth... */
	if (!stcb->asoc.peer_supports_auth) {
		return (m);
	}
	/* does the requested chunk require auth? */
	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
		return (m);
	}
	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_auth == NULL) {
		return (m);
	}
	/* reserve some space if this will be the first mbuf */
	if (m == NULL)
		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
	/* fill in the AUTH chunk details */
	auth = mtod(m_auth, struct sctp_auth_chunk *);
	bzero(auth, sizeof(*auth));
	auth->ch.chunk_type = SCTP_AUTHENTICATION;
	auth->ch.chunk_flags = 0;
	chunk_len = sizeof(*auth) +
	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
	auth->ch.chunk_length = htons(chunk_len);
	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
	/* key id and hmac digest will be computed and filled in upon send */

	/* save the offset where the auth was inserted into the chain */
	if (m != NULL) {
		struct mbuf *cn;

		*offset = 0;
		cn = m;
		while (cn) {
			*offset += SCTP_BUF_LEN(cn);
			cn = SCTP_BUF_NEXT(cn);
		}
	} else {
		*offset = 0;
	}
	/* update length and return pointer to the auth chunk */
	SCTP_BUF_LEN(m_auth) = chunk_len;
	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
	if (auth_ret != NULL)
		*auth_ret = auth;

	return (m);
}
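/*-
 * Source-address/next-hop matching for IPv6: a candidate source
 * address is acceptable only if the route's gateway is one of the
 * routers that advertised the ND prefix the address was formed
 * from, which the routine below checks against the prefix list.
 */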
static int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
{
	struct nd_prefix *pfx = NULL;
	struct nd_pfxrouter *pfxrtr = NULL;
	struct sockaddr_in6 gw6;

	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
		return (0);

	/* get prefix entry of address */
	LIST_FOREACH(pfx, &MODULE_GLOBAL(MOD_INET6, nd_prefix), ndpr_entry) {
		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
			continue;
		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
		    &src6->sin6_addr, &pfx->ndpr_mask))
			break;
	}
	/* no prefix entry in the prefix list */
	if (pfx == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
		return (0);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);

	/* search installed gateway from prefix entry */
	for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
	    pfxrtr->pfr_next) {
		memset(&gw6, 0, sizeof(struct sockaddr_in6));
		gw6.sin6_family = AF_INET6;
		gw6.sin6_len = sizeof(struct sockaddr_in6);
		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
		    sizeof(struct in6_addr));
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
		if (sctp_cmpaddr((struct sockaddr *)&gw6,
		    ro->ro_rt->rt_gateway)) {
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
			return (1);
		}
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
	return (0);
}
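/*-
 * The IPv4 counterpart has no ND prefix list to consult; instead
 * the routine below masks both the candidate source address and
 * the route's gateway with the interface netmask and accepts the
 * pair when they land in the same subnet.
 */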
static int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
{
	struct sockaddr_in *sin, *mask;
	struct ifaddr *ifa;
	struct in_addr srcnetaddr, gwnetaddr;

	if (ro == NULL || ro->ro_rt == NULL ||
	    sifa->address.sa.sa_family != AF_INET) {
		return (0);
	}
	ifa = (struct ifaddr *)sifa->ifa;
	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
	sin = (struct sockaddr_in *)&sifa->address.sin;
	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);

	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {