/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#define SCTP_MAX_GAPS_INARRAY 4
struct sack_track {
	uint8_t right_edge;	/* mergeable on the right edge */
	uint8_t left_edge;	/* mergeable on the left edge */
	uint8_t num_entries;
	uint8_t spare;
	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};
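/*
 * sack_array[] is a 256-entry lookup table indexed by one byte of the
 * received-TSN mapping array.  Each entry precomputes the GAP ACK blocks
 * implied by that bit pattern, so SACK construction can translate a byte
 * at a time instead of walking individual bits.  For example (a sketch,
 * gap blocks written as {start,end} offsets): the byte 0x05, bits 0 and
 * 2 set, yields the two one-TSN blocks {1,1} and {3,3}, and is marked
 * mergeable on the right edge only.
 */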
struct sack_track sack_array[256] = {
	{0, 0, 0, 0,		/* 0x00 */
	{1, 0, 1, 0,		/* 0x01 */
	{0, 0, 1, 0,		/* 0x02 */
	{1, 0, 1, 0,		/* 0x03 */
	{0, 0, 1, 0,		/* 0x04 */
	{1, 0, 2, 0,		/* 0x05 */
	{0, 0, 1, 0,		/* 0x06 */
	{1, 0, 1, 0,		/* 0x07 */
	{0, 0, 1, 0,		/* 0x08 */
	{1, 0, 2, 0,		/* 0x09 */
	{0, 0, 2, 0,		/* 0x0a */
	{1, 0, 2, 0,		/* 0x0b */
	{0, 0, 1, 0,		/* 0x0c */
	{1, 0, 2, 0,		/* 0x0d */
	{0, 0, 1, 0,		/* 0x0e */
	{1, 0, 1, 0,		/* 0x0f */
	{0, 0, 1, 0,		/* 0x10 */
	{1, 0, 2, 0,		/* 0x11 */
	{0, 0, 2, 0,		/* 0x12 */
	{1, 0, 2, 0,		/* 0x13 */
	{0, 0, 2, 0,		/* 0x14 */
	{1, 0, 3, 0,		/* 0x15 */
	{0, 0, 2, 0,		/* 0x16 */
	{1, 0, 2, 0,		/* 0x17 */
	{0, 0, 1, 0,		/* 0x18 */
	{1, 0, 2, 0,		/* 0x19 */
	{0, 0, 2, 0,		/* 0x1a */
	{1, 0, 2, 0,		/* 0x1b */
	{0, 0, 1, 0,		/* 0x1c */
	{1, 0, 2, 0,		/* 0x1d */
	{0, 0, 1, 0,		/* 0x1e */
	{1, 0, 1, 0,		/* 0x1f */
	{0, 0, 1, 0,		/* 0x20 */
	{1, 0, 2, 0,		/* 0x21 */
	{0, 0, 2, 0,		/* 0x22 */
	{1, 0, 2, 0,		/* 0x23 */
	{0, 0, 2, 0,		/* 0x24 */
	{1, 0, 3, 0,		/* 0x25 */
	{0, 0, 2, 0,		/* 0x26 */
	{1, 0, 2, 0,		/* 0x27 */
	{0, 0, 2, 0,		/* 0x28 */
	{1, 0, 3, 0,		/* 0x29 */
	{0, 0, 3, 0,		/* 0x2a */
	{1, 0, 3, 0,		/* 0x2b */
	{0, 0, 2, 0,		/* 0x2c */
	{1, 0, 3, 0,		/* 0x2d */
	{0, 0, 2, 0,		/* 0x2e */
	{1, 0, 2, 0,		/* 0x2f */
	{0, 0, 1, 0,		/* 0x30 */
	{1, 0, 2, 0,		/* 0x31 */
	{0, 0, 2, 0,		/* 0x32 */
	{1, 0, 2, 0,		/* 0x33 */
	{0, 0, 2, 0,		/* 0x34 */
	{1, 0, 3, 0,		/* 0x35 */
	{0, 0, 2, 0,		/* 0x36 */
	{1, 0, 2, 0,		/* 0x37 */
	{0, 0, 1, 0,		/* 0x38 */
	{1, 0, 2, 0,		/* 0x39 */
	{0, 0, 2, 0,		/* 0x3a */
	{1, 0, 2, 0,		/* 0x3b */
	{0, 0, 1, 0,		/* 0x3c */
	{1, 0, 2, 0,		/* 0x3d */
	{0, 0, 1, 0,		/* 0x3e */
	{1, 0, 1, 0,		/* 0x3f */
	{0, 0, 1, 0,		/* 0x40 */
	{1, 0, 2, 0,		/* 0x41 */
	{0, 0, 2, 0,		/* 0x42 */
	{1, 0, 2, 0,		/* 0x43 */
	{0, 0, 2, 0,		/* 0x44 */
	{1, 0, 3, 0,		/* 0x45 */
	{0, 0, 2, 0,		/* 0x46 */
	{1, 0, 2, 0,		/* 0x47 */
	{0, 0, 2, 0,		/* 0x48 */
	{1, 0, 3, 0,		/* 0x49 */
	{0, 0, 3, 0,		/* 0x4a */
	{1, 0, 3, 0,		/* 0x4b */
	{0, 0, 2, 0,		/* 0x4c */
	{1, 0, 3, 0,		/* 0x4d */
	{0, 0, 2, 0,		/* 0x4e */
	{1, 0, 2, 0,		/* 0x4f */
	{0, 0, 2, 0,		/* 0x50 */
	{1, 0, 3, 0,		/* 0x51 */
	{0, 0, 3, 0,		/* 0x52 */
	{1, 0, 3, 0,		/* 0x53 */
	{0, 0, 3, 0,		/* 0x54 */
	{1, 0, 4, 0,		/* 0x55 */
	{0, 0, 3, 0,		/* 0x56 */
	{1, 0, 3, 0,		/* 0x57 */
	{0, 0, 2, 0,		/* 0x58 */
	{1, 0, 3, 0,		/* 0x59 */
	{0, 0, 3, 0,		/* 0x5a */
	{1, 0, 3, 0,		/* 0x5b */
	{0, 0, 2, 0,		/* 0x5c */
	{1, 0, 3, 0,		/* 0x5d */
	{0, 0, 2, 0,		/* 0x5e */
	{1, 0, 2, 0,		/* 0x5f */
	{0, 0, 1, 0,		/* 0x60 */
	{1, 0, 2, 0,		/* 0x61 */
	{0, 0, 2, 0,		/* 0x62 */
	{1, 0, 2, 0,		/* 0x63 */
	{0, 0, 2, 0,		/* 0x64 */
	{1, 0, 3, 0,		/* 0x65 */
	{0, 0, 2, 0,		/* 0x66 */
	{1, 0, 2, 0,		/* 0x67 */
	{0, 0, 2, 0,		/* 0x68 */
	{1, 0, 3, 0,		/* 0x69 */
	{0, 0, 3, 0,		/* 0x6a */
	{1, 0, 3, 0,		/* 0x6b */
	{0, 0, 2, 0,		/* 0x6c */
	{1, 0, 3, 0,		/* 0x6d */
	{0, 0, 2, 0,		/* 0x6e */
	{1, 0, 2, 0,		/* 0x6f */
	{0, 0, 1, 0,		/* 0x70 */
	{1, 0, 2, 0,		/* 0x71 */
	{0, 0, 2, 0,		/* 0x72 */
	{1, 0, 2, 0,		/* 0x73 */
	{0, 0, 2, 0,		/* 0x74 */
	{1, 0, 3, 0,		/* 0x75 */
	{0, 0, 2, 0,		/* 0x76 */
	{1, 0, 2, 0,		/* 0x77 */
	{0, 0, 1, 0,		/* 0x78 */
	{1, 0, 2, 0,		/* 0x79 */
	{0, 0, 2, 0,		/* 0x7a */
	{1, 0, 2, 0,		/* 0x7b */
	{0, 0, 1, 0,		/* 0x7c */
	{1, 0, 2, 0,		/* 0x7d */
	{0, 0, 1, 0,		/* 0x7e */
	{1, 0, 1, 0,		/* 0x7f */
	{0, 1, 1, 0,		/* 0x80 */
	{1, 1, 2, 0,		/* 0x81 */
	{0, 1, 2, 0,		/* 0x82 */
	{1, 1, 2, 0,		/* 0x83 */
	{0, 1, 2, 0,		/* 0x84 */
	{1, 1, 3, 0,		/* 0x85 */
	{0, 1, 2, 0,		/* 0x86 */
	{1, 1, 2, 0,		/* 0x87 */
	{0, 1, 2, 0,		/* 0x88 */
	{1, 1, 3, 0,		/* 0x89 */
	{0, 1, 3, 0,		/* 0x8a */
	{1, 1, 3, 0,		/* 0x8b */
	{0, 1, 2, 0,		/* 0x8c */
	{1, 1, 3, 0,		/* 0x8d */
	{0, 1, 2, 0,		/* 0x8e */
	{1, 1, 2, 0,		/* 0x8f */
	{0, 1, 2, 0,		/* 0x90 */
	{1, 1, 3, 0,		/* 0x91 */
	{0, 1, 3, 0,		/* 0x92 */
	{1, 1, 3, 0,		/* 0x93 */
	{0, 1, 3, 0,		/* 0x94 */
	{1, 1, 4, 0,		/* 0x95 */
	{0, 1, 3, 0,		/* 0x96 */
	{1, 1, 3, 0,		/* 0x97 */
	{0, 1, 2, 0,		/* 0x98 */
	{1, 1, 3, 0,		/* 0x99 */
	{0, 1, 3, 0,		/* 0x9a */
	{1, 1, 3, 0,		/* 0x9b */
	{0, 1, 2, 0,		/* 0x9c */
	{1, 1, 3, 0,		/* 0x9d */
	{0, 1, 2, 0,		/* 0x9e */
	{1, 1, 2, 0,		/* 0x9f */
	{0, 1, 2, 0,		/* 0xa0 */
	{1, 1, 3, 0,		/* 0xa1 */
	{0, 1, 3, 0,		/* 0xa2 */
	{1, 1, 3, 0,		/* 0xa3 */
	{0, 1, 3, 0,		/* 0xa4 */
	{1, 1, 4, 0,		/* 0xa5 */
	{0, 1, 3, 0,		/* 0xa6 */
	{1, 1, 3, 0,		/* 0xa7 */
	{0, 1, 3, 0,		/* 0xa8 */
	{1, 1, 4, 0,		/* 0xa9 */
	{0, 1, 4, 0,		/* 0xaa */
	{1, 1, 4, 0,		/* 0xab */
	{0, 1, 3, 0,		/* 0xac */
	{1, 1, 4, 0,		/* 0xad */
	{0, 1, 3, 0,		/* 0xae */
	{1, 1, 3, 0,		/* 0xaf */
	{0, 1, 2, 0,		/* 0xb0 */
	{1, 1, 3, 0,		/* 0xb1 */
	{0, 1, 3, 0,		/* 0xb2 */
	{1, 1, 3, 0,		/* 0xb3 */
	{0, 1, 3, 0,		/* 0xb4 */
	{1, 1, 4, 0,		/* 0xb5 */
	{0, 1, 3, 0,		/* 0xb6 */
	{1, 1, 3, 0,		/* 0xb7 */
	{0, 1, 2, 0,		/* 0xb8 */
	{1, 1, 3, 0,		/* 0xb9 */
	{0, 1, 3, 0,		/* 0xba */
	{1, 1, 3, 0,		/* 0xbb */
	{0, 1, 2, 0,		/* 0xbc */
	{1, 1, 3, 0,		/* 0xbd */
	{0, 1, 2, 0,		/* 0xbe */
	{1, 1, 2, 0,		/* 0xbf */
	{0, 1, 1, 0,		/* 0xc0 */
	{1, 1, 2, 0,		/* 0xc1 */
	{0, 1, 2, 0,		/* 0xc2 */
	{1, 1, 2, 0,		/* 0xc3 */
	{0, 1, 2, 0,		/* 0xc4 */
	{1, 1, 3, 0,		/* 0xc5 */
	{0, 1, 2, 0,		/* 0xc6 */
	{1, 1, 2, 0,		/* 0xc7 */
	{0, 1, 2, 0,		/* 0xc8 */
	{1, 1, 3, 0,		/* 0xc9 */
	{0, 1, 3, 0,		/* 0xca */
	{1, 1, 3, 0,		/* 0xcb */
	{0, 1, 2, 0,		/* 0xcc */
	{1, 1, 3, 0,		/* 0xcd */
	{0, 1, 2, 0,		/* 0xce */
	{1, 1, 2, 0,		/* 0xcf */
	{0, 1, 2, 0,		/* 0xd0 */
	{1, 1, 3, 0,		/* 0xd1 */
	{0, 1, 3, 0,		/* 0xd2 */
	{1, 1, 3, 0,		/* 0xd3 */
	{0, 1, 3, 0,		/* 0xd4 */
	{1, 1, 4, 0,		/* 0xd5 */
	{0, 1, 3, 0,		/* 0xd6 */
	{1, 1, 3, 0,		/* 0xd7 */
	{0, 1, 2, 0,		/* 0xd8 */
	{1, 1, 3, 0,		/* 0xd9 */
	{0, 1, 3, 0,		/* 0xda */
	{1, 1, 3, 0,		/* 0xdb */
	{0, 1, 2, 0,		/* 0xdc */
	{1, 1, 3, 0,		/* 0xdd */
	{0, 1, 2, 0,		/* 0xde */
	{1, 1, 2, 0,		/* 0xdf */
	{0, 1, 1, 0,		/* 0xe0 */
	{1, 1, 2, 0,		/* 0xe1 */
	{0, 1, 2, 0,		/* 0xe2 */
	{1, 1, 2, 0,		/* 0xe3 */
	{0, 1, 2, 0,		/* 0xe4 */
	{1, 1, 3, 0,		/* 0xe5 */
	{0, 1, 2, 0,		/* 0xe6 */
	{1, 1, 2, 0,		/* 0xe7 */
	{0, 1, 2, 0,		/* 0xe8 */
	{1, 1, 3, 0,		/* 0xe9 */
	{0, 1, 3, 0,		/* 0xea */
	{1, 1, 3, 0,		/* 0xeb */
	{0, 1, 2, 0,		/* 0xec */
	{1, 1, 3, 0,		/* 0xed */
	{0, 1, 2, 0,		/* 0xee */
	{1, 1, 2, 0,		/* 0xef */
	{0, 1, 1, 0,		/* 0xf0 */
	{1, 1, 2, 0,		/* 0xf1 */
	{0, 1, 2, 0,		/* 0xf2 */
	{1, 1, 2, 0,		/* 0xf3 */
	{0, 1, 2, 0,		/* 0xf4 */
	{1, 1, 3, 0,		/* 0xf5 */
	{0, 1, 2, 0,		/* 0xf6 */
	{1, 1, 2, 0,		/* 0xf7 */
	{0, 1, 1, 0,		/* 0xf8 */
	{1, 1, 2, 0,		/* 0xf9 */
	{0, 1, 2, 0,		/* 0xfa */
	{1, 1, 2, 0,		/* 0xfb */
	{0, 1, 1, 0,		/* 0xfc */
	{1, 1, 2, 0,		/* 0xfd */
	{0, 1, 1, 0,		/* 0xfe */
	{1, 1, 1, 0,		/* 0xff */
};
static int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
    int ipv4_addr_legal,
    int ipv6_addr_legal,
    int loopback_scope,
    int ipv4_local_scope,
    int local_scope,
    int site_scope,
    int do_update)
{
	if ((loopback_scope == 0) &&
	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
		/* skip loopback if not in scope */
		return (0);
	}
	switch (ifa->address.sa.sa_family) {
	case AF_INET:
		if (ipv4_addr_legal) {
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)&ifa->address.sin;
			if (sin->sin_addr.s_addr == 0) {
				/* not in scope, unspecified */
				return (0);
			}
			if ((ipv4_local_scope == 0) &&
			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
				/* private address not in scope */
				return (0);
			}
		} else {
			return (0);
		}
		break;
	case AF_INET6:
		if (ipv6_addr_legal) {
			struct sockaddr_in6 *sin6;

			/*
			 * Must update the flags, bummer, which means any
			 * IFA locks must now be applied HERE <->
			 */
			if (do_update) {
				sctp_gather_internal_ifa_flags(ifa);
			}
			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
				return (0);
			}
			/* ok to use deprecated addresses? */
			sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				/* skip unspecified addresses */
				return (0);
			}
			if ( /* (local_scope == 0) && */
			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
				return (0);
			}
			if ((site_scope == 0) &&
			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
				return (0);
			}
		} else {
			return (0);
		}
		break;
	default:
		return (0);
	}
	return (1);
}
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
{
	struct sctp_paramhdr *parmh;
	struct mbuf *mret;
	int len;

	if (ifa->address.sa.sa_family == AF_INET) {
		len = sizeof(struct sctp_ipv4addr_param);
	} else if (ifa->address.sa.sa_family == AF_INET6) {
		len = sizeof(struct sctp_ipv6addr_param);
	} else {
		/* unknown type */
		return (m);
	}
	if (M_TRAILINGSPACE(m) >= len) {
		/* easy side we just drop it on the end */
		parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
		mret = m;
	} else {
		/* Need more space */
		mret = m;
		while (SCTP_BUF_NEXT(mret) != NULL) {
			mret = SCTP_BUF_NEXT(mret);
		}
		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
		if (SCTP_BUF_NEXT(mret) == NULL) {
			/* We are hosed, can't add more addresses */
			return (m);
		}
		mret = SCTP_BUF_NEXT(mret);
		parmh = mtod(mret, struct sctp_paramhdr *);
	}
	/* now add the parameter */
	switch (ifa->address.sa.sa_family) {
	case AF_INET:
		{
			struct sctp_ipv4addr_param *ipv4p;
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)&ifa->address.sin;
			ipv4p = (struct sctp_ipv4addr_param *)parmh;
			parmh->param_type = htons(SCTP_IPV4_ADDRESS);
			parmh->param_length = htons(len);
			ipv4p->addr = sin->sin_addr.s_addr;
			SCTP_BUF_LEN(mret) += len;
			break;
		}
	case AF_INET6:
		{
			struct sctp_ipv6addr_param *ipv6p;
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
			ipv6p = (struct sctp_ipv6addr_param *)parmh;
			parmh->param_type = htons(SCTP_IPV6_ADDRESS);
			parmh->param_length = htons(len);
			memcpy(ipv6p->addr, &sin6->sin6_addr,
			    sizeof(ipv6p->addr));
			/* clear embedded scope in the address */
			in6_clearscope((struct in6_addr *)ipv6p->addr);
			SCTP_BUF_LEN(mret) += len;
			break;
		}
	default:
		return (m);
	}
	return (mret);
}
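/*
 * Wire layout of the parameters appended above, per RFC 4960 (stated
 * here for reference): each is a plain TLV.  SCTP_IPV4_ADDRESS is type
 * 5, total length 8 (4 bytes of header plus the 4-byte address);
 * SCTP_IPV6_ADDRESS is type 6, total length 20 (4 bytes of header plus
 * the 16-byte address).  Both are already 4-byte aligned, so no padding
 * is required.
 */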
static struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to)
{
	struct sctp_vrf *vrf = NULL;
	int cnt, limit_out = 0, total_count;
	uint32_t vrf_id;

	vrf_id = inp->def_vrf_id;
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		SCTP_IPI_ADDR_RUNLOCK();
		return (m_at);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifa *sctp_ifap;
		struct sctp_ifn *sctp_ifnp;

		cnt = cnt_inits_to;
		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
			limit_out = 1;
			cnt = SCTP_ADDRESS_LIMIT;
		}
		/* first pass: count the in-scope addresses */
		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
			if ((scope->loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
				/*
				 * Skip loopback devices if loopback_scope
				 * not set
				 */
				continue;
			}
			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
				if (sctp_is_address_in_scope(sctp_ifap,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 1) == 0) {
					continue;
				}
				cnt++;
				if (cnt > SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
			if (cnt > SCTP_ADDRESS_LIMIT) {
				break;
			}
		}
		/* second pass: add the addresses to the INIT chain */
		total_count = cnt_inits_to;
		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
			if ((scope->loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
				/*
				 * Skip loopback devices if
				 * loopback_scope not set
				 */
				continue;
			}
			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
				if (sctp_is_address_in_scope(sctp_ifap,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 0) == 0) {
					continue;
				}
				m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
				total_count++;
				if (total_count > SCTP_ADDRESS_LIMIT) {
					/* No more addresses */
					break;
				}
			}
		}
	} else {
		struct sctp_laddr *laddr;

		cnt = cnt_inits_to;
		/* First, how many ? */
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				continue;
			}
			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/*
				 * Address being deleted by the system, don't
				 * list.
				 */
				continue;
			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
				/*
				 * Address being deleted on this ep don't
				 * list.
				 */
				continue;
			}
			if (sctp_is_address_in_scope(laddr->ifa,
			    scope->ipv4_addr_legal,
			    scope->ipv6_addr_legal,
			    scope->loopback_scope,
			    scope->ipv4_local_scope,
			    scope->local_scope,
			    scope->site_scope, 1) == 0) {
				continue;
			}
			cnt++;
		}
		if (cnt > SCTP_ADDRESS_LIMIT) {
			limit_out = 1;
		}
		/*
		 * To get through a NAT we only list addresses if we have
		 * more than one. That way if you just bind a single address
		 * we let the source of the init dictate our address.
		 */
		if (cnt > 1) {
			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
				if (laddr->ifa == NULL) {
					continue;
				}
				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
					continue;
				if (sctp_is_address_in_scope(laddr->ifa,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 0) == 0) {
					continue;
				}
				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
				cnt++;
				if (cnt >= SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (m_at);
}
static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	uint8_t dest_is_global = 0;

	/* dest_is_priv is true if destination is a private address */
	/* dest_is_loop is true if destination is a loopback address */

	/*-
	 * Here we determine if it's a preferred address. A preferred address
	 * means it is the same scope or higher scope than the destination.
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *  src  |  dest  |  result
	 * -----------------------------------------
	 *   L   |   L    |  yes
	 * -----------------------------------------
	 *   P   |   L    |  yes-v4 no-v6
	 * -----------------------------------------
	 *   G   |   L    |  yes-v4 no-v6
	 * -----------------------------------------
	 *   L   |   P    |  no
	 * -----------------------------------------
	 *   P   |   P    |  yes
	 * -----------------------------------------
	 *   G   |   P    |  no
	 * -----------------------------------------
	 *   L   |   G    |  no
	 * -----------------------------------------
	 *   P   |   G    |  no
	 * -----------------------------------------
	 *   G   |   G    |  yes
	 * -----------------------------------------
	 */
	if (ifa->address.sa.sa_family != fam) {
		/* forget mis-matched family */
		return (NULL);
	}
	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
		dest_is_global = 1;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
	/* Ok the address may be ok */
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? no lets not! */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
			return (NULL);
		}
		if (ifa->src_is_priv && !ifa->src_is_loop) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
				return (NULL);
			}
		}
		if (ifa->src_is_glob) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
				return (NULL);
			}
		}
	}
	/*
	 * Now that we know what is what, implement our table. This could in
	 * theory be done slicker (it used to be), but this is
	 * straightforward and easier to validate :-)
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
	    ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
	    dest_is_loop, dest_is_priv, dest_is_global);

	if ((ifa->src_is_loop) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
		return (NULL);
	}
	if ((ifa->src_is_glob) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
		return (NULL);
	}
	if ((ifa->src_is_loop) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
		return (NULL);
	}
	if ((ifa->src_is_priv) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
	/* it's a preferred address */
	return (ifa);
}
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	uint8_t dest_is_global = 0;

	/*-
	 * Here we determine if it's an acceptable address. An acceptable
	 * address means it is the same scope or higher scope but we can
	 * allow for NAT, which means it's ok to have a global dest and a
	 * private src.
	 *
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *  src  |  dest  |  result
	 * -----------------------------------------
	 *   L   |   L    |  yes
	 * -----------------------------------------
	 *   P   |   L    |  yes-v4 no-v6
	 * -----------------------------------------
	 *   G   |   L    |  yes
	 * -----------------------------------------
	 *   L   |   P    |  no
	 * -----------------------------------------
	 *   P   |   P    |  yes
	 * -----------------------------------------
	 *   G   |   P    |  yes - May not work
	 * -----------------------------------------
	 *   L   |   G    |  no
	 * -----------------------------------------
	 *   P   |   G    |  yes - May not work
	 * -----------------------------------------
	 *   G   |   G    |  yes
	 * -----------------------------------------
	 */
	if (ifa->address.sa.sa_family != fam) {
		/* forget non matching family */
		return (NULL);
	}
	/* Ok the address may be ok */
	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
		dest_is_global = 1;
	}
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			return (NULL);
		}
		if (ifa->src_is_priv) {
			/* Special case, linklocal to loop */
			if (dest_is_loop) {
				return (NULL);
			}
		}
	}
	/*
	 * Now that we know what is what, implement our table. This could in
	 * theory be done slicker (it used to be), but this is
	 * straightforward and easier to validate :-)
	 */
	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
		return (NULL);
	}
	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
		return (NULL);
	}
	/* it's an acceptable address */
	return (ifa);
}
static int
sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;

	if (stcb == NULL) {
		/* There are no restrictions, no TCB :-) */
		return (0);
	}
	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
			    __FUNCTION__);
			continue;
		}
		if (laddr->ifa == ifa) {
			/* Yes it is on the list */
			return (1);
		}
	}
	return (0);
}
static int
sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
{
	struct sctp_laddr *laddr;

	if (ifa == NULL)
		return (0);
	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
		if (laddr->ifa == NULL) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
			    __FUNCTION__);
			continue;
		}
		if ((laddr->ifa == ifa) && laddr->action == 0)
			/* same pointer */
			return (1);
	}
	return (0);
}
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
    sctp_route_t * ro,
    uint32_t vrf_id,
    int non_asoc_addr_ok,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	int resettotop = 0;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	/*
	 * first question, is the ifn we will emit on in our list? If so, we
	 * want such an address. Note that we first look for a preferred
	 * address.
	 */
	if (sctp_ifn) {
		/* is a preferred one on the interface we route out? */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
			    (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
			    dest_is_loop,
			    dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (sctp_is_addr_in_ep(inp, sifa)) {
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
	}
	/*
	 * ok, now we need to find one on the list of the addresses. We
	 * can't get one on the emitting interface, so let's find first a
	 * preferred one. If not that, an acceptable one otherwise...
	 */
	starting_point = inp->next_addr_touse;
once_again:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	for (laddr = inp->next_addr_touse; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again;
	}
	inp->next_addr_touse = starting_point;
	resettotop = 0;
once_again_too:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	/* ok, what about an acceptable address in the inp */
	for (laddr = inp->next_addr_touse; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again_too;
	}
	/*
	 * no address bound can be a source for the destination; we are in
	 * trouble.
	 */
	return (NULL);
}
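/*
 * A note on the recurring "no-no list" test in the routines below (an
 * editorial summary): an address on the association's restricted list is
 * normally unusable as a source.  When non_asoc_addr_ok is set the caller
 * may reach outside the association, but a restricted address that is
 * still pending (sctp_is_addr_pending()) remains off limits.
 */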
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t * ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint8_t start_at_beginning = 0;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	/*
	 * first question, is the ifn we will emit on in our list? If so, we
	 * want that one.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);

	/*
	 * first question, is the ifn we will emit on in our list? If so,
	 * we want that one. First we look for a preferred. Second, we go
	 * for an acceptable.
	 */
	if (sctp_ifn) {
		/* first try for a preferred address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				    (sctp_is_addr_restricted(stcb, sifa)) &&
				    (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
		/* next try for an acceptable address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				    (sctp_is_addr_restricted(stcb, sifa)) &&
				    (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
	}
	/*
	 * if we can't find one like that then we must look at all addresses
	 * bound to pick one, first preferable, then secondly acceptable.
	 */
	starting_point = stcb->asoc.last_used_address;
sctp_from_the_top:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		    (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		    (sctp_is_addr_restricted(stcb, sifa)) &&
		    (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top;
	}
	/* now try for any higher scope than the destination */
	stcb->asoc.last_used_address = starting_point;
	start_at_beginning = 0;
sctp_from_the_top2:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		    (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		    (sctp_is_addr_restricted(stcb, sifa)) &&
		    (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top2;
	}
	return (NULL);
}
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    int addr_wanted,
    sa_family_t fam,
    sctp_route_t * ro)
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;

#ifdef INET6
	struct sockaddr_in6 sin6, lsa6;

	if (fam == AF_INET6) {
		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
		(void)sa6_recoverscope(&sin6);
	}
#endif				/* INET6 */
	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0))
			continue;
		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
#ifdef INET6
		if (fam == AF_INET6 &&
		    dest_is_loop &&
		    sifa->src_is_loop && sifa->src_is_priv) {
			/*
			 * don't allow fe80::1 to be a src on loop ::1, we
			 * don't list it to the peer so we will get an
			 * abort.
			 */
			continue;
		}
		if (fam == AF_INET6 &&
		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
			/*
			 * link-local <-> link-local must belong to the same
			 * scope.
			 */
			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
			(void)sa6_recoverscope(&lsa6);
			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
				continue;
			}
		}
#endif				/* INET6 */

		/*
		 * Check if the IPv6 address matches to next-hop. In the
		 * mobile case, the old IPv6 address may not be deleted from
		 * the interface. Then, the interface has previous and new
		 * addresses. We should use one corresponding to the
		 * next-hop. (by micchie)
		 */
#ifdef INET6
		if (stcb && fam == AF_INET6 &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
			    == 0) {
				continue;
			}
		}
#endif
		/* Avoid topologically incorrect IPv4 address */
		if (stcb && fam == AF_INET &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
				continue;
			}
		}
		if (stcb) {
			if (sctp_is_address_in_scope(ifa,
			    stcb->asoc.ipv4_addr_legal,
			    stcb->asoc.ipv6_addr_legal,
			    stcb->asoc.loopback_scope,
			    stcb->asoc.ipv4_local_scope,
			    stcb->asoc.local_scope,
			    stcb->asoc.site_scope, 0) == 0) {
				continue;
			}
			if (((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			    (sctp_is_addr_restricted(stcb, sifa)) &&
			    (!sctp_is_addr_pending(stcb, sifa)))) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		if (num_eligible_addr >= addr_wanted) {
			return (sifa);
		}
		num_eligible_addr++;
	}
	return (NULL);
}
static int
sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
    struct sctp_tcb *stcb,
    int non_asoc_addr_ok,
    uint8_t dest_is_loop,
    uint8_t dest_is_priv,
    sa_family_t fam)
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;

	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0)) {
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL) {
			continue;
		}
		if (stcb) {
			if (sctp_is_address_in_scope(ifa,
			    stcb->asoc.ipv4_addr_legal,
			    stcb->asoc.ipv6_addr_legal,
			    stcb->asoc.loopback_scope,
			    stcb->asoc.ipv4_local_scope,
			    stcb->asoc.local_scope,
			    stcb->asoc.site_scope, 0) == 0) {
				continue;
			}
			if (((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			    (sctp_is_addr_restricted(stcb, sifa)) &&
			    (!sctp_is_addr_pending(stcb, sifa)))) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		num_eligible_addr++;
	}
	return (num_eligible_addr);
}
static struct sctp_ifa *
sctp_choose_boundall(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    sctp_route_t * ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
	int cur_addr_num = 0, num_preferred = 0;
	void *ifn;
	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint32_t ifn_index;
	struct sctp_vrf *vrf;

	/*-
	 * For boundall we can use any address in the association.
	 * If non_asoc_addr_ok is set we can use any address (at least in
	 * theory). So we look for preferred addresses first. If we find one,
	 * we use it. Otherwise we next try to get an address on the
	 * interface, which we should be able to do (unless non_asoc_addr_ok
	 * is false and we are routed out that way). In these cases where we
	 * can't use the address of the interface we go through all the
	 * ifn's looking for an address we can use and fill that in. Punting
	 * means we send back address 0, which will probably cause problems
	 * actually since then IP will fill in the address of the route ifn,
	 * which means we probably already rejected it.. i.e. here comes an
	 * abort.
	 */
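	/*
	 * Road map of the fallback chain below (editorial summary):
	 * plan A: a preferred address on the interface the route exits by;
	 * plan B: a preferred address on any other interface;
	 * plan C: an acceptable address on the emit interface;
	 * plan D: an acceptable address on any interface.  If even that
	 * fails there is no usable source and the caller gets NULL.
	 */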
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	if (sctp_ifn == NULL) {
		/* ?? We don't have this guy ?? */
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No ifn emit interface?\n");
		goto bound_all_plan_b;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifn_index:%d name:%s is emit interface\n",
	    ifn_index, sctp_ifn->ifn_name);
	if (net) {
		cur_addr_num = net->indx_of_eligible_next_to_use;
	}
	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
	    stcb,
	    non_asoc_addr_ok,
	    dest_is_loop,
	    dest_is_priv, fam);
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
	    num_preferred, sctp_ifn->ifn_name);
	if (num_preferred == 0) {
		/*
		 * no eligible addresses, we must use some other interface
		 * address if we can find one.
		 */
		goto bound_all_plan_b;
	}
	/*
	 * Ok we have num_eligible_addr set with how many we can use, this
	 * may vary from call to call due to addresses being deprecated
	 * etc..
	 */
	if (cur_addr_num >= num_preferred) {
		cur_addr_num = 0;
	}
	/*
	 * select the nth address from the list (where cur_addr_num is the
	 * nth) and 0 is the first one, 1 is the second one etc...
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
	    dest_is_priv, cur_addr_num, fam, ro);
	/* if sctp_ifa is NULL something changed??, fall to plan b. */
	if (sctp_ifa) {
		atomic_add_int(&sctp_ifa->refcount, 1);
		if (net) {
			/* save off where the next one we will want */
			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
		}
		return (sctp_ifa);
	}
	/*
	 * plan_b: Look at all interfaces and find a preferred address. If
	 * no preferred fall through to plan_c.
	 */
bound_all_plan_b:
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
		    sctp_ifn->ifn_name);
		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
			/* wrong base scope */
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
			continue;
		}
		if ((sctp_ifn == looked_at) && looked_at) {
			/* already looked at this guy */
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
			continue;
		}
		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
		    dest_is_loop, dest_is_priv, fam);
		SCTPDBG(SCTP_DEBUG_OUTPUT2,
		    "Found ifn:%p %d preferred source addresses\n",
		    ifn, num_preferred);
		if (num_preferred == 0) {
			/* None on this interface. */
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
			continue;
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT2,
		    "num preferred:%d on interface:%p cur_addr_num:%d\n",
		    num_preferred, sctp_ifn, cur_addr_num);

		/*
		 * Ok we have num_eligible_addr set with how many we can
		 * use, this may vary from call to call due to addresses
		 * being deprecated etc..
		 */
		if (cur_addr_num >= num_preferred) {
			cur_addr_num = 0;
		}
		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
		    dest_is_priv, cur_addr_num, fam, ro);
		if (sifa == NULL)
			continue;
		if (net) {
			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
			    cur_addr_num);
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
		}
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	/* plan_c: do we have an acceptable address on the emit interface */
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan C: find acceptable on interface\n");
	if (emit_ifn == NULL) {
		goto plan_d;
	}
	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0))
			continue;
		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
		    dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (stcb) {
			if (sctp_is_address_in_scope(sifa,
			    stcb->asoc.ipv4_addr_legal,
			    stcb->asoc.ipv6_addr_legal,
			    stcb->asoc.loopback_scope,
			    stcb->asoc.ipv4_local_scope,
			    stcb->asoc.local_scope,
			    stcb->asoc.site_scope, 0) == 0) {
				continue;
			}
			if (((non_asoc_addr_ok == 0) &&
			    (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			    (sctp_is_addr_restricted(stcb, sifa)) &&
			    (!sctp_is_addr_pending(stcb, sifa)))) {
				/*
				 * It is restricted for some reason..
				 * probably not yet added.
				 */
				continue;
			}
		}
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	/*
	 * plan_d: We are in trouble. No preferred address on the emit
	 * interface. And not even a preferred address on all interfaces. Go
	 * out and see if we can find an acceptable address somewhere
	 * amongst all interfaces.
	 */
plan_d:
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D\n");
	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
			/* wrong base scope */
			continue;
		}
		if ((sctp_ifn == looked_at) && looked_at)
			/* already looked at this guy */
			continue;

		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
			    (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
			    dest_is_loop,
			    dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (stcb) {
				if (sctp_is_address_in_scope(sifa,
				    stcb->asoc.ipv4_addr_legal,
				    stcb->asoc.ipv6_addr_legal,
				    stcb->asoc.loopback_scope,
				    stcb->asoc.ipv4_local_scope,
				    stcb->asoc.local_scope,
				    stcb->asoc.site_scope, 0) == 0) {
					continue;
				}
				if (((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				    (sctp_is_addr_restricted(stcb, sifa)) &&
				    (!sctp_is_addr_pending(stcb, sifa)))) {
					/*
					 * It is restricted for some
					 * reason.. probably not yet added.
					 */
					continue;
				}
			}
			atomic_add_int(&sifa->refcount, 1);
			return (sifa);
		}
	}
	/*
	 * Ok we can find NO address to source from that is not on our
	 * restricted list and non_asoc_address is NOT ok, or it is on our
	 * restricted list. We can't source to it :-(
	 */
	return (NULL);
}
/* tcb may be NULL */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    sctp_route_t * ro,
    struct sctp_nets *net,
    int non_asoc_addr_ok, uint32_t vrf_id)
{
	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;

#ifdef INET6
	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;

#endif
	struct sctp_ifa *answer;
	uint8_t dest_is_priv, dest_is_loop;
	sa_family_t fam;
	/*-
	 * Rules: - Find the route if needed, cache if I can. - Look at
	 * interface address in route, Is it in the bound list. If so we
	 * have the best source. - If not we must rotate amongst the
	 * addresses.
	 *
	 * Do we need to pay attention to scope. We can have a private address
	 * or a global address we are sourcing or sending to. So if we draw
	 * it out:
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 * For V4
	 * ------------------------------------------
	 *      source     *      dest       *  result
	 * -----------------------------------------
	 * <a>  Private    *     Global      *  NAT
	 * -----------------------------------------
	 * <b>  Private    *     Private     *  No problem
	 * -----------------------------------------
	 * <c>  Global     *     Private     *  Huh, How will this work?
	 * -----------------------------------------
	 * <d>  Global     *     Global      *  No Problem
	 *------------------------------------------
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 * For V6
	 *------------------------------------------
	 *      source     *      dest       *  result
	 * -----------------------------------------
	 * <a>  Linklocal  *     Global      *
	 * -----------------------------------------
	 * <b>  Linklocal  *     Linklocal   *  No problem
	 * -----------------------------------------
	 * <c>  Global     *     Linklocal   *  Huh, How will this work?
	 * -----------------------------------------
	 * <d>  Global     *     Global      *  No Problem
	 *------------------------------------------
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 *
	 * And then we add to that what happens if there are multiple addresses
	 * assigned to an interface. Remember the ifa on an ifn is a linked
	 * list of addresses. So one interface can have more than one IP
	 * address. What happens if we have both a private and a global
	 * address? Do we then use context of destination to sort out which
	 * one is best? And what about NAT's sending P->G may get you a NAT
	 * translation, or should you select the G that's on the interface in
	 * preference.
	 *
	 * - count the number of addresses on the interface.
	 * - if it is one, no problem except case <c>.
	 *   For <a> we will assume a NAT out there.
	 * - if there are more than one, then we need to worry about scope P
	 *   or G. We should prefer G -> G and P -> P if possible.
	 *   Then as a secondary fall back to mixed types G->P being a last
	 *   ditch one.
	 * - The above all works for bound all, but bound specific we need to
	 *   use the same concept but instead only consider the bound
	 *   addresses. If the bound set is NOT assigned to the interface then
	 *   we must use rotation amongst the bound addresses..
	 */
	if (ro->ro_rt == NULL) {
		/*
		 * Need a route to cache.
		 */
		SCTP_RTALLOC(ro, vrf_id);
	}
	if (ro->ro_rt == NULL) {
		return (NULL);
	}
	fam = to->sin_family;
	dest_is_priv = dest_is_loop = 0;
	/* Setup our scopes for the destination */
	switch (fam) {
	case AF_INET:
		/* Scope based on outbound address */
		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
			dest_is_priv = 1;
		}
		break;
#ifdef INET6
	case AF_INET6:
		/* Scope based on outbound address */
		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
			/*
			 * If the address is a loopback address, which
			 * consists of "::1" OR "fe80::1%lo0", we are
			 * loopback scope. But we don't use dest_is_priv
			 * (link local addresses).
			 */
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
			dest_is_priv = 1;
		}
		break;
#endif
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)to);
	SCTP_IPI_ADDR_RLOCK();
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * Bound all case
		 */
		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
		    dest_is_priv, dest_is_loop,
		    non_asoc_addr_ok, fam);
		SCTP_IPI_ADDR_RUNLOCK();
		return (answer);
	}
	/*
	 * Subset bound case
	 */
	if (stcb) {
		answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro,
		    vrf_id, dest_is_priv,
		    dest_is_loop,
		    non_asoc_addr_ok, fam);
	} else {
		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
		    non_asoc_addr_ok,
		    dest_is_priv,
		    dest_is_loop, fam);
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (answer);
}
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
{
	struct cmsghdr cmh;
	int tlen, at;

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	/*
	 * Independent of how many mbufs, find the c_type inside the control
	 * structure and copy out the data.
	 */
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* not enough room for one more we are done. */
			return (0);
		}
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if (((int)cmh.cmsg_len + at) > tlen) {
			/*
			 * this is real messed up since there is not enough
			 * data here to cover the cmsg header. We are done.
			 */
			return (0);
		}
		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
		    (c_type == cmh.cmsg_type)) {
			/* found the one we want, copy it out */
			at += CMSG_ALIGN(sizeof(struct cmsghdr));
			if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
				/*
				 * space of cmsg_len after header not big
				 * enough
				 */
				return (0);
			}
			m_copydata(control, at, cpsize, data);
			return (1);
		} else {
			at += CMSG_ALIGN(cmh.cmsg_len);
			if (cmh.cmsg_len == 0) {
				break;
			}
		}
	}
	/* not found */
	return (0);
}
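/*
 * Typical use (a sketch, not taken from this file): pulling the send-side
 * options a user attached to a send call, e.g.
 *
 *	struct sctp_sndrcvinfo srcv;
 *
 *	if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control, sizeof(srcv)))
 *		the caller can then use srcv.sinfo_stream, srcv.sinfo_flags, etc.
 */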
static struct mbuf *
sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t ** signature)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint8_t *foo;
	int sig_offset;
	uint16_t cookie_sz;

	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr)), 0,
	    M_DONTWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		mat = copy_init;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_DONTWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		mat = copy_initack;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
	/* easy side we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end */
	/* Fill in the stc cookie data */
	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
	/* tack the INIT and then the INIT-ACK onto the chain */
	cookie_sz = 0;
	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_init;
			break;
		}
	}
	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_initack;
			break;
		}
	}
	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			break;
		}
	}
	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
	if (sig == NULL) {
		/* no space, so free the entire chain */
		sctp_m_freem(mret);
		return (NULL);
	}
	SCTP_BUF_LEN(sig) = 0;
	SCTP_BUF_NEXT(m_at) = sig;
	sig_offset = 0;
	foo = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
	memset(foo, 0, SCTP_SIGNATURE_SIZE);
	*signature = foo;
	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
	cookie_sz += SCTP_SIGNATURE_SIZE;
	ph->param_length = htons(cookie_sz);
	return (mret);
}
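/*
 * The cookie built above is laid out, in order: the parameter header
 * (type SCTP_STATE_COOKIE), the sctp_state_cookie block, a copy of the
 * peer's INIT, a copy of our INIT-ACK, and finally SCTP_SIGNATURE_SIZE
 * bytes of zeroed space whose location is handed back through *signature
 * so the caller can drop in the HMAC before the cookie is sent.
 */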
static uint8_t
sctp_get_ect(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	uint8_t this_random;

	if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 0)
		return (0);

	if (SCTP_BASE_SYSCTL(sctp_ecn_nonce) == 0)
		/* no nonce, always return ECT0 */
		return (SCTP_ECT0_BIT);

	if (stcb->asoc.peer_supports_ecn_nonce == 0) {
		/* Peer does NOT support it, so we send a ECT0 only */
		return (SCTP_ECT0_BIT);
	}
	if (chk == NULL)
		return (SCTP_ECT0_BIT);

	if ((stcb->asoc.hb_random_idx > 3) ||
	    ((stcb->asoc.hb_random_idx == 3) &&
	    (stcb->asoc.hb_ect_randombit > 7))) {
		uint32_t rndval;

warp_drive_sa:
		rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		memcpy(stcb->asoc.hb_random_values, &rndval,
		    sizeof(stcb->asoc.hb_random_values));
		this_random = stcb->asoc.hb_random_values[0];
		stcb->asoc.hb_random_idx = 0;
		stcb->asoc.hb_ect_randombit = 0;
	} else {
		if (stcb->asoc.hb_ect_randombit > 7) {
			stcb->asoc.hb_ect_randombit = 0;
			stcb->asoc.hb_random_idx++;
			if (stcb->asoc.hb_random_idx > 3) {
				goto warp_drive_sa;
			}
		}
		this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
	}
	if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
		/* ECN Nonce stuff */
		chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
		stcb->asoc.hb_ect_randombit++;
		return (SCTP_ECT1_BIT);
	} else {
		stcb->asoc.hb_ect_randombit++;
		return (SCTP_ECT0_BIT);
	}
}
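/*
 * A note on the randomness above: one 32-bit value from
 * sctp_select_initial_TSN() is cached in hb_random_values[] and then
 * consumed a bit at a time, hb_random_idx selecting the byte and
 * hb_ect_randombit the bit within it, so a fresh random word is only
 * needed once every 32 ECT marks.
 */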
static int
sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,	/* may be NULL */
    struct sctp_nets *net,
    struct sockaddr *to,
    struct mbuf *m,
    uint32_t auth_offset,
    struct sctp_auth_chunk *auth,
    uint16_t auth_keyid,
    int nofragment_flag,
    int ecn_ok,
    struct sctp_tmit_chunk *chk,
    int out_of_asoc_ok,
    uint16_t src_port,
    uint16_t dest_port,
    uint32_t v_tag,
    uint16_t port,
    int so_locked,
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    union sctp_sockstore *over_addr
)
/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
{
	/*-
	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
	 * header WITH an SCTPHDR but no IP header, endpoint inp and sa
	 * structure:
	 * - fill in the HMAC digest of any AUTH chunk in the packet.
	 * - calculate and fill in the SCTP checksum.
	 * - prepend an IP address header.
	 * - if boundall use INADDR_ANY.
	 * - if boundspecific do source address selection.
	 * - set fragmentation option for ipV4.
	 * - On return from IP output, check/adjust mtu size of output
	 *   interface and smallest_mtu size as well.
	 */
	/* Will need ifdefs around this */
	struct mbuf *o_pak;
	struct mbuf *newm;
	struct sctphdr *sctphdr;
	int packet_length;
	int ret;
	uint32_t vrf_id;
	sctp_route_t *ro = NULL;
	struct udphdr *udp = NULL;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so = NULL;

#endif

	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		sctp_m_freem(m);
		return (EFAULT);
	}
	if (stcb) {
		vrf_id = stcb->asoc.vrf_id;
	} else {
		vrf_id = inp->def_vrf_id;
	}
	/* fill in the HMAC digest for any AUTH chunk in the packet */
	if ((auth != NULL) && (stcb != NULL)) {
		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
	}
	if (to->sa_family == AF_INET) {
		struct ip *ip = NULL;
		sctp_route_t iproute;
		uint8_t tos_value;
		int len;

		len = sizeof(struct ip) + sizeof(struct sctphdr);
		if (port) {
			len += sizeof(struct udphdr);
		}
		newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
		if (newm == NULL) {
			sctp_m_freem(m);
			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
		SCTP_ALIGN_TO_END(newm, len);
		SCTP_BUF_LEN(newm) = len;
		SCTP_BUF_NEXT(newm) = m;
		m = newm;
		packet_length = sctp_calculate_len(m);
		ip = mtod(m, struct ip *);
		ip->ip_v = IPVERSION;
		ip->ip_hl = (sizeof(struct ip) >> 2);
		if (net) {
			tos_value = net->tos_flowlabel & 0x000000ff;
		} else {
			tos_value = inp->ip_inp.inp.inp_ip_tos;
		}
		if ((nofragment_flag) && (port == 0)) {
			ip->ip_off = IP_DF;
		} else {
			ip->ip_off = 0;
		}
		/* FreeBSD has a function for ip_id's */
		ip->ip_id = ip_newid();

		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
		ip->ip_len = packet_length;
		if (stcb) {
			if ((stcb->asoc.ecn_allowed) && ecn_ok) {
				/* Enable ECN */
				ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk));
			} else {
				/* No ECN */
				ip->ip_tos = (u_char)(tos_value & 0xfc);
			}
		} else {
			/* no association at all */
			ip->ip_tos = (tos_value & 0xfc);
		}
		if (port) {
			ip->ip_p = IPPROTO_UDP;
		} else {
			ip->ip_p = IPPROTO_SCTP;
		}
		if (net == NULL) {
			memset(&iproute, 0, sizeof(iproute));
			ro = (sctp_route_t *) & iproute;
			memcpy(&ro->ro_dst, to, to->sa_len);
		} else {
			ro = (sctp_route_t *) & net->ro;
		}
		/* Now the address selection part */
		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;

		/* call the routine to select the src address */
		if (net && out_of_asoc_ok == 0) {
			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
				if (ro->ro_rt) {
					RTFREE(ro->ro_rt);
					ro->ro_rt = NULL;
				}
			}
			if (net->src_addr_selected == 0) {
				/* Cache the source address */
				net->ro._s_addr = sctp_source_address_selection(inp, stcb,
				    ro, net, 0,
				    vrf_id);
				net->src_addr_selected = 1;
			}
			if (net->ro._s_addr == NULL) {
				/* No route to host */
				net->src_addr_selected = 0;
				goto no_route;
			}
			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
		} else {
			if (over_addr == NULL) {
				struct sctp_ifa *_lsrc;

				_lsrc = sctp_source_address_selection(inp, stcb, ro,
				    net,
				    out_of_asoc_ok,
				    vrf_id);
				if (_lsrc == NULL) {
					goto no_route;
				}
				ip->ip_src = _lsrc->address.sin.sin_addr;
				sctp_free_ifa(_lsrc);
			} else {
				ip->ip_src = over_addr->sin.sin_addr;
				SCTP_RTALLOC(ro, vrf_id);
			}
		}
		if (port) {
			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
			udp->uh_dport = port;
			udp->uh_ulen = htons(packet_length - sizeof(struct ip));
			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
		} else {
			sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
		}
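		/*
		 * When "port" is non-zero the packet is being tunneled as
		 * SCTP-over-UDP: a UDP header sits between the IP and SCTP
		 * headers, its source port comes from the
		 * sctp_udp_tunneling_port sysctl, and uh_sum is seeded with
		 * the pseudo-header partial sum so the UDP checksum can be
		 * finished later (see SCTP_ENABLE_UDP_CSUM below).
		 */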
		sctphdr->src_port = src_port;
		sctphdr->dest_port = dest_port;
		sctphdr->v_tag = v_tag;
		sctphdr->checksum = 0;
		/*
		 * If source address selection fails and we find no route
		 * then the ip_output should fail as well with a
		 * NO_ROUTE_TO_HOST type error. We probably should catch
		 * that somewhere and abort the association right away
		 * (assuming this is an INIT being sent).
		 */
		if ((ro->ro_rt == NULL)) {
			/*
			 * src addr selection failed to find a route (or
			 * valid source addr), so we can't get there from
			 * here (yet)!
			 */
	no_route:
			SCTPDBG(SCTP_DEBUG_OUTPUT1,
			    "%s: dropped packet - no valid source addr\n",
			    __FUNCTION__);
			if (net) {
				SCTPDBG(SCTP_DEBUG_OUTPUT1,
				    "Destination was ");
				SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1,
				    &net->ro._l_addr.sa);
				if (net->dest_state & SCTP_ADDR_CONFIRMED) {
					if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
						SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", net);
						sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
						    stcb,
						    SCTP_FAILED_THRESHOLD,
						    (void *)net, so_locked);
						net->dest_state &= ~SCTP_ADDR_REACHABLE;
						net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
						/*
						 * JRS 5/14/07 - If a
						 * destination is
						 * unreachable, the PF bit
						 * is turned off. This
						 * allows an unambiguous use
						 * of the PF bit for
						 * destinations that are
						 * reachable but potentially
						 * failed. If the
						 * destination is set to the
						 * unreachable state, also
						 * set the destination to
						 * the PF state.
						 */
						/*
						 * Add debug message here if
						 * destination is not in PF
						 * state.
						 */
						/*
						 * Stop any running T3
						 * timers here?
						 */
						if ((stcb->asoc.sctp_cmt_on_off == 1) &&
						    (stcb->asoc.sctp_cmt_pf > 0)) {
							net->dest_state &= ~SCTP_ADDR_PF;
							SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
							    net);
						}
					}
				}
				if (stcb) {
					if (net == stcb->asoc.primary_destination) {
						/* need a new primary */
						struct sctp_nets *alt;

						alt = sctp_find_alternate_net(stcb, net, 0);
						if (alt != net) {
							if (sctp_set_primary_addr(stcb,
							    (struct sockaddr *)NULL,
							    alt) == 0) {
								net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
								if (net->ro._s_addr) {
									sctp_free_ifa(net->ro._s_addr);
									net->ro._s_addr = NULL;
								}
								net->src_addr_selected = 0;
							}
						}
					}
				}
			}
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
			sctp_m_freem(m);
			return (EHOSTUNREACH);
		}
		if (ro != &iproute) {
			memcpy(&iproute, ro, sizeof(*ro));
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
		    (uint32_t) (ntohl(ip->ip_src.s_addr)));
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
		    (uint32_t) (ntohl(ip->ip_dst.s_addr)));
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
		    ro->ro_rt);
		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
			/* failed to prepend data, give up */
			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			sctp_m_freem(m);
			return (ENOMEM);
		}
#ifdef SCTP_PACKET_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
			sctp_packet_log(m, packet_length);
#endif
		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
		if (port) {
			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
			    (stcb) &&
			    (stcb->asoc.loopback_scope))) {
				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
				SCTP_STAT_INCR(sctps_sendswcrc);
			} else {
				SCTP_STAT_INCR(sctps_sendnocrc);
			}
			SCTP_ENABLE_UDP_CSUM(o_pak);
		} else {
			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
			    (stcb) &&
			    (stcb->asoc.loopback_scope))) {
				m->m_pkthdr.csum_flags = CSUM_SCTP;
				m->m_pkthdr.csum_data = 0;
				SCTP_STAT_INCR(sctps_sendhwcrc);
			} else {
				SCTP_STAT_INCR(sctps_sendnocrc);
			}
		}
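		/*
		 * Checksum strategy, in short: for tunneled packets the
		 * CRC32c is computed in software over the SCTP portion; for
		 * plain SCTP it is delegated to the NIC via CSUM_SCTP.  In
		 * both cases the checksum may be skipped entirely for
		 * loopback traffic when the sctp_no_csum_on_loopback sysctl
		 * permits.
		 */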
		/* send it out.  table id is taken from stcb */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
			so = SCTP_INP_SO(inp);
			SCTP_SOCKET_UNLOCK(so, 0);
		}
#endif
		SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 0);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
		}
#endif
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		if (ret)
			SCTP_STAT_INCR(sctps_senderrors);

		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
		if (net == NULL) {
			/* free temporary routes */
			if (ro->ro_rt) {
				RTFREE(ro->ro_rt);
				ro->ro_rt = NULL;
			}
		} else {
			/* PMTU check versus smallest asoc MTU goes here */
			if ((ro->ro_rt != NULL) &&
			    (net->ro._s_addr)) {
				uint32_t mtu;

				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
				if (mtu && (port)) {
					mtu -= sizeof(struct udphdr);
				}
				if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
#ifdef SCTP_PRINT_FOR_B_AND_M
					SCTP_PRINTF("sctp_mtu_size_reset called after ip_output mtu-change:%d\n", mtu);
#endif
					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
					net->mtu = mtu;
				}
			} else if (ro->ro_rt == NULL) {
				/* route was freed */
				if (net->ro._s_addr &&
				    net->src_addr_selected) {
					sctp_free_ifa(net->ro._s_addr);
					net->ro._s_addr = NULL;
				}
				net->src_addr_selected = 0;
			}
		}
		return (ret);
	}
#ifdef INET6
	else if (to->sa_family == AF_INET6) {
		uint32_t flowlabel;
		struct ip6_hdr *ip6h;
		struct route_in6 ip6route;
		struct ifnet *ifp;
		uint32_t flowTop;
		uint16_t flowBottom;
		u_char tosBottom, tosTop;
		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
		int prev_scope = 0;
		struct sockaddr_in6 lsa6_storage;
		int error;
		u_short prev_port = 0;
		int len;

		if (net) {
			flowlabel = net->tos_flowlabel;
		} else {
			flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
		}
3844 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr);
3846 len += sizeof(struct udphdr);
3848 newm = sctp_get_mbuf_for_msg(len, 1, M_DONTWAIT, 1, MT_DATA);
3851 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
3854 SCTP_ALIGN_TO_END(newm, len);
3855 SCTP_BUF_LEN(newm) = len;
3856 SCTP_BUF_NEXT(newm) = m;
3858 packet_length = sctp_calculate_len(m);
3860 ip6h = mtod(m, struct ip6_hdr *);
3862 * We assume here that inp_flow is in host byte order within the TCB.
3865 flowBottom = flowlabel & 0x0000ffff;
3866 flowTop = ((flowlabel & 0x000f0000) >> 16);
3867 tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
3868 /* protect *sin6 from overwrite */
3869 sin6 = (struct sockaddr_in6 *)to;
3873 /* KAME hack: embed scopeid */
3874 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
3875 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3879 memset(&ip6route, 0, sizeof(ip6route));
3880 ro = (sctp_route_t *) & ip6route;
3881 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
3883 ro = (sctp_route_t *) & net->ro;
3886 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
3888 tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
3891 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
3894 /* we could get no asoc if it is an O-O-T-B packet */
3895 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
3897 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
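/*
 * Editor's illustration of the word assembled above (assuming
 * IPV6_VERSION is 0x60 as in <netinet/ip6.h>): after htonl() the
 * 32-bit ip6_flow follows the IPv6 header layout,
 *   bits 31-28: version (carried in the high nibble of tosTop),
 *   bits 27-20: traffic class (low nibble of tosTop, plus tosBottom
 *               holding the ECT bits),
 *   bits 19-0 : flow label (flowTop | flowBottom).
 */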
3899 ip6h->ip6_nxt = IPPROTO_UDP;
3901 ip6h->ip6_nxt = IPPROTO_SCTP;
3903 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr));
3904 ip6h->ip6_dst = sin6->sin6_addr;
3907 * Add SRC address selection here: we can only reuse to a
3908 * limited degree the kame src-addr-sel, since we can try
3909 * their selection but it may not be bound.
3911 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
3912 lsa6_tmp.sin6_family = AF_INET6;
3913 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
3915 if (net && out_of_asoc_ok == 0) {
3916 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED | SCTP_ADDR_IFA_UNUSEABLE))) {
3917 sctp_free_ifa(net->ro._s_addr);
3918 net->ro._s_addr = NULL;
3919 net->src_addr_selected = 0;
3925 if (net->src_addr_selected == 0) {
3926 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
3927 /* KAME hack: embed scopeid */
3928 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
3929 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3932 /* Cache the source address */
3933 net->ro._s_addr = sctp_source_address_selection(inp,
3939 (void)sa6_recoverscope(sin6);
3940 net->src_addr_selected = 1;
3942 if (net->ro._s_addr == NULL) {
3943 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
3944 net->src_addr_selected = 0;
3947 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
3949 sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
3950 /* KAME hack: embed scopeid */
3951 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
3952 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
3955 if (over_addr == NULL) {
3956 struct sctp_ifa *_lsrc;
3958 _lsrc = sctp_source_address_selection(inp, stcb, ro,
3962 if (_lsrc == NULL) {
3965 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
3966 sctp_free_ifa(_lsrc);
3968 lsa6->sin6_addr = over_addr->sin6.sin6_addr;
3969 SCTP_RTALLOC(ro, vrf_id);
3971 (void)sa6_recoverscope(sin6);
3973 lsa6->sin6_port = inp->sctp_lport;
3975 if (ro->ro_rt == NULL) {
3977 * src addr selection failed to find a route (or
3978 * valid source addr), so we can't get there from here.
3984 * XXX: sa6 may not have a valid sin6_scope_id in the
3985 * non-SCOPEDROUTING case.
3987 bzero(&lsa6_storage, sizeof(lsa6_storage));
3988 lsa6_storage.sin6_family = AF_INET6;
3989 lsa6_storage.sin6_len = sizeof(lsa6_storage);
3990 lsa6_storage.sin6_addr = lsa6->sin6_addr;
3991 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
3992 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
3997 lsa6_storage.sin6_addr = lsa6->sin6_addr;
3998 lsa6_storage.sin6_port = inp->sctp_lport;
3999 lsa6 = &lsa6_storage;
4000 ip6h->ip6_src = lsa6->sin6_addr;
4003 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4004 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
4005 udp->uh_dport = port;
4006 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr));
4008 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
4010 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
4013 sctphdr->src_port = src_port;
4014 sctphdr->dest_port = dest_port;
4015 sctphdr->v_tag = v_tag;
4016 sctphdr->checksum = 0;
4019 * We set the hop limit now since there is a good chance
4020 * that our ro pointer is now filled
4022 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
4023 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
4026 /* Copy to be sure something bad is not happening */
4027 sin6->sin6_addr = ip6h->ip6_dst;
4028 lsa6->sin6_addr = ip6h->ip6_src;
4031 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
4032 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
4033 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
4034 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
4035 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
4037 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
4038 /* preserve the port and scope for link local send */
4039 prev_scope = sin6->sin6_scope_id;
4040 prev_port = sin6->sin6_port;
4042 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
4043 /* failed to prepend data, give up */
4045 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
4048 #ifdef SCTP_PACKET_LOGGING
4049 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
4050 sctp_packet_log(m, packet_length);
4052 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
4054 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4056 (stcb->asoc.loopback_scope))) {
4057 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
4058 SCTP_STAT_INCR(sctps_sendswcrc);
4060 SCTP_STAT_INCR(sctps_sendnocrc);
4062 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
4063 udp->uh_sum = 0xffff;
4066 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
4068 (stcb->asoc.loopback_scope))) {
4069 m->m_pkthdr.csum_flags = CSUM_SCTP;
4070 m->m_pkthdr.csum_data = 0;
4071 SCTP_STAT_INCR(sctps_sendhwcrc);
4073 SCTP_STAT_INCR(sctps_sendnocrc);
4076 /* send it out. table id is taken from stcb */
4077 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4078 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4079 so = SCTP_INP_SO(inp);
4080 SCTP_SOCKET_UNLOCK(so, 0);
4083 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
4084 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4085 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
4086 atomic_add_int(&stcb->asoc.refcnt, 1);
4087 SCTP_TCB_UNLOCK(stcb);
4088 SCTP_SOCKET_LOCK(so, 0);
4089 SCTP_TCB_LOCK(stcb);
4090 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4094 /* for link local this must be done */
4095 sin6->sin6_scope_id = prev_scope;
4096 sin6->sin6_port = prev_port;
4098 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
4099 SCTP_STAT_INCR(sctps_sendpackets);
4100 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
4102 SCTP_STAT_INCR(sctps_senderrors);
4105 /* Now if we had a temp route, free it */
4110 /* PMTU check versus smallest asoc MTU goes here */
4111 if (ro->ro_rt == NULL) {
4112 /* Route was freed */
4113 if (net->ro._s_addr &&
4114 net->src_addr_selected) {
4115 sctp_free_ifa(net->ro._s_addr);
4116 net->ro._s_addr = NULL;
4118 net->src_addr_selected = 0;
4120 if ((ro->ro_rt != NULL) &&
4121 (net->ro._s_addr)) {
4124 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
4126 (stcb->asoc.smallest_mtu > mtu)) {
4127 #ifdef SCTP_PRINT_FOR_B_AND_M
4128 SCTP_PRINTF("sctp_mtu_size_reset called after ip6_output mtu-change:%d\n",
4131 sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
4134 net->mtu -= sizeof(struct udphdr);
4138 if (ND_IFINFO(ifp)->linkmtu &&
4139 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
4140 #ifdef SCTP_PRINT_FOR_B_AND_M
4141 SCTP_PRINTF("sctp_mtu_size_reset called via ifp ND_IFINFO() linkmtu:%d\n",
4142 ND_IFINFO(ifp)->linkmtu);
4144 sctp_mtu_size_reset(inp,
4146 ND_IFINFO(ifp)->linkmtu);
4154 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
4155 ((struct sockaddr *)to)->sa_family);
4157 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
4164 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
4165 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4170 struct mbuf *m, *m_at, *mp_last;
4171 struct sctp_nets *net;
4172 struct sctp_init_chunk *init;
4173 struct sctp_supported_addr_param *sup_addr;
4174 struct sctp_adaptation_layer_indication *ali;
4175 struct sctp_ecn_supported_param *ecn;
4176 struct sctp_prsctp_supported_param *prsctp;
4177 struct sctp_ecn_nonce_supported_param *ecn_nonce;
4178 struct sctp_supported_chunk_types_param *pr_supported;
4179 int cnt_inits_to = 0;
4184 /* INITs always go to the primary (and usually the ONLY) address */
4186 net = stcb->asoc.primary_destination;
4188 net = TAILQ_FIRST(&stcb->asoc.nets);
4193 /* we confirm any address we send an INIT to */
4194 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4195 (void)sctp_set_primary_addr(stcb, NULL, net);
4197 /* we confirm any address we send an INIT to */
4198 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
4200 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
4202 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
4204 * special hook: if we are sending to a link-local address, it will
4205 * not show up in our private address count.
4207 struct sockaddr_in6 *sin6l;
4209 sin6l = &net->ro._l_addr.sin6;
4210 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
4214 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4215 /* This case should not happen */
4216 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
4219 /* start the INIT timer */
4220 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4222 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
4224 /* No memory, INIT timer will re-attempt. */
4225 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
4228 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
4230 * assume peer supports asconf in order to be able to queue local
4231 * address changes while an INIT is in flight and before the assoc is established.
4234 stcb->asoc.peer_supports_asconf = 1;
4235 /* Now let's put the INIT chunk in place */
4236 init = mtod(m, struct sctp_init_chunk *);
4237 /* now the chunk header */
4238 init->ch.chunk_type = SCTP_INITIATION;
4239 init->ch.chunk_flags = 0;
4240 /* fill in later from mbuf we build */
4241 init->ch.chunk_length = 0;
4242 /* place in my tag */
4243 init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
4244 /* set up some of the credits. */
4245 init->init.a_rwnd = htonl(max(inp->sctp_socket ? SCTP_SB_LIMIT_RCV(inp->sctp_socket) : 0,
4246 SCTP_MINIMAL_RWND));
4248 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
4249 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
4250 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
4251 /* now the address restriction */
4252 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)init +
4254 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
4256 /* we support 2 types: IPv6/IPv4 */
4257 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint16_t));
4258 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4259 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
4261 /* we support 1 type: IPv4 */
4262 sup_addr->ph.param_length = htons(sizeof(*sup_addr) + sizeof(uint8_t));
4263 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
4264 sup_addr->addr_type[1] = htons(0); /* this is the padding */
4266 SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);
4267 /* adaptation layer indication parameter */
4268 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
4269 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4270 ali->ph.param_length = htons(sizeof(*ali));
4271 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4272 SCTP_BUF_LEN(m) += sizeof(*ali);
4273 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
4275 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
4276 /* Add NAT friendly parameter */
4277 struct sctp_paramhdr *ph;
4279 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4280 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
4281 ph->param_length = htons(sizeof(struct sctp_paramhdr));
4282 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
4283 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ph + sizeof(*ph));
4285 /* now any cookie time extensions */
4286 if (stcb->asoc.cookie_preserve_req) {
4287 struct sctp_cookie_perserve_param *cookie_preserve;
4289 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
4290 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
4291 cookie_preserve->ph.param_length = htons(
4292 sizeof(*cookie_preserve));
4293 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
4294 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
4295 ecn = (struct sctp_ecn_supported_param *)(
4296 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
4297 stcb->asoc.cookie_preserve_req = 0;
4300 if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
4301 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
4302 ecn->ph.param_length = htons(sizeof(*ecn));
4303 SCTP_BUF_LEN(m) += sizeof(*ecn);
4304 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
4307 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
4309 /* And now tell the peer we do pr-sctp */
4310 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
4311 prsctp->ph.param_length = htons(sizeof(*prsctp));
4312 SCTP_BUF_LEN(m) += sizeof(*prsctp);
4314 /* And now tell the peer we do all the extensions */
4315 pr_supported = (struct sctp_supported_chunk_types_param *)
4316 ((caddr_t)prsctp + sizeof(*prsctp));
4317 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4319 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4320 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4321 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4322 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4323 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4324 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4325 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4327 if (stcb->asoc.sctp_nr_sack_on_off == 1) {
4328 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
4330 p_len = sizeof(*pr_supported) + num_ext;
4331 pr_supported->ph.param_length = htons(p_len);
4332 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
4333 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
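/*
 * Worked example (editor's sketch, assuming sizeof(*pr_supported) is
 * just the 4-byte parameter header): with AUTH and NR-SACK both on,
 * num_ext == 7 and p_len == 11. param_length carries the unpadded 11,
 * the mbuf grows by SCTP_SIZE32(11) == 12, and the bzero() clears the
 * single pad byte so the next parameter starts 32-bit aligned.
 */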
4336 /* ECN nonce: And now tell the peer we support ECN nonce */
4337 if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
4338 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
4339 ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
4340 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
4341 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
4342 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
4344 /* add authentication parameters */
4345 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
4346 struct sctp_auth_random *randp;
4347 struct sctp_auth_hmac_algo *hmacs;
4348 struct sctp_auth_chunk_list *chunks;
4350 /* attach RANDOM parameter, if available */
4351 if (stcb->asoc.authinfo.random != NULL) {
4352 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4353 p_len = sizeof(*randp) + stcb->asoc.authinfo.random_len;
4354 /* random key already contains the header */
4355 bcopy(stcb->asoc.authinfo.random->key, randp, p_len);
4356 /* zero out any padding required */
4357 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
4358 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4360 /* add HMAC_ALGO parameter */
4361 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4362 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
4363 (uint8_t *) hmacs->hmac_ids);
4365 p_len += sizeof(*hmacs);
4366 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4367 hmacs->ph.param_length = htons(p_len);
4368 /* zero out any padding required */
4369 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
4370 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4372 /* add CHUNKS parameter */
4373 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4374 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
4375 chunks->chunk_types);
4377 p_len += sizeof(*chunks);
4378 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4379 chunks->ph.param_length = htons(p_len);
4380 /* zero out any padding required */
4381 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
4382 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4386 /* now the addresses */
4388 struct sctp_scoping scp;
4391 * To optimize this we could put the scoping stuff into a
4392 * structure and remove the individual uint8's from the
4393 * assoc structure. Then we could just pass in the address
4394 * within the stcb; but for now this is a quick hack to get
4395 * the address stuff teased apart.
4397 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
4398 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
4399 scp.loopback_scope = stcb->asoc.loopback_scope;
4400 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
4401 scp.local_scope = stcb->asoc.local_scope;
4402 scp.site_scope = stcb->asoc.site_scope;
4404 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
4407 /* calculate the size and update pkt header and chunk header */
4409 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4410 if (SCTP_BUF_NEXT(m_at) == NULL)
4412 p_len += SCTP_BUF_LEN(m_at);
4414 init->ch.chunk_length = htons(p_len);
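/*
 * Editor's sketch of the pass above: p_len sums SCTP_BUF_LEN() over
 * the chain, so e.g. mbufs of 80 + 36 + 16 bytes give chunk_length =
 * htons(132); the padval handling below then pads the final mbuf out
 * to a 4-byte boundary as SCTP chunks require.
 */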
4416 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
4417 * here since the timer will drive a retransmission.
4420 /* I don't expect this to execute but we will be safe here */
4422 if ((padval) && (mp_last)) {
4424 * The compiler worries that mp_last may not be set even
4425 * though I think it is impossible :-> however we check
4426 * mp_last here just in case.
4428 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
4430 /* Houston we have a problem, no space */
4436 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
4437 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4438 (struct sockaddr *)&net->ro._l_addr,
4439 m, 0, NULL, 0, 0, 0, NULL, 0,
4440 inp->sctp_lport, stcb->rport, htonl(0),
4441 net->port, so_locked, NULL);
4442 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret);
4443 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4444 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
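/*
 * Editor's summary of the INIT assembled above (illustrative, not from
 * the original source). The chunk carries, in order: the chunk header
 * and init fields (vtag, a_rwnd, stream counts, initial TSN), then
 * Supported Address Types, Adaptation Layer Indication, [NAT friendly],
 * [Cookie Preservative], [ECN Capable], PR-SCTP Supported, Supported
 * Chunk Extensions, [ECN Nonce], [RANDOM/HMAC-ALGO/CHUNKS for AUTH],
 * and finally the local addresses; bracketed parameters appear only
 * when the matching sysctl or association state enables them.
 */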
4448 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4449 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
4452 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
4453 * equal to the beginning of the parameters, i.e. (iphlen +
4454 * sizeof(struct sctp_init_msg)), parse through the parameters to the
4455 * end of the mbuf verifying that all parameters are known.
4457 * For unknown parameters build and return an mbuf with
4458 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4459 * processing this chunk, stop, and set *abort_processing to 1.
4461 * By having param_offset be pre-set to where parameters begin, it is
4462 * hoped that this routine may be reused in the future by new features.
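/*
 * Editor's note, matching the ptype checks below (and RFC 4960,
 * section 3.2.1): the two high-order bits of a parameter type encode
 * the action for an unrecognized parameter --
 *   00 stop processing,          01 stop and report,
 *   10 skip and continue,        11 skip, continue, and report.
 * Hence (ptype & 0x4000) selects "report an operr" and a clear 0x8000
 * bit selects "stop processing".
 */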
4465 struct sctp_paramhdr *phdr, params;
4467 struct mbuf *mat, *op_err;
4468 char tempbuf[SCTP_PARAM_BUFFER_SIZE];
4469 int at, limit, pad_needed;
4470 uint16_t ptype, plen, padded_size;
4473 *abort_processing = 0;
4476 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4479 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
4480 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4481 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4482 ptype = ntohs(phdr->param_type);
4483 plen = ntohs(phdr->param_length);
4484 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
4485 /* whacked parameter */
4486 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
4489 limit -= SCTP_SIZE32(plen);
4491 * All parameters for all chunks that we know/understand are
4492 * listed here. We process them in other places and take
4493 * appropriate stop actions per the upper bits. However, this
4494 * is the generic routine processors can call to get back
4495 * an operr to either incorporate (init-ack) or send.
4497 padded_size = SCTP_SIZE32(plen);
4499 /* Params with variable size */
4500 case SCTP_HEARTBEAT_INFO:
4501 case SCTP_STATE_COOKIE:
4502 case SCTP_UNRECOG_PARAM:
4503 case SCTP_ERROR_CAUSE_IND:
4507 /* Params with variable size within a range */
4508 case SCTP_CHUNK_LIST:
4509 case SCTP_SUPPORTED_CHUNK_EXT:
4510 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
4511 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
4516 case SCTP_SUPPORTED_ADDRTYPE:
4517 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
4518 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
4524 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
4525 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
4530 case SCTP_SET_PRIM_ADDR:
4531 case SCTP_DEL_IP_ADDRESS:
4532 case SCTP_ADD_IP_ADDRESS:
4533 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
4534 (padded_size != sizeof(struct sctp_asconf_addr_param))) {
4535 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
4540 /* Params with a fixed size */
4541 case SCTP_IPV4_ADDRESS:
4542 if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
4543 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
4548 case SCTP_IPV6_ADDRESS:
4549 if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
4550 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
4555 case SCTP_COOKIE_PRESERVE:
4556 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
4557 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
4562 case SCTP_HAS_NAT_SUPPORT:
4565 case SCTP_ECN_NONCE_SUPPORTED:
4566 case SCTP_PRSCTP_SUPPORTED:
4568 if (padded_size != sizeof(struct sctp_paramhdr)) {
4569 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecnnonce/prsctp/nat support %d\n", plen);
4574 case SCTP_ECN_CAPABLE:
4575 if (padded_size != sizeof(struct sctp_ecn_supported_param)) {
4576 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
4581 case SCTP_ULP_ADAPTATION:
4582 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
4583 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
4588 case SCTP_SUCCESS_REPORT:
4589 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
4590 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
4595 case SCTP_HOSTNAME_ADDRESS:
4597 /* We can NOT handle HOST NAME addresses!! */
4600 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
4601 *abort_processing = 1;
4602 if (op_err == NULL) {
4603 /* Ok need to try to get an mbuf */
4605 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4607 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4610 l_len += sizeof(struct sctp_paramhdr);
4611 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4613 SCTP_BUF_LEN(op_err) = 0;
4615 * pre-reserve space for ip
4616 * and sctp header and chunk header
4620 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4622 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4624 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4625 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4629 /* If we have space */
4630 struct sctp_paramhdr s;
4633 uint32_t cpthis = 0;
4635 pad_needed = 4 - (err_at % 4);
4636 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4637 err_at += pad_needed;
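/*
 * Worked example (editor's sketch): this pad runs only when err_at is
 * misaligned; e.g. err_at == 6 gives pad_needed == 2, two zero bytes
 * are copied back, and the error cause below then starts on a 4-byte
 * boundary.
 */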
4639 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
4640 s.param_length = htons(sizeof(s) + plen);
4641 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4642 err_at += sizeof(s);
4643 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
4645 sctp_m_freem(op_err);
4647 * we are out of memory but
4648 * we still need to have a
4649 * look at what to do (the
4650 * system is in trouble though).
4655 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
4663 * we do not recognize the parameter; figure out what to do.
4666 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
4667 if ((ptype & 0x4000) == 0x4000) {
4668 /* Report bit is set?? */
4669 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
4670 if (op_err == NULL) {
4673 /* Ok need to try to get an mbuf */
4675 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4677 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4680 l_len += sizeof(struct sctp_paramhdr);
4681 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4683 SCTP_BUF_LEN(op_err) = 0;
4685 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4687 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4689 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4690 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4694 /* If we have space */
4695 struct sctp_paramhdr s;
4698 uint32_t cpthis = 0;
4700 pad_needed = 4 - (err_at % 4);
4701 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4702 err_at += pad_needed;
4704 s.param_type = htons(SCTP_UNRECOG_PARAM);
4705 s.param_length = htons(sizeof(s) + plen);
4706 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4707 err_at += sizeof(s);
4708 if (plen > sizeof(tempbuf)) {
4709 plen = sizeof(tempbuf);
4711 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf), plen));
4713 sctp_m_freem(op_err);
4715 * we are out of memory but
4716 * we still need to have a
4717 * look at what to do (the
4718 * system is in trouble though).
4722 goto more_processing;
4724 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
4729 if ((ptype & 0x8000) == 0x0000) {
4730 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
4733 /* skip this chunk and continue processing */
4734 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
4735 at += SCTP_SIZE32(plen);
4740 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4744 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
4745 *abort_processing = 1;
4746 if ((op_err == NULL) && phdr) {
4750 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4752 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4754 l_len += (2 * sizeof(struct sctp_paramhdr));
4755 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4757 SCTP_BUF_LEN(op_err) = 0;
4759 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4761 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
4763 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4764 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4767 if ((op_err) && phdr) {
4768 struct sctp_paramhdr s;
4771 uint32_t cpthis = 0;
4773 pad_needed = 4 - (err_at % 4);
4774 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4775 err_at += pad_needed;
4777 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4778 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
4779 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4780 err_at += sizeof(s);
4781 /* Only copy back the p-hdr that caused the issue */
4782 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
4788 sctp_are_there_new_addresses(struct sctp_association *asoc,
4789 struct mbuf *in_initpkt, int iphlen, int offset)
4792 * Given an INIT packet, look through the packet to verify that there
4793 * are NO new addresses. As we go through the parameters add reports
4794 * of any un-understood parameters that require an error. Also we
4795 * must return (1) to drop the packet if we see an un-understood
4796 * parameter that tells us to drop the chunk.
4798 struct sockaddr_in sin4, *sa4;
4801 struct sockaddr_in6 sin6, *sa6;
4804 struct sockaddr *sa_touse;
4805 struct sockaddr *sa;
4806 struct sctp_paramhdr *phdr, params;
4810 struct ip6_hdr *ip6h;
4814 uint16_t ptype, plen;
4817 struct sctp_nets *net;
4819 memset(&sin4, 0, sizeof(sin4));
4821 memset(&sin6, 0, sizeof(sin6));
4823 sin4.sin_family = AF_INET;
4824 sin4.sin_len = sizeof(sin4);
4826 sin6.sin6_family = AF_INET6;
4827 sin6.sin6_len = sizeof(sin6);
4830 /* First, what about the src address of the pkt? */
4831 iph = mtod(in_initpkt, struct ip *);
4832 switch (iph->ip_v) {
4834 /* source addr is IPv4 */
4835 sin4.sin_addr = iph->ip_src;
4836 sa_touse = (struct sockaddr *)&sin4;
4839 case IPV6_VERSION >> 4:
4840 /* source addr is IPv6 */
4841 ip6h = mtod(in_initpkt, struct ip6_hdr *);
4842 sin6.sin6_addr = ip6h->ip6_src;
4843 sa_touse = (struct sockaddr *)&sin6;
4851 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4852 sa = (struct sockaddr *)&net->ro._l_addr;
4853 if (sa->sa_family == sa_touse->sa_family) {
4854 if (sa->sa_family == AF_INET) {
4855 sa4 = (struct sockaddr_in *)sa;
4856 if (sa4->sin_addr.s_addr ==
4857 sin4.sin_addr.s_addr) {
4863 if (sa->sa_family == AF_INET6) {
4864 sa6 = (struct sockaddr_in6 *)sa;
4865 if (SCTP6_ARE_ADDR_EQUAL(sa6,
4875 /* New address added! No need to look further. */
4878 /* Ok so far; let's munge through the rest of the packet */
4882 offset += sizeof(struct sctp_init_chunk);
4883 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
4885 ptype = ntohs(phdr->param_type);
4886 plen = ntohs(phdr->param_length);
4887 if (ptype == SCTP_IPV4_ADDRESS) {
4888 struct sctp_ipv4addr_param *p4, p4_buf;
4890 phdr = sctp_get_next_param(mat, offset,
4891 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
4892 if (plen != sizeof(struct sctp_ipv4addr_param) ||
4896 p4 = (struct sctp_ipv4addr_param *)phdr;
4897 sin4.sin_addr.s_addr = p4->addr;
4898 sa_touse = (struct sockaddr *)&sin4;
4899 } else if (ptype == SCTP_IPV6_ADDRESS) {
4900 struct sctp_ipv6addr_param *p6, p6_buf;
4902 phdr = sctp_get_next_param(mat, offset,
4903 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
4904 if (plen != sizeof(struct sctp_ipv6addr_param) ||
4908 p6 = (struct sctp_ipv6addr_param *)phdr;
4910 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
4913 sa_touse = (struct sockaddr *)&sin6;
4916 /* ok, sa_touse points to one to check */
4918 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4919 sa = (struct sockaddr *)&net->ro._l_addr;
4920 if (sa->sa_family != sa_touse->sa_family) {
4923 if (sa->sa_family == AF_INET) {
4924 sa4 = (struct sockaddr_in *)sa;
4925 if (sa4->sin_addr.s_addr ==
4926 sin4.sin_addr.s_addr) {
4932 if (sa->sa_family == AF_INET6) {
4933 sa6 = (struct sockaddr_in6 *)sa;
4934 if (SCTP6_ARE_ADDR_EQUAL(
4943 /* New addr added! no need to look further */
4947 offset += SCTP_SIZE32(plen);
4948 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
4954 * Given an MBUF chain that was sent into us containing an INIT, build an
4955 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
4956 * done a pullup to include the IPv6/IPv4 header, SCTP header and initial part
4957 * of the INIT message (i.e. the struct sctp_init_msg).
4960 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4961 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
4962 struct sctp_init_chunk *init_chk, uint32_t vrf_id, uint16_t port, int hold_inp_lock)
4964 struct sctp_association *asoc;
4965 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last;
4966 struct sctp_init_ack_chunk *initack;
4967 struct sctp_adaptation_layer_indication *ali;
4968 struct sctp_ecn_supported_param *ecn;
4969 struct sctp_prsctp_supported_param *prsctp;
4970 struct sctp_ecn_nonce_supported_param *ecn_nonce;
4971 struct sctp_supported_chunk_types_param *pr_supported;
4972 union sctp_sockstore store, store1, *over_addr;
4973 struct sockaddr_in *sin, *to_sin;
4976 struct sockaddr_in6 *sin6, *to_sin6;
4982 struct ip6_hdr *ip6;
4985 struct sockaddr *to;
4986 struct sctp_state_cookie stc;
4987 struct sctp_nets *net = NULL;
4988 uint8_t *signature = NULL;
4989 int cnt_inits_to = 0;
4990 uint16_t his_limit, i_want;
4991 int abort_flag, padval;
4994 int nat_friendly = 0;
5002 if ((asoc != NULL) &&
5003 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
5004 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
5005 /* new addresses, out of here in non-cookie-wait states */
5007 * Send an ABORT; we don't add the new address error clause,
5008 * though we even set the T bit and copy in the 0 tag. This
5009 * looks no different than if no listener was present.
5011 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL, vrf_id, port);
5015 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
5016 (offset + sizeof(struct sctp_init_chunk)),
5017 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
5020 sctp_send_abort(init_pkt, iphlen, sh,
5021 init_chk->init.initiate_tag, op_err, vrf_id, port);
5024 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
5026 /* No memory, INIT timer will re-attempt. */
5028 sctp_m_freem(op_err);
5031 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk);
5033 /* the time I built the cookie */
5034 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
5036 /* populate any tie tags */
5038 /* unlock before tag selections */
5039 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
5040 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
5041 stc.cookie_life = asoc->cookie_life;
5042 net = asoc->primary_destination;
5044 stc.tie_tag_my_vtag = 0;
5045 stc.tie_tag_peer_vtag = 0;
5046 /* life I will award this cookie */
5047 stc.cookie_life = inp->sctp_ep.def_cookie_life;
5050 /* copy in the ports for later check */
5051 stc.myport = sh->dest_port;
5052 stc.peerport = sh->src_port;
5055 * If we wanted to honor cookie life extensions, we would add to
5056 * stc.cookie_life. For now we should NOT honor any extension
5058 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
5059 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5060 struct inpcb *in_inp;
5062 /* It's a V6 socket */
5063 in_inp = (struct inpcb *)inp;
5064 stc.ipv6_addr_legal = 1;
5065 /* Now look at the binding flag to see if V4 will be legal */
5066 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
5067 stc.ipv4_addr_legal = 1;
5069 /* V4 addresses are NOT legal on the association */
5070 stc.ipv4_addr_legal = 0;
5073 /* It's a V4 socket, no V6 */
5074 stc.ipv4_addr_legal = 1;
5075 stc.ipv6_addr_legal = 0;
5078 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
5083 /* now for scope setup */
5084 memset((caddr_t)&store, 0, sizeof(store));
5085 memset((caddr_t)&store1, 0, sizeof(store1));
5087 to_sin = &store1.sin;
5090 to_sin6 = &store1.sin6;
5092 iph = mtod(init_pkt, struct ip *);
5093 /* establish the to_addrs */
5094 switch (iph->ip_v) {
5096 to_sin->sin_port = sh->dest_port;
5097 to_sin->sin_family = AF_INET;
5098 to_sin->sin_len = sizeof(struct sockaddr_in);
5099 to_sin->sin_addr = iph->ip_dst;
5102 case IPV6_VERSION >> 4:
5103 ip6 = mtod(init_pkt, struct ip6_hdr *);
5104 to_sin6->sin6_addr = ip6->ip6_dst;
5105 to_sin6->sin6_scope_id = 0;
5106 to_sin6->sin6_port = sh->dest_port;
5107 to_sin6->sin6_family = AF_INET6;
5108 to_sin6->sin6_len = sizeof(struct sockaddr_in6);
5117 to = (struct sockaddr *)&store;
5118 switch (iph->ip_v) {
5121 sin->sin_family = AF_INET;
5122 sin->sin_len = sizeof(struct sockaddr_in);
5123 sin->sin_port = sh->src_port;
5124 sin->sin_addr = iph->ip_src;
5125 /* lookup address */
5126 stc.address[0] = sin->sin_addr.s_addr;
5130 stc.addr_type = SCTP_IPV4_ADDRESS;
5131 /* local from address */
5132 stc.laddress[0] = to_sin->sin_addr.s_addr;
5133 stc.laddress[1] = 0;
5134 stc.laddress[2] = 0;
5135 stc.laddress[3] = 0;
5136 stc.laddr_type = SCTP_IPV4_ADDRESS;
5137 /* scope_id is only for v6 */
5139 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
5140 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
5145 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
5146 /* Must use the address in this case */
5147 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
5148 stc.loopback_scope = 1;
5151 stc.local_scope = 0;
5156 case IPV6_VERSION >> 4:
5158 ip6 = mtod(init_pkt, struct ip6_hdr *);
5159 sin6->sin6_family = AF_INET6;
5160 sin6->sin6_len = sizeof(struct sockaddr_in6);
5161 sin6->sin6_port = sh->src_port;
5162 sin6->sin6_addr = ip6->ip6_src;
5163 /* lookup address */
5164 memcpy(&stc.address, &sin6->sin6_addr,
5165 sizeof(struct in6_addr));
5166 sin6->sin6_scope_id = 0;
5167 stc.addr_type = SCTP_IPV6_ADDRESS;
5169 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
5171 * FIX ME: does this have scope from rcvif?
5174 (void)sa6_recoverscope(sin6);
5175 stc.scope_id = sin6->sin6_scope_id;
5176 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5177 stc.loopback_scope = 1;
5178 stc.local_scope = 0;
5181 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
5183 * If the new destination is a
5184 * LINK_LOCAL we must have both
5185 * site and local scope in common. Don't
5186 * set local scope though since we
5187 * must depend on the source to be
5188 * added implicitly. We cannot
5189 * assure just because we share one
5190 * link that all links are common.
5192 stc.local_scope = 0;
5196 * we start counting for the private
5197 * address stuff at 1, since the
5198 * link local we source from won't
5199 * show up in our scoped count.
5203 * pull out the scope_id from the incoming packet
5207 * FIX ME: does this have scope from rcvif?
5210 (void)sa6_recoverscope(sin6);
5211 stc.scope_id = sin6->sin6_scope_id;
5212 sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
5213 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
5215 * If the new destination is
5216 * SITE_LOCAL then we must have site scope in common.
5221 memcpy(&stc.laddress, &to_sin6->sin6_addr, sizeof(struct in6_addr));
5222 stc.laddr_type = SCTP_IPV6_ADDRESS;
5232 /* set the scope per the existing tcb */
5235 struct sctp_nets *lnet;
5239 stc.loopback_scope = asoc->loopback_scope;
5240 stc.ipv4_scope = asoc->ipv4_local_scope;
5241 stc.site_scope = asoc->site_scope;
5242 stc.local_scope = asoc->local_scope;
5244 /* Why do we not consider IPv4 LL addresses? */
5245 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
5246 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
5247 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
5249 * if we have a LL address, start
5257 /* use the net pointer */
5258 to = (struct sockaddr *)&net->ro._l_addr;
5259 switch (to->sa_family) {
5261 sin = (struct sockaddr_in *)to;
5262 stc.address[0] = sin->sin_addr.s_addr;
5266 stc.addr_type = SCTP_IPV4_ADDRESS;
5267 if (net->src_addr_selected == 0) {
5269 * strange case here, the INIT should have
5270 * done the selection.
5272 net->ro._s_addr = sctp_source_address_selection(inp,
5273 stcb, (sctp_route_t *) & net->ro,
5275 if (net->ro._s_addr == NULL)
5278 net->src_addr_selected = 1;
5281 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
5282 stc.laddress[1] = 0;
5283 stc.laddress[2] = 0;
5284 stc.laddress[3] = 0;
5285 stc.laddr_type = SCTP_IPV4_ADDRESS;
5289 sin6 = (struct sockaddr_in6 *)to;
5290 memcpy(&stc.address, &sin6->sin6_addr,
5291 sizeof(struct in6_addr));
5292 stc.addr_type = SCTP_IPV6_ADDRESS;
5293 if (net->src_addr_selected == 0) {
5295 * strange case here, the INIT should have
5296 * done the selection.
5298 net->ro._s_addr = sctp_source_address_selection(inp,
5299 stcb, (sctp_route_t *) & net->ro,
5301 if (net->ro._s_addr == NULL)
5304 net->src_addr_selected = 1;
5306 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
5307 sizeof(struct in6_addr));
5308 stc.laddr_type = SCTP_IPV6_ADDRESS;
5313 /* Now let's put the INIT-ACK chunk in place */
5314 initack = mtod(m, struct sctp_init_ack_chunk *);
5315 /* Save it off for quick ref */
5316 stc.peers_vtag = init_chk->init.initiate_tag;
5318 memcpy(stc.identification, SCTP_VERSION_STRING,
5319 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
5320 /* now the chunk header */
5321 initack->ch.chunk_type = SCTP_INITIATION_ACK;
5322 initack->ch.chunk_flags = 0;
5323 /* fill in later from mbuf we build */
5324 initack->ch.chunk_length = 0;
5325 /* place in my tag */
5326 if ((asoc != NULL) &&
5327 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
5328 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
5329 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
5330 /* re-use the v-tags and init-seq here */
5331 initack->init.initiate_tag = htonl(asoc->my_vtag);
5332 initack->init.initial_tsn = htonl(asoc->init_seq_number);
5334 uint32_t vtag, itsn;
5336 if (hold_inp_lock) {
5337 SCTP_INP_INCR_REF(inp);
5338 SCTP_INP_RUNLOCK(inp);
5341 atomic_add_int(&asoc->refcnt, 1);
5342 SCTP_TCB_UNLOCK(stcb);
5344 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5345 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) {
5347 * Got a duplicate vtag from some guy behind a
5348 * NAT; make sure we don't use it.
5352 initack->init.initiate_tag = htonl(vtag);
5353 /* get a TSN to use too */
5354 itsn = sctp_select_initial_TSN(&inp->sctp_ep);
5355 initack->init.initial_tsn = htonl(itsn);
5356 SCTP_TCB_LOCK(stcb);
5357 atomic_add_int(&asoc->refcnt, -1);
5359 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
5360 initack->init.initiate_tag = htonl(vtag);
5361 /* get a TSN to use too */
5362 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
5364 if (hold_inp_lock) {
5365 SCTP_INP_RLOCK(inp);
5366 SCTP_INP_DECR_REF(inp);
5369 /* save away my tag to the cookie */
5370 stc.my_vtag = initack->init.initiate_tag;
5372 /* set up some of the credits. */
5373 so = inp->sctp_socket;
5375 /* memory problem */
5379 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
5381 /* set what I want */
5382 his_limit = ntohs(init_chk->init.num_inbound_streams);
5383 /* choose what I want */
5385 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
5386 i_want = asoc->streamoutcnt;
5388 i_want = inp->sctp_ep.pre_open_stream_count;
5391 i_want = inp->sctp_ep.pre_open_stream_count;
5393 if (his_limit < i_want) {
5394 /* I Want more :< */
5395 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
5397 /* I can have what I want :> */
5398 initack->init.num_outbound_streams = htons(i_want);
5400 /* tell him his limit. */
5401 initack->init.num_inbound_streams =
5402 htons(inp->sctp_ep.max_open_streams_intome);
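/*
 * Worked example (editor's sketch): if the peer's INIT offered
 * his_limit == 5 inbound streams and we wanted i_want == 10 outbound,
 * the INIT-ACK echoes 5 outbound streams; had the peer allowed 16 we
 * would take our full 10. Inbound is always our own
 * max_open_streams_intome, regardless of what the peer asked for.
 */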
5404 /* adaptation layer indication parameter */
5405 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack));
5406 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
5407 ali->ph.param_length = htons(sizeof(*ali));
5408 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
5409 SCTP_BUF_LEN(m) += sizeof(*ali);
5410 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali));
5413 if (SCTP_BASE_SYSCTL(sctp_ecn_enable) == 1) {
5414 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
5415 ecn->ph.param_length = htons(sizeof(*ecn));
5416 SCTP_BUF_LEN(m) += sizeof(*ecn);
5418 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
5421 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
5423 /* And now tell the peer we do pr-sctp */
5424 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
5425 prsctp->ph.param_length = htons(sizeof(*prsctp));
5426 SCTP_BUF_LEN(m) += sizeof(*prsctp);
5428 /* Add NAT friendly parameter */
5429 struct sctp_paramhdr *ph;
5431 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5432 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
5433 ph->param_length = htons(sizeof(struct sctp_paramhdr));
5434 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr);
5436 /* And now tell the peer we do all the extensions */
5437 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5438 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
5440 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
5441 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
5442 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
5443 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
5444 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
5445 if (!SCTP_BASE_SYSCTL(sctp_auth_disable))
5446 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
5447 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
5448 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
5449 p_len = sizeof(*pr_supported) + num_ext;
5450 pr_supported->ph.param_length = htons(p_len);
5451 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
5452 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5454 /* ECN nonce: And now tell the peer we support ECN nonce */
5455 if (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) {
5456 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
5457 ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
5458 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
5459 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
5460 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
5462 /* add authentication parameters */
5463 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) {
5464 struct sctp_auth_random *randp;
5465 struct sctp_auth_hmac_algo *hmacs;
5466 struct sctp_auth_chunk_list *chunks;
5467 uint16_t random_len;
5469 /* generate and add RANDOM parameter */
5470 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
5471 randp = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5472 randp->ph.param_type = htons(SCTP_RANDOM);
5473 p_len = sizeof(*randp) + random_len;
5474 randp->ph.param_length = htons(p_len);
5475 SCTP_READ_RANDOM(randp->random_data, random_len);
5476 /* zero out any padding required */
5477 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len);
5478 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5480 /* add HMAC_ALGO parameter */
5481 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5482 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
5483 (uint8_t *) hmacs->hmac_ids);
5485 p_len += sizeof(*hmacs);
5486 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
5487 hmacs->ph.param_length = htons(p_len);
5488 /* zero out any padding required */
5489 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
5490 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5492 /* add CHUNKS parameter */
5493 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
5494 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
5495 chunks->chunk_types);
5497 p_len += sizeof(*chunks);
5498 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
5499 chunks->ph.param_length = htons(p_len);
5500 /* zero out any padding required */
5501 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
5502 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
5506 /* now the addresses */
5508 struct sctp_scoping scp;
5511 * To optimize this we could put the scoping stuff into a
5512 * structure and remove the individual uint8's from the stc
5513 * structure. Then we could just pass in the address within
5514 * the stc; but for now this is a quick hack to get the
5515 * address stuff teased apart.
5517 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
5518 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
5519 scp.loopback_scope = stc.loopback_scope;
5520 scp.ipv4_local_scope = stc.ipv4_scope;
5521 scp.local_scope = stc.local_scope;
5522 scp.site_scope = stc.site_scope;
5523 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
5526 /* tack on the operational error if present */
5534 llen += SCTP_BUF_LEN(ol);
5535 ol = SCTP_BUF_NEXT(ol);
5538 /* must add a pad to the param */
5539 uint32_t cpthis = 0;
5542 padlen = 4 - (llen % 4);
5543 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
5545 while (SCTP_BUF_NEXT(m_at) != NULL) {
5546 m_at = SCTP_BUF_NEXT(m_at);
5548 SCTP_BUF_NEXT(m_at) = op_err;
5549 while (SCTP_BUF_NEXT(m_at) != NULL) {
5550 m_at = SCTP_BUF_NEXT(m_at);
5553 /* pre-calculate the size and update pkt header and chunk header */
5555 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5556 p_len += SCTP_BUF_LEN(m_tmp);
5557 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5558 /* m_tmp should now point to last one */
5563 /* Now we must build a cookie */
5564 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m, 0, &stc, &signature);
5565 if (m_cookie == NULL) {
5566 /* memory problem */
5570 /* Now append the cookie to the end and update the space/size */
5571 SCTP_BUF_NEXT(m_tmp) = m_cookie;
5573 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5574 p_len += SCTP_BUF_LEN(m_tmp);
5575 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5576 /* m_tmp should now point to last one */
5582 * Place in the size, but we don't include the last pad (if any) in the chunk size.
5585 initack->ch.chunk_length = htons(p_len);
5588 * Time to sign the cookie; we don't sign over the cookie signature
5589 * itself, thus we set the trailer.
5591 (void)sctp_hmac_m(SCTP_HMAC,
5592 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
5593 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
5594 (uint8_t *) signature, SCTP_SIGNATURE_SIZE);
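/*
 * Editor's note, a sketch of the signing step above: the HMAC is keyed
 * with the current local secret and computed over the cookie starting
 * just past its parameter header, with the digest written into the
 * SCTP_SIGNATURE_SIZE trailer reserved by sctp_add_cookie(). On
 * COOKIE-ECHO the same computation can be repeated to validate a
 * returned cookie without keeping per-association state.
 */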
5596 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
5597 * here since the timer will drive a retransmission.
5600 if ((padval) && (mp_last)) {
5601 /* see my previous comments on mp_last */
5604 ret = sctp_add_pad_tombuf(mp_last, (4 - padval));
5606 /* Houston we have a problem, no space */
5612 if (stc.loopback_scope) {
5613 over_addr = &store1;
5618 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
5620 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
5621 port, SCTP_SO_NOT_LOCKED, over_addr);
5622 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5627 sctp_insert_on_wheel(struct sctp_tcb *stcb,
5628 struct sctp_association *asoc,
5629 struct sctp_stream_out *strq, int holds_lock)
5631 if (holds_lock == 0) {
5632 SCTP_TCB_SEND_LOCK(stcb);
5634 if ((strq->next_spoke.tqe_next == NULL) &&
5635 (strq->next_spoke.tqe_prev == NULL)) {
5636 TAILQ_INSERT_TAIL(&asoc->out_wheel, strq, next_spoke);
5638 if (holds_lock == 0) {
5639 SCTP_TCB_SEND_UNLOCK(stcb);
5644 sctp_remove_from_wheel(struct sctp_tcb *stcb,
5645 struct sctp_association *asoc,
5646 struct sctp_stream_out *strq,
5649 /* take it off and then set it up so we know it is not on the wheel */
5650 if (holds_lock == 0) {
5651 SCTP_TCB_SEND_LOCK(stcb);
5653 if (TAILQ_EMPTY(&strq->outqueue)) {
5654 if (asoc->last_out_stream == strq) {
5655 asoc->last_out_stream = TAILQ_PREV(asoc->last_out_stream, sctpwheel_listhead, next_spoke);
5656 if (asoc->last_out_stream == NULL) {
5657 asoc->last_out_stream = TAILQ_LAST(&asoc->out_wheel, sctpwheel_listhead);
5659 if (asoc->last_out_stream == strq) {
5660 asoc->last_out_stream = NULL;
5663 TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
5664 strq->next_spoke.tqe_next = NULL;
5665 strq->next_spoke.tqe_prev = NULL;
5667 if (holds_lock == 0) {
5668 SCTP_TCB_SEND_UNLOCK(stcb);
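/*
 * Editor's note: clearing both tqe_next and tqe_prev above is the
 * sentinel this file uses for "not on the wheel";
 * sctp_insert_on_wheel() and sctp_msg_append() test exactly that pair
 * before inserting, so a stream queue entry is never linked twice.
 */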
5673 sctp_prune_prsctp(struct sctp_tcb *stcb,
5674 struct sctp_association *asoc,
5675 struct sctp_sndrcvinfo *srcv,
5679 struct sctp_tmit_chunk *chk, *nchk;
5681 SCTP_TCB_LOCK_ASSERT(stcb);
5682 if ((asoc->peer_supports_prsctp) &&
5683 (asoc->sent_queue_cnt_removeable > 0)) {
5684 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
5686 * Look for chunks marked with the PR_SCTP flag AND
5687 * the buffer space flag. If the one being sent is of
5688 * equal or greater priority, then purge the old one
5689 * and free some space.
5691 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
5693 * This one is PR-SCTP AND buffer space limited.
5696 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5698 * Lower numbers equate to higher
5699 * priority so if the one we are
5700 * looking at has a larger or equal
5701 * priority we want to drop the data
5702 * and NOT retransmit it.
5706 * We release the book_size
5707 * if the mbuf is here
5712 if (chk->sent > SCTP_DATAGRAM_UNSENT)
5713 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
5715 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
5716 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5719 freed_spc += ret_spc;
5720 if (freed_spc >= dataout) {
5723 } /* if chunk was present */
5724 } /* if of sufficient priority */
5725 } /* if chunk has PR enabled */
5726 } /* tailqforeach */
5728 chk = TAILQ_FIRST(&asoc->send_queue);
5730 nchk = TAILQ_NEXT(chk, sctp_next);
5731 /* Here we must move to the sent queue and mark */
5732 if (PR_SCTP_TTL_ENABLED(chk->flags)) {
5733 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5736 * We release the book_size
5737 * if the mbuf is here
5741 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5742 SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
5745 freed_spc += ret_spc;
5746 if (freed_spc >= dataout) {
5749 } /* end if chk->data */
5750 } /* end if right class */
5751 } /* end if chk pr-sctp */
5753 } /* end while (chk) */
5754 } /* if enabled in asoc */
5758 sctp_get_frag_point(struct sctp_tcb *stcb,
5759 struct sctp_association *asoc)
5764 * For endpoints that have both v6 and v4 addresses we must reserve
5765 * room for the ipv6 header, for those that are only dealing with V4
5766 * we use a larger frag point.
5768 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5769 ovh = SCTP_MED_OVERHEAD;
5771 ovh = SCTP_MED_V4_OVERHEAD;
5774 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
5775 siz = asoc->smallest_mtu - ovh;
5777 siz = (stcb->asoc.sctp_frag_point - ovh);
5779 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
5781 /* A data chunk MUST fit in a cluster */
5782 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
5785 /* adjust for an AUTH chunk if DATA requires auth */
5786 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
5787 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
5790 /* make it an even word boundary please */
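/*
 * Worked example (editor's sketch, with hypothetical numbers): on a
 * v6-capable endpoint with smallest_mtu == 1500, siz starts at 1500
 * minus SCTP_MED_OVERHEAD; if DATA must be authenticated, the peer's
 * AUTH chunk length is subtracted too, and the word-boundary step this
 * comment refers to then trims the result down to a multiple of 4.
 */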
5797 sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
5801 * We assume that the user wants PR_SCTP_TTL if the user provides a
5802 * positive lifetime but does not specify any PR_SCTP policy. This
5803 * is a BAD assumption and causes problems at least with the
5804 * U-Vancouver MPI folks. I will change this to be no policy means NO PR-SCTP.
5807 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
5808 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
5813 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
5814 case CHUNK_FLAGS_PR_SCTP_BUF:
5816 * Time to live is a priority stored in tv_sec when doing
5817 * the buffer drop thing.
5819 sp->ts.tv_sec = sp->timetolive;
5822 case CHUNK_FLAGS_PR_SCTP_TTL:
5826 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
5827 tv.tv_sec = sp->timetolive / 1000;
5828 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
5830 * TODO sctp_constants.h needs alternative time
5831 * macros when _KERNEL is undefined.
5833 timevaladd(&sp->ts, &tv);
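/*
 * Worked example (editor's sketch): timetolive is in milliseconds, so
 * sp->timetolive == 1500 yields tv = { 1, 500000 }, and timevaladd()
 * turns sp->ts into the absolute wall-clock time after which the
 * message may be dropped.
 */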
5836 case CHUNK_FLAGS_PR_SCTP_RTX:
5838 * Time to live is the number of retransmissions stored in
5841 sp->ts.tv_sec = sp->timetolive;
5845 SCTPDBG(SCTP_DEBUG_USRREQ1,
5846 "Unknown PR_SCTP policy %u.\n",
5847 PR_SCTP_POLICY(sp->sinfo_flags));
5853 sctp_msg_append(struct sctp_tcb *stcb,
5854 struct sctp_nets *net,
5856 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
5858 int error = 0, holds_lock;
5860 struct sctp_stream_queue_pending *sp = NULL;
5861 struct sctp_stream_out *strm;
5864 * Given an mbuf chain, put it into the association send queue and
5865 * place it on the wheel
5867 holds_lock = hold_stcb_lock;
5868 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
5869 /* Invalid stream number */
5870 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
5874 if ((stcb->asoc.stream_locked) &&
5875 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
5876 SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
5880 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
5881 /* Now can we send this? */
5882 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
5883 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5884 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
5885 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
5886 /* got data while shutting down */
5887 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
5891 sctp_alloc_a_strmoq(stcb, sp);
5893 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
5897 sp->sinfo_flags = srcv->sinfo_flags;
5898 sp->timetolive = srcv->sinfo_timetolive;
5899 sp->ppid = srcv->sinfo_ppid;
5900 sp->context = srcv->sinfo_context;
5902 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
5905 sp->net = stcb->asoc.primary_destination;
5907 atomic_add_int(&sp->net->ref_count, 1);
5908 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
5909 sp->stream = srcv->sinfo_stream;
5910 sp->msg_is_complete = 1;
5911 sp->sender_all_done = 1;
5914 sp->tail_mbuf = NULL;
5917 sctp_set_prsctp_policy(sp);
5919 * We could in theory (for sendall) pass the length in, but we would
5920 * still have to hunt through the chain since we need to set up the tail_mbuf.
5924 if (SCTP_BUF_NEXT(at) == NULL)
5926 sp->length += SCTP_BUF_LEN(at);
5927 at = SCTP_BUF_NEXT(at);
5929 SCTP_TCB_SEND_LOCK(stcb);
5930 sctp_snd_sb_alloc(stcb, sp->length);
5931 atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
5932 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
5933 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
5934 sp->strseq = strm->next_sequence_sent;
5935 strm->next_sequence_sent++;
5937 if ((strm->next_spoke.tqe_next == NULL) &&
5938 (strm->next_spoke.tqe_prev == NULL)) {
5939 /* Not on wheel, insert */
5940 sctp_insert_on_wheel(stcb, &stcb->asoc, strm, 1);
5943 SCTP_TCB_SEND_UNLOCK(stcb);
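/*
 * Illustrative caller sketch (hypothetical, not from this file): a
 * caller holding the TCB lock, with an mbuf chain m and a filled-in
 * struct sctp_sndrcvinfo snd_info, would queue one message with
 *
 *	error = sctp_msg_append(stcb, stcb->asoc.primary_destination,
 *	    m, &snd_info, 1);
 *
 * where, under the assumed semantics of hold_stcb_lock, passing 1
 * keeps the TCB lock held on return.
 */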
5952 static struct mbuf *
5953 sctp_copy_mbufchain(struct mbuf *clonechain,
5954 struct mbuf *outchain,
5955 struct mbuf **endofchain,
5958 uint8_t copy_by_ref)
5961 struct mbuf *appendchain;
5965 if (endofchain == NULL) {
5969 sctp_m_freem(outchain);
5972 if (can_take_mbuf) {
5973 appendchain = clonechain;
5976 (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
5978 /* It's not in a cluster */
5979 if (*endofchain == NULL) {
5980 /* let's get an mbuf cluster */
5981 if (outchain == NULL) {
5982 /* This is the general case */
5984 outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
5985 if (outchain == NULL) {
5988 SCTP_BUF_LEN(outchain) = 0;
5989 *endofchain = outchain;
5990 /* get the prepend space */
5991 SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
5994 * We really should not get a NULL in endofchain.
6000 if (SCTP_BUF_NEXT(m) == NULL) {
6004 m = SCTP_BUF_NEXT(m);
6007 if (*endofchain == NULL) {
6009 * huh, TSNH XXX maybe we should panic.
6012 sctp_m_freem(outchain);
6016 /* get the new end of length */
6017 len = M_TRAILINGSPACE(*endofchain);
6019 /* how much is left at the end? */
6020 len = M_TRAILINGSPACE(*endofchain);
6022 /* Find the end of the data, for appending */
6023 cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));
6025 /* Now let's copy it out */
6026 if (len >= sizeofcpy) {
6027 /* It all fits, copy it in */
6028 m_copydata(clonechain, 0, sizeofcpy, cp);
6029 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6031 /* fill up the end of the chain */
6033 m_copydata(clonechain, 0, len, cp);
6034 SCTP_BUF_LEN((*endofchain)) += len;
6035 /* now we need another one */
6038 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
6043 SCTP_BUF_NEXT((*endofchain)) = m;
6045 cp = mtod((*endofchain), caddr_t);
6046 m_copydata(clonechain, len, sizeofcpy, cp);
6047 SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
6051 /* copy the old fashion way */
6052 appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
6053 #ifdef SCTP_MBUF_LOGGING
6054 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6059 if (SCTP_BUF_IS_EXTENDED(mat)) {
6060 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6062 mat = SCTP_BUF_NEXT(mat);
6068 if (appendchain == NULL) {
6071 sctp_m_freem(outchain);
6075 /* tack on to the end */
6076 if (*endofchain != NULL) {
6077 SCTP_BUF_NEXT(((*endofchain))) = appendchain;
6081 if (SCTP_BUF_NEXT(m) == NULL) {
6082 SCTP_BUF_NEXT(m) = appendchain;
6085 m = SCTP_BUF_NEXT(m);
6089 * save off the end and update the end-chain position
6093 if (SCTP_BUF_NEXT(m) == NULL) {
6097 m = SCTP_BUF_NEXT(m);
6101 /* save off the end and update the end-chain position */
6104 if (SCTP_BUF_NEXT(m) == NULL) {
6108 m = SCTP_BUF_NEXT(m);
6110 return (appendchain);
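/*
 * Note on the threshold test near the top of this function: with the
 * assumed default sctp_mbuf_threshold_count of 5 and illustrative
 * values MLEN = 224 and MHLEN = 200, chains of up to
 * ((5 - 1) * 224) + 200 = 1096 bytes are flattened into the tail
 * cluster via m_copydata(), while anything larger (or a chain we may
 * take over outright) goes through the SCTP_M_COPYM()/append path.
 */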
6115 sctp_med_chunk_output(struct sctp_inpcb *inp,
6116 struct sctp_tcb *stcb,
6117 struct sctp_association *asoc,
6120 int control_only, int from_where,
6121 struct timeval *now, int *now_filled, int frag_point, int so_locked
6122 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
6128 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
6131 struct sctp_copy_all *ca;
6134 int added_control = 0;
6135 int un_sent, do_chunk_output = 1;
6136 struct sctp_association *asoc;
6138 ca = (struct sctp_copy_all *)ptr;
6139 if (ca->m == NULL) {
6142 if (ca->inp != inp) {
6146 if ((ca->m) && ca->sndlen) {
6147 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
6149 /* can't copy so we are done */
6153 #ifdef SCTP_MBUF_LOGGING
6154 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6159 if (SCTP_BUF_IS_EXTENDED(mat)) {
6160 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6162 mat = SCTP_BUF_NEXT(mat);
6169 SCTP_TCB_LOCK_ASSERT(stcb);
6170 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
6171 /* Abort this assoc with m as the user defined reason */
6173 struct sctp_paramhdr *ph;
6175 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
6177 ph = mtod(m, struct sctp_paramhdr *);
6178 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
6179 ph->param_length = htons(ca->sndlen);
6182 * We add one here to keep the assoc from
6183 * disappearing on us.
6185 atomic_add_int(&stcb->asoc.refcnt, 1);
6186 sctp_abort_an_association(inp, stcb,
6187 SCTP_RESPONSE_TO_USER_REQ,
6188 m, SCTP_SO_NOT_LOCKED);
6190 * sctp_abort_an_association calls sctp_free_asoc();
6191 * free_asoc will NOT free the association since we
6192 * incremented the refcnt. We do this to prevent it
6193 * being freed and things getting tricky, since we
6194 * could end up (from free_asoc) calling inpcb_free,
6195 * which would make a recursive lock call on the
6196 * iterator lock. But as a consequence the stcb will
6197 * come back to us unlocked; since free_asoc returns
6198 * with either no TCB or the TCB unlocked, we must
6199 * relock so it can be unlocked later in the
6200 * iterator timer.
6202 SCTP_TCB_LOCK(stcb);
6203 atomic_add_int(&stcb->asoc.refcnt, -1);
6204 goto no_chunk_output;
6208 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
6212 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
6213 /* shutdown this assoc */
6216 cnt = sctp_is_there_unsent_data(stcb);
6218 if (TAILQ_EMPTY(&asoc->send_queue) &&
6219 TAILQ_EMPTY(&asoc->sent_queue) &&
6221 if (asoc->locked_on_sending) {
6225 * there is nothing queued to send, so I'm done.
6228 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6229 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6230 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6232 * only send SHUTDOWN the first time around.
6235 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
6236 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
6237 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
6239 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
6240 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
6241 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
6242 asoc->primary_destination);
6243 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6244 asoc->primary_destination);
6246 do_chunk_output = 0;
6250 * we still got (or just got) data to send,
6251 * so set SHUTDOWN_PENDING
6254 * XXX sockets draft says that SCTP_EOF
6255 * should be sent with no data. Currently,
6256 * we will allow user data to be sent first
6257 * and move to SHUTDOWN-PENDING.
6259 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
6260 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
6261 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
6262 if (asoc->locked_on_sending) {
6264 * Locked to send out the data.
6267 struct sctp_stream_queue_pending *sp;
6269 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
6271 if ((sp->length == 0) && (sp->msg_is_complete == 0))
6272 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
6275 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
6276 if (TAILQ_EMPTY(&asoc->send_queue) &&
6277 TAILQ_EMPTY(&asoc->sent_queue) &&
6278 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
6280 atomic_add_int(&stcb->asoc.refcnt, 1);
6281 sctp_abort_an_association(stcb->sctp_ep, stcb,
6282 SCTP_RESPONSE_TO_USER_REQ,
6283 NULL, SCTP_SO_NOT_LOCKED);
6284 atomic_add_int(&stcb->asoc.refcnt, -1);
6285 goto no_chunk_output;
6287 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
6288 asoc->primary_destination);
6294 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
6295 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
6297 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
6298 (stcb->asoc.total_flight > 0) &&
6299 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
6301 do_chunk_output = 0;
6303 if (do_chunk_output)
6304 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
6305 else if (added_control) {
6306 int num_out = 0, reason = 0, now_filled = 0;
6310 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
6311 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
6312 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
6323 sctp_sendall_completes(void *ptr, uint32_t val)
6325 struct sctp_copy_all *ca;
6327 ca = (struct sctp_copy_all *)ptr;
6329 * Do a notify here? Kacheong suggests that the notify be done at
6330 * the send time, so you would push up a notification if any send
6331 * failed. Don't know if this is feasible, since the only failures we
6332 * have are "memory" related, and if you cannot get an mbuf to send
6333 * the data you surely can't get an mbuf to send up a notification that
6334 * you can't send the data :->
6337 /* now free everything */
6338 sctp_m_freem(ca->m);
6339 SCTP_FREE(ca, SCTP_M_COPYAL);
6343 #define MC_ALIGN(m, len) do { \
6344 SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
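/*
 * Worked example of the macro above: with MCLBYTES = 2048 on a 64-bit
 * platform (sizeof(long) == 8), aligning for len = 100 reserves
 * (2048 - 100) & ~7 = 1944 leading bytes, which pushes the copied
 * data to the long-aligned tail of the cluster.
 */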
6349 static struct mbuf *
6350 sctp_copy_out_all(struct uio *uio, int len)
6352 struct mbuf *ret, *at;
6353 int left, willcpy, cancpy, error;
6355 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
6361 SCTP_BUF_LEN(ret) = 0;
6362 /* save space for the data chunk header */
6363 cancpy = M_TRAILINGSPACE(ret);
6364 willcpy = min(cancpy, left);
6367 /* Align data to the end */
6368 error = uiomove(mtod(at, caddr_t), willcpy, uio);
6374 SCTP_BUF_LEN(at) = willcpy;
6375 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
6378 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
6379 if (SCTP_BUF_NEXT(at) == NULL) {
6382 at = SCTP_BUF_NEXT(at);
6383 SCTP_BUF_LEN(at) = 0;
6384 cancpy = M_TRAILINGSPACE(at);
6385 willcpy = min(cancpy, left);
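/*
 * Sketch of the copy loop above: left counts the uio bytes still to
 * move and each pass fills at most the trailing space of the current
 * cluster, so, for example, a 5000-byte send over roughly 2048-byte
 * clusters completes in three uiomove() passes, the first slightly
 * smaller because of the space reserved for the data chunk header.
 */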
6392 sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
6393 struct sctp_sndrcvinfo *srcv)
6396 struct sctp_copy_all *ca;
6398 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
6402 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6405 memset(ca, 0, sizeof(struct sctp_copy_all));
6408 memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
6410 * take off the sendall flag; it would be bad if we failed to do this.
6413 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
6414 /* get length and mbuf chain */
6416 ca->sndlen = uio->uio_resid;
6417 ca->m = sctp_copy_out_all(uio, ca->sndlen);
6418 if (ca->m == NULL) {
6419 SCTP_FREE(ca, SCTP_M_COPYAL);
6420 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
6424 /* Gather the length of the send */
6430 ca->sndlen += SCTP_BUF_LEN(m);
6431 m = SCTP_BUF_NEXT(m);
6435 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
6436 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
6437 SCTP_ASOC_ANY_STATE,
6439 sctp_sendall_completes, inp, 1);
6441 SCTP_PRINTF("Failed to initiate iterator for sendall\n");
6442 SCTP_FREE(ca, SCTP_M_COPYAL);
6443 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
6451 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
6453 struct sctp_tmit_chunk *chk, *nchk;
6455 chk = TAILQ_FIRST(&asoc->control_send_queue);
6457 nchk = TAILQ_NEXT(chk, sctp_next);
6458 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6459 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6461 sctp_m_freem(chk->data);
6464 asoc->ctrl_queue_cnt--;
6465 sctp_free_a_chunk(stcb, chk);
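/*
 * The nchk successor saved before TAILQ_REMOVE() is what makes this
 * sweep safe: once chk is unlinked its sctp_next linkage is no longer
 * meaningful, so the walk continues from the pointer captured while
 * the entry was still on the control_send_queue.
 */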
6472 sctp_toss_old_asconf(struct sctp_tcb *stcb)
6474 struct sctp_association *asoc;
6475 struct sctp_tmit_chunk *chk, *chk_tmp;
6476 struct sctp_asconf_chunk *acp;
6479 for (chk = TAILQ_FIRST(&asoc->asconf_send_queue); chk != NULL;
6482 chk_tmp = TAILQ_NEXT(chk, sctp_next);
6483 /* find SCTP_ASCONF chunk in queue */
6484 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6486 acp = mtod(chk->data, struct sctp_asconf_chunk *);
6487 if (compare_with_wrap(ntohl(acp->serial_number), stcb->asoc.asconf_seq_out_acked, MAX_SEQ)) {
6492 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
6494 sctp_m_freem(chk->data);
6497 asoc->ctrl_queue_cnt--;
6498 sctp_free_a_chunk(stcb, chk);
6505 sctp_clean_up_datalist(struct sctp_tcb *stcb,
6507 struct sctp_association *asoc,
6508 struct sctp_tmit_chunk **data_list,
6510 struct sctp_nets *net)
6513 struct sctp_tmit_chunk *tp1;
6515 for (i = 0; i < bundle_at; i++) {
6516 /* off of the send queue */
6519 * For any chunk that is not chunk 0, zap the time; chunk 0 gets
6520 * its time zapped or set based on whether an RTO measurement is needed.
6523 data_list[i]->do_rtt = 0;
6526 data_list[i]->sent_rcv_time = net->last_sent_time;
6527 data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
6528 TAILQ_REMOVE(&asoc->send_queue,
6531 /* on to the sent queue */
6532 tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
6533 if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq,
6534 data_list[i]->rec.data.TSN_seq, MAX_TSN))) {
6535 struct sctp_tmit_chunk *tpp;
6537 /* need to move back */
6539 tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
6541 TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
6545 if (compare_with_wrap(tp1->rec.data.TSN_seq,
6546 data_list[i]->rec.data.TSN_seq, MAX_TSN)) {
6549 TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
6551 TAILQ_INSERT_TAIL(&asoc->sent_queue,
6556 /* This does not lower until the cum-ack passes it */
6557 asoc->sent_queue_cnt++;
6558 asoc->send_queue_cnt--;
6559 if ((asoc->peers_rwnd <= 0) &&
6560 (asoc->total_flight == 0) &&
6562 /* Mark the chunk as being a window probe */
6563 SCTP_STAT_INCR(sctps_windowprobed);
6565 #ifdef SCTP_AUDITING_ENABLED
6566 sctp_audit_log(0xC2, 3);
6568 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6569 data_list[i]->snd_count = 1;
6570 data_list[i]->rec.data.chunk_was_revoked = 0;
6571 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
6572 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6573 data_list[i]->whoTo->flight_size,
6574 data_list[i]->book_size,
6575 (uintptr_t) data_list[i]->whoTo,
6576 data_list[i]->rec.data.TSN_seq);
6578 sctp_flight_size_increase(data_list[i]);
6579 sctp_total_flight_increase(stcb, data_list[i]);
6580 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
6581 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6582 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
6584 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6585 (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
6586 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6587 /* SWS sender side engages */
6588 asoc->peers_rwnd = 0;
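/*
 * SWS-avoidance example (illustrative values): if the peer advertises
 * a 300-byte window while sctp_sws_sender is at its assumed default
 * of 1420, the window is clamped to zero here, so the sender emits a
 * single window probe rather than dribbling tiny packets.
 */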
6594 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
6596 struct sctp_tmit_chunk *chk, *nchk;
6598 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
6600 nchk = TAILQ_NEXT(chk, sctp_next);
6601 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6602 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
6603 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6604 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6605 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
6606 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6607 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6608 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6609 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6610 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
6611 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
6612 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
6613 /* Stray chunks must be cleaned up */
6615 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
6617 sctp_m_freem(chk->data);
6620 asoc->ctrl_queue_cnt--;
6621 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
6622 asoc->fwd_tsn_cnt--;
6623 sctp_free_a_chunk(stcb, chk);
6624 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
6625 /* special handling, we must look into the param */
6626 if (chk != asoc->str_reset) {
6627 goto clean_up_anyway;
6635 sctp_can_we_split_this(struct sctp_tcb *stcb,
6637 uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
6640 * Make a decision on if I should split a msg into multiple parts.
6641 * This is only asked of incomplete messages.
6645 * If we are doing EEOR we need to always send it if it's the
6646 * entire thing, since it might be all the sender is putting in.
6649 if (goal_mtu >= length) {
6651 * If we have data outstanding,
6652 * we get another chance when the sack
6653 * arrives to transmit - wait for more data
6655 if (stcb->asoc.total_flight == 0) {
6657 * If nothing is in flight, we zero the packet counter.
6665 /* You can fill the rest */
6670 * For those strange folk that make the send buffer
6671 * smaller than our fragmentation point, we can't
6672 * get a full msg in so we have to allow splitting.
6674 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
6677 if ((length <= goal_mtu) ||
6678 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
6679 /* Sub-optimal residual; don't split in non-EEOR mode. */
6683 * If we reach here length is larger than the goal_mtu. Do we wish
6684 * to split it for the sake of packet putting together?
6686 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
6687 /* It's OK to split it */
6688 return (min(goal_mtu, frag_point));
6690 /* Nope, can't split */
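/*
 * Example of the decision above, assuming the default sysctl values
 * (e.g. sctp_min_residual = 1452): a 9000-byte incomplete message
 * with goal_mtu = frag_point = 1400 is split into 1400-byte pieces,
 * while a 1460-byte message is held back in non-EEOR mode because its
 * 60-byte residual past goal_mtu is below the minimum.
 */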
6696 sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
6697 struct sctp_stream_out *strq,
6699 uint32_t frag_point,
6705 /* Move from the stream to the send_queue keeping track of the total */
6706 struct sctp_association *asoc;
6707 struct sctp_stream_queue_pending *sp;
6708 struct sctp_tmit_chunk *chk;
6709 struct sctp_data_chunk *dchkh;
6710 uint32_t to_move, length;
6711 uint8_t rcv_flags = 0;
6713 uint8_t send_lock_up = 0;
6715 SCTP_TCB_LOCK_ASSERT(stcb);
6718 /* sa_ignore FREED_MEMORY */
6719 sp = TAILQ_FIRST(&strq->outqueue);
6722 if (send_lock_up == 0) {
6723 SCTP_TCB_SEND_LOCK(stcb);
6726 sp = TAILQ_FIRST(&strq->outqueue);
6730 if (strq->last_msg_incomplete) {
6731 SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
6733 strq->last_msg_incomplete);
6734 strq->last_msg_incomplete = 0;
6738 SCTP_TCB_SEND_UNLOCK(stcb);
6743 if ((sp->msg_is_complete) && (sp->length == 0)) {
6744 if (sp->sender_all_done) {
6746 * We are doing deferred cleanup. Last time through,
6747 * when we took all the data, the sender_all_done was not set.
6750 if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
6751 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
6752 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
6753 sp->sender_all_done,
6755 sp->msg_is_complete,
6759 if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
6760 SCTP_TCB_SEND_LOCK(stcb);
6763 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
6764 TAILQ_REMOVE(&strq->outqueue, sp, next);
6765 sctp_free_remote_addr(sp->net);
6767 sctp_m_freem(sp->data);
6770 sctp_free_a_strmoq(stcb, sp);
6771 /* we can't be locked to it */
6773 stcb->asoc.locked_on_sending = NULL;
6775 SCTP_TCB_SEND_UNLOCK(stcb);
6778 /* back to get the next msg */
6782 * sender just finished this but still holds a reference.
6791 /* is there some to get */
6792 if (sp->length == 0) {
6798 } else if (sp->discard_rest) {
6799 if (send_lock_up == 0) {
6800 SCTP_TCB_SEND_LOCK(stcb);
6803 /* Whack down the size */
6804 atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
6805 if ((stcb->sctp_socket != NULL) && \
6806 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
6807 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
6808 atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
6811 sctp_m_freem(sp->data);
6813 sp->tail_mbuf = NULL;
6823 some_taken = sp->some_taken;
6824 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
6825 sp->msg_is_complete = 1;
6828 length = sp->length;
6829 if (sp->msg_is_complete) {
6830 /* The message is complete */
6831 to_move = min(length, frag_point);
6832 if (to_move == length) {
6833 /* All of it fits in the MTU */
6834 if (sp->some_taken) {
6835 rcv_flags |= SCTP_DATA_LAST_FRAG;
6836 sp->put_last_out = 1;
6838 rcv_flags |= SCTP_DATA_NOT_FRAG;
6839 sp->put_last_out = 1;
6842 /* Not all of it fits, we fragment */
6843 if (sp->some_taken == 0) {
6844 rcv_flags |= SCTP_DATA_FIRST_FRAG;
6849 to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
6852 * We use a snapshot of length in case it
6853 * is expanding during the compare.
6858 if (to_move >= llen) {
6860 if (send_lock_up == 0) {
6862 * We are taking all of an incomplete msg
6863 * thus we need a send lock.
6865 SCTP_TCB_SEND_LOCK(stcb);
6867 if (sp->msg_is_complete) {
6869 * the sender finished the msg.
6876 if (sp->some_taken == 0) {
6877 rcv_flags |= SCTP_DATA_FIRST_FRAG;
6881 /* Nothing to take. */
6882 if (sp->some_taken) {
6891 /* If we reach here, we can copy out a chunk */
6892 sctp_alloc_a_chunk(stcb, chk);
6894 /* No chunk memory */
6900 * Setup for unordered if needed by looking at the user-sent info flags.
6903 if (sp->sinfo_flags & SCTP_UNORDERED) {
6904 rcv_flags |= SCTP_DATA_UNORDERED;
6906 if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
6907 ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
6908 rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
6910 /* clear out the chunk before setting up */
6911 memset(chk, 0, sizeof(*chk));
6912 chk->rec.data.rcv_flags = rcv_flags;
6914 if (to_move >= length) {
6915 /* we think we can steal the whole thing */
6916 if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
6917 SCTP_TCB_SEND_LOCK(stcb);
6920 if (to_move < sp->length) {
6921 /* bail, it changed */
6924 chk->data = sp->data;
6925 chk->last_mbuf = sp->tail_mbuf;
6926 /* register the stealing */
6927 sp->data = sp->tail_mbuf = NULL;
6932 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
6933 chk->last_mbuf = NULL;
6934 if (chk->data == NULL) {
6935 sp->some_taken = some_taken;
6936 sctp_free_a_chunk(stcb, chk);
6941 #ifdef SCTP_MBUF_LOGGING
6942 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6947 if (SCTP_BUF_IS_EXTENDED(mat)) {
6948 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
6950 mat = SCTP_BUF_NEXT(mat);
6954 /* Pull off the data */
6955 m_adj(sp->data, to_move);
6956 /* Now let's work our way down and compact it */
6958 while (m && (SCTP_BUF_LEN(m) == 0)) {
6959 sp->data = SCTP_BUF_NEXT(m);
6960 SCTP_BUF_NEXT(m) = NULL;
6961 if (sp->tail_mbuf == m) {
6963 * Freeing tail? TSNH since
6964 * we supposedly were taking less
6965 * than the sp->length.
6968 panic("Huh, freing tail? - TSNH");
6970 SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
6971 sp->tail_mbuf = sp->data = NULL;
6980 if (SCTP_BUF_IS_EXTENDED(chk->data)) {
6981 chk->copy_by_ref = 1;
6983 chk->copy_by_ref = 0;
6986 * get last_mbuf and counts of mb usage. This is ugly but hopefully
6987 * it's only one mbuf.
6989 if (chk->last_mbuf == NULL) {
6990 chk->last_mbuf = chk->data;
6991 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
6992 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
6995 if (to_move > length) {
6996 /*- This should not happen either
6997 * since we always lower to_move to the size
6998 * of sp->length if it's larger.
7001 panic("Huh, how can to_move be larger?");
7003 SCTP_PRINTF("Huh, how can to_move be larger?\n");
7007 atomic_subtract_int(&sp->length, to_move);
7009 if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
7010 /* Not enough room for a chunk header, get some */
7013 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
7016 * we're in trouble here. _PREPEND below will free
7017 * all the data if there is no leading space, so we
7018 * must put the data back and restore.
7020 if (send_lock_up == 0) {
7021 SCTP_TCB_SEND_LOCK(stcb);
7024 if (chk->data == NULL) {
7025 /* unsteal the data */
7026 sp->data = chk->data;
7027 sp->tail_mbuf = chk->last_mbuf;
7031 /* reassemble the data */
7033 sp->data = chk->data;
7034 SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
7036 sp->some_taken = some_taken;
7037 atomic_add_int(&sp->length, to_move);
7040 sctp_free_a_chunk(stcb, chk);
7044 SCTP_BUF_LEN(m) = 0;
7045 SCTP_BUF_NEXT(m) = chk->data;
7047 M_ALIGN(chk->data, 4);
7050 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
7051 if (chk->data == NULL) {
7052 /* HELP, TSNH since we assured it would not above? */
7054 panic("prepend failes HELP?");
7056 SCTP_PRINTF("prepend fails HELP?\n");
7057 sctp_free_a_chunk(stcb, chk);
7063 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
7064 chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
7065 chk->book_size_scale = 0;
7066 chk->sent = SCTP_DATAGRAM_UNSENT;
7069 chk->asoc = &stcb->asoc;
7070 chk->pad_inplace = 0;
7071 chk->no_fr_allowed = 0;
7072 chk->rec.data.stream_seq = sp->strseq;
7073 chk->rec.data.stream_number = sp->stream;
7074 chk->rec.data.payloadtype = sp->ppid;
7075 chk->rec.data.context = sp->context;
7076 chk->rec.data.doing_fast_retransmit = 0;
7077 chk->rec.data.ect_nonce = 0; /* ECN Nonce */
7079 chk->rec.data.timetodrop = sp->ts;
7080 chk->flags = sp->act_flags;
7083 atomic_add_int(&chk->whoTo->ref_count, 1);
7085 if (sp->holds_key_ref) {
7086 chk->auth_keyid = sp->auth_keyid;
7087 sctp_auth_key_acquire(stcb, chk->auth_keyid);
7088 chk->holds_key_ref = 1;
7090 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
7091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
7092 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
7093 (uintptr_t) stcb, sp->length,
7094 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
7095 chk->rec.data.TSN_seq);
7097 dchkh = mtod(chk->data, struct sctp_data_chunk *);
7099 * Put the rest of the things in place now. Size was done earlier in
7100 * previous loop prior to padding.
7103 #ifdef SCTP_ASOCLOG_OF_TSNS
7104 SCTP_TCB_LOCK_ASSERT(stcb);
7105 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
7106 asoc->tsn_out_at = 0;
7107 asoc->tsn_out_wrapped = 1;
7109 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
7110 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
7111 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
7112 asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
7113 asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
7114 asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
7115 asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
7116 asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
7120 dchkh->ch.chunk_type = SCTP_DATA;
7121 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
7122 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
7123 dchkh->dp.stream_id = htons(strq->stream_no);
7124 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
7125 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
7126 dchkh->ch.chunk_length = htons(chk->send_size);
7127 /* Now advance the chk->send_size by the actual pad needed. */
7128 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
7133 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
7134 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
7135 chk->pad_inplace = 1;
7137 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
7138 /* pad added an mbuf */
7139 chk->last_mbuf = lm;
7141 chk->send_size += pads;
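/*
 * Padding example for the block above: a 13-byte fragment gives
 * book_size = send_size = 13 + sizeof(struct sctp_data_chunk) = 29,
 * SCTP_SIZE32(29) = 32, hence pads = 3; the chunk occupies 32 bytes
 * on the wire while chunk_length stays htons(29), since the advertised
 * chunk length excludes padding.
 */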
7143 /* We only re-set the policy if it is on */
7144 if (sp->pr_sctp_on) {
7145 sctp_set_prsctp_policy(sp);
7146 asoc->pr_sctp_cnt++;
7147 chk->pr_sctp_on = 1;
7149 chk->pr_sctp_on = 0;
7151 if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
7152 /* All done, pull and kill the message */
7153 atomic_subtract_int(&asoc->stream_queue_cnt, 1);
7154 if (sp->put_last_out == 0) {
7155 SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
7156 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
7157 sp->sender_all_done,
7159 sp->msg_is_complete,
7163 if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
7164 SCTP_TCB_SEND_LOCK(stcb);
7167 TAILQ_REMOVE(&strq->outqueue, sp, next);
7168 sctp_free_remote_addr(sp->net);
7170 sctp_m_freem(sp->data);
7173 sctp_free_a_strmoq(stcb, sp);
7175 /* we can't be locked to it */
7177 stcb->asoc.locked_on_sending = NULL;
7179 /* more to go, we are locked */
7182 asoc->chunks_on_out_queue++;
7183 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
7184 asoc->send_queue_cnt++;
7187 SCTP_TCB_SEND_UNLOCK(stcb);
7194 static struct sctp_stream_out *
7195 sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc)
7197 struct sctp_stream_out *strq;
7199 /* Find the next stream to use */
7200 if (asoc->last_out_stream == NULL) {
7201 strq = TAILQ_FIRST(&asoc->out_wheel);
7203 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
7205 strq = TAILQ_FIRST(&asoc->out_wheel);
7213 sctp_fill_outqueue(struct sctp_tcb *stcb,
7214 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now)
7216 struct sctp_association *asoc;
7217 struct sctp_stream_out *strq, *strqn;
7218 int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
7220 struct sctp_stream_queue_pending *sp;
7222 SCTP_TCB_LOCK_ASSERT(stcb);
7225 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
7226 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7228 /* ?? not sure what else to do */
7229 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7232 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
7234 /* Need an allowance for the data chunk header too */
7235 goal_mtu -= sizeof(struct sctp_data_chunk);
7237 /* must make even word boundary */
7238 goal_mtu &= 0xfffffffc;
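/*
 * Worked example, assuming SCTP_MIN_V4_OVERHEAD is the 20-byte IPv4
 * header plus the 12-byte common SCTP header: an IPv4 net with
 * mtu = 1500 yields goal_mtu = 1500 - 32 - sizeof(struct
 * sctp_data_chunk) = 1452, already a multiple of 4, as the fill
 * budget for this pass.
 */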
7239 if (asoc->locked_on_sending) {
7240 /* We are stuck on one stream until the message completes. */
7241 strqn = strq = asoc->locked_on_sending;
7244 strqn = strq = sctp_select_a_stream(stcb, asoc);
7248 while ((goal_mtu > 0) && strq) {
7249 sp = TAILQ_FIRST(&strq->outqueue);
7251 * If CMT is off, we must validate that the stream in
7252 * question has its first item pointed towards the network
7253 * destination requested by the caller. Note that if we
7254 * turn out to be locked to a stream (assigning TSNs), then
7255 * we must stop, since we cannot look for another stream
7256 * with data to send to that destination. In CMT's case, by
7257 * skipping this check, we will send one data packet towards
7258 * the requested net.
7263 if ((sp->net != net) &&
7264 (asoc->sctp_cmt_on_off == 0)) {
7265 /* none for this network */
7269 strq = sctp_select_a_stream(stcb, asoc);
7273 if (strqn == strq) {
7274 /* I have circled */
7282 moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked,
7283 &giveup, eeor_mode, &bail);
7285 asoc->last_out_stream = strq;
7288 asoc->locked_on_sending = strq;
7289 if ((moved_how_much == 0) || (giveup) || bail)
7290 /* no more to move for now */
7293 asoc->locked_on_sending = NULL;
7294 if (TAILQ_EMPTY(&strq->outqueue)) {
7295 if (strq == strqn) {
7296 /* Must move start to next one */
7297 strqn = TAILQ_NEXT(strq, next_spoke);
7298 if (strqn == NULL) {
7299 strqn = TAILQ_FIRST(&asoc->out_wheel);
7300 if (strqn == NULL) {
7305 sctp_remove_from_wheel(stcb, asoc, strq, 0);
7307 if ((giveup) || bail) {
7310 strq = sctp_select_a_stream(stcb, asoc);
7315 total_moved += moved_how_much;
7316 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
7317 goal_mtu &= 0xfffffffc;
7322 if (total_moved == 0) {
7323 if ((stcb->asoc.sctp_cmt_on_off == 0) &&
7324 (net == stcb->asoc.primary_destination)) {
7325 /* ran dry for primary network net */
7326 SCTP_STAT_INCR(sctps_primary_randry);
7327 } else if (stcb->asoc.sctp_cmt_on_off == 1) {
7328 /* ran dry with CMT on */
7329 SCTP_STAT_INCR(sctps_cmt_randry);
7335 sctp_fix_ecn_echo(struct sctp_association *asoc)
7337 struct sctp_tmit_chunk *chk;
7339 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7340 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
7341 chk->sent = SCTP_DATAGRAM_UNSENT;
7347 sctp_move_to_an_alt(struct sctp_tcb *stcb,
7348 struct sctp_association *asoc,
7349 struct sctp_nets *net)
7351 struct sctp_tmit_chunk *chk;
7352 struct sctp_nets *a_net;
7354 SCTP_TCB_LOCK_ASSERT(stcb);
7356 * JRS 5/14/07 - If CMT PF is turned on, find an alternate
7357 * destination using the PF algorithm for finding alternate destinations.
7360 if ((asoc->sctp_cmt_on_off == 1) &&
7361 (asoc->sctp_cmt_pf > 0)) {
7362 a_net = sctp_find_alternate_net(stcb, net, 2);
7364 a_net = sctp_find_alternate_net(stcb, net, 0);
7366 if ((a_net != net) &&
7367 ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
7369 * We only proceed if a valid alternate is found that is not
7370 * this one and is reachable. Here we must move all chunks
7371 * queued in the send queue off of the destination address and move them to an alternate.
7374 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
7375 if (chk->whoTo == net) {
7376 /* Move the chunk to our alternate */
7377 sctp_free_remote_addr(chk->whoTo);
7379 atomic_add_int(&a_net->ref_count, 1);
7386 sctp_med_chunk_output(struct sctp_inpcb *inp,
7387 struct sctp_tcb *stcb,
7388 struct sctp_association *asoc,
7391 int control_only, int from_where,
7392 struct timeval *now, int *now_filled, int frag_point, int so_locked
7393 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
7399 * Ok this is the generic chunk service queue. We must do the
7400 * following: - Service the stream queue that is next, moving any
7401 * message (note I must get a complete message, i.e. FIRST/MIDDLE and
7402 * LAST, to the out queue in one pass) and assigning TSNs. - Check to
7403 * see if the cwnd/rwnd allows any output; if so, go ahead and
7404 * formulate and send the low level chunks, making sure to combine
7405 * any control in the control chunk queue also.
7407 struct sctp_nets *net, *start_at, *old_start_at = NULL;
7408 struct mbuf *outchain, *endoutchain;
7409 struct sctp_tmit_chunk *chk, *nchk;
7411 /* temp arrays for unlinking */
7412 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7413 int no_fragmentflg, error;
7414 unsigned int max_rwnd_per_dest;
7415 int one_chunk, hbflag, skip_data_for_this_net;
7416 int asconf, cookie, no_out_cnt;
7417 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
7418 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
7420 uint32_t auth_offset = 0;
7421 struct sctp_auth_chunk *auth = NULL;
7422 uint16_t auth_keyid;
7423 int override_ok = 1;
7424 int data_auth_reqd = 0;
7427 * JRS 5/14/07 - Add flag for whether a heartbeat is sent to the destination.
7434 auth_keyid = stcb->asoc.authinfo.active_keyid;
7436 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
7437 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
7438 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
7443 ctl_cnt = no_out_cnt = asconf = cookie = 0;
7445 * First let's prime the pump. For each destination, if there is room
7446 * in the flight size, attempt to pull an MTU's worth out of the
7447 * stream queues into the general send_queue.
7449 #ifdef SCTP_AUDITING_ENABLED
7450 sctp_audit_log(0xC2, 2);
7452 SCTP_TCB_LOCK_ASSERT(stcb);
7454 if ((control_only) || (asoc->stream_reset_outstanding))
7459 /* Nothing possible to send? */
7460 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
7461 TAILQ_EMPTY(&asoc->asconf_send_queue) &&
7462 TAILQ_EMPTY(&asoc->send_queue) &&
7463 TAILQ_EMPTY(&asoc->out_wheel)) {
7467 if (asoc->peers_rwnd == 0) {
7468 /* No room in peers rwnd */
7470 if (asoc->total_flight > 0) {
7471 /* we are allowed one chunk in flight */
7475 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
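/*
 * Example of the per-destination budget above: with peers_rwnd =
 * 48000, total_flight = 16000 and numnets = 2, each destination may
 * carry max_rwnd_per_dest = 32000 bytes of flight before the CMT
 * check further down skips it.
 */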
7476 if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
7477 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7479 * This for loop we are in takes in each net, and if
7480 * it's got space in cwnd and has data sent to it
7481 * (when CMT is off) then it calls
7482 * sctp_fill_outqueue for the net. This gets data on
7483 * the send queue for that network.
7485 * In sctp_fill_outqueue TSNs are assigned and data is
7486 * copied out of the stream buffers. Note it is mostly
7487 * copy by reference (we hope).
7489 net->window_probe = 0;
7490 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) || (net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
7491 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7492 sctp_log_cwnd(stcb, net, 1,
7493 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7497 if ((asoc->sctp_cmt_on_off == 0) &&
7498 (net->ref_count < 2)) {
7499 /* nothing can be in queue for this guy */
7500 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7501 sctp_log_cwnd(stcb, net, 2,
7502 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7506 if (net->flight_size >= net->cwnd) {
7507 /* skip this network, no room - can't fill */
7508 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7509 sctp_log_cwnd(stcb, net, 3,
7510 SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7514 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
7515 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
7517 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now);
7519 /* memory alloc failure */
7525 /* now service each destination and send out what we can for it */
7526 /* Nothing to send? */
7527 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
7528 (TAILQ_FIRST(&asoc->asconf_send_queue) == NULL) &&
7529 (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
7533 if (asoc->sctp_cmt_on_off == 1) {
7534 /* get the last start point */
7535 start_at = asoc->last_net_cmt_send_started;
7536 if (start_at == NULL) {
7537 /* null, so go to the beginning */
7538 start_at = TAILQ_FIRST(&asoc->nets);
7540 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
7541 if (start_at == NULL) {
7542 start_at = TAILQ_FIRST(&asoc->nets);
7545 asoc->last_net_cmt_send_started = start_at;
7547 start_at = TAILQ_FIRST(&asoc->nets);
7549 old_start_at = NULL;
7550 again_one_more_time:
7551 for (net = start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
7552 /* how much can we send? */
7553 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
7554 if (old_start_at && (old_start_at == net)) {
7555 /* through the list completely. */
7559 if ((asoc->sctp_cmt_on_off == 0) && (net->ref_count < 2)) {
7561 * Ref-count of 1 so we cannot have data or control
7562 * queued to this address. Skip it (non-CMT).
7566 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
7567 (TAILQ_FIRST(&asoc->asconf_send_queue) == NULL) &&
7568 (net->flight_size >= net->cwnd)) {
7570 * Nothing on control or asconf and flight is full,
7571 * we can skip even in the CMT case.
7575 ctl_cnt = bundle_at = 0;
7576 endoutchain = outchain = NULL;
7579 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
7580 skip_data_for_this_net = 1;
7582 skip_data_for_this_net = 0;
7584 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
7586 * if we have a route and an ifp, check to see if we
7587 * have room to send to this guy
7591 ifp = net->ro.ro_rt->rt_ifp;
7592 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
7593 SCTP_STAT_INCR(sctps_ifnomemqueued);
7594 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
7595 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
7600 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
7602 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7606 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7616 if (mtu > asoc->peers_rwnd) {
7617 if (asoc->total_flight > 0) {
7618 /* We have a packet in flight somewhere */
7619 r_mtu = asoc->peers_rwnd;
7621 /* We are always allowed to send one MTU out */
7628 /************************/
7629 /* ASCONF transmission */
7630 /************************/
7631 /* Now first let's go through the asconf queue */
7632 for (chk = TAILQ_FIRST(&asoc->asconf_send_queue);
7634 nchk = TAILQ_NEXT(chk, sctp_next);
7635 if (chk->rec.chunk_id.id != SCTP_ASCONF) {
7638 if (chk->whoTo != net) {
7640 * No, not sent to the network we are looking at.
7645 if (chk->data == NULL) {
7648 if (chk->sent != SCTP_DATAGRAM_UNSENT &&
7649 chk->sent != SCTP_DATAGRAM_RESEND) {
7653 * if no AUTH is yet included and this chunk
7654 * requires it, make sure to account for it. We
7655 * don't apply the size until the AUTH chunk is
7656 * actually added below in case there is no room for
7657 * this chunk. NOTE: we overload the use of "omtu" here to account for it.
7660 if ((auth == NULL) &&
7661 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7662 stcb->asoc.peer_auth_chunks)) {
7663 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7666 /* Here we do NOT factor the r_mtu */
7667 if ((chk->send_size < (int)(mtu - omtu)) ||
7668 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7670 * We probably should glom the mbuf chain
7671 * from the chk->data for control but the
7672 * problem is it becomes yet one more level
7673 * of tracking to do if for some reason
7674 * output fails. Then I have got to
7675 * reconstruct the merged control chain.. el
7676 * yucko.. for now we take the easy way and do the copy.
7680 * Add an AUTH chunk, if chunk requires it;
7681 * save the offset into the chain for AUTH
7683 if ((auth == NULL) &&
7684 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7685 stcb->asoc.peer_auth_chunks))) {
7686 outchain = sctp_add_auth_chunk(outchain,
7691 chk->rec.chunk_id.id);
7692 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7694 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
7695 (int)chk->rec.chunk_id.can_take_data,
7696 chk->send_size, chk->copy_by_ref);
7697 if (outchain == NULL) {
7699 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7702 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7703 /* update our MTU size */
7704 if (mtu > (chk->send_size + omtu))
7705 mtu -= (chk->send_size + omtu);
7708 to_out += (chk->send_size + omtu);
7709 /* Do clear IP_DF ? */
7710 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7713 if (chk->rec.chunk_id.can_take_data)
7716 * set hb flag since we can use these for RTO.
7722 * should sysctl this: don't bundle data
7723 * with ASCONF since it requires AUTH
7726 chk->sent = SCTP_DATAGRAM_SENT;
7730 * Ok we are out of room but we can
7731 * output without affecting the
7732 * flight size since this little guy
7733 * is a control only packet.
7735 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
7737 * do NOT clear the asconf flag as
7738 * it is used to do appropriate
7739 * source address selection.
7741 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
7742 (struct sockaddr *)&net->ro._l_addr,
7743 outchain, auth_offset, auth,
7744 stcb->asoc.authinfo.active_keyid,
7745 no_fragmentflg, 0, NULL, asconf,
7746 inp->sctp_lport, stcb->rport,
7747 htonl(stcb->asoc.peer_vtag),
7748 net->port, so_locked, NULL))) {
7749 if (error == ENOBUFS) {
7750 asoc->ifp_had_enobuf = 1;
7751 SCTP_STAT_INCR(sctps_lowlevelerr);
7753 if (from_where == 0) {
7754 SCTP_STAT_INCR(sctps_lowlevelerrusr);
7756 if (*now_filled == 0) {
7757 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7759 *now = net->last_sent_time;
7761 net->last_sent_time = *now;
7764 /* error, could not output */
7765 if (error == EHOSTUNREACH) {
7771 sctp_move_to_an_alt(stcb, asoc, net);
7776 asoc->ifp_had_enobuf = 0;
7777 if (*now_filled == 0) {
7778 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7780 *now = net->last_sent_time;
7782 net->last_sent_time = *now;
7786 * increase the number we sent, if a
7787 * cookie is sent we don't tell them any was sent out.
7790 outchain = endoutchain = NULL;
7794 *num_out += ctl_cnt;
7795 /* recalc a clean slate and setup */
7796 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
7797 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
7799 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
7806 /************************/
7807 /* Control transmission */
7808 /************************/
7809 /* Now first let's go through the control queue */
7810 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
7812 nchk = TAILQ_NEXT(chk, sctp_next);
7813 if (chk->whoTo != net) {
7815 * No, not sent to the network we are looking at.
7820 if (chk->data == NULL) {
7823 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
7825 * It must be unsent. Cookies and ASCONFs
7826 * hang around, but their timers will force them out
7827 * when marked for resend.
7832 * if no AUTH is yet included and this chunk
7833 * requires it, make sure to account for it. We
7834 * don't apply the size until the AUTH chunk is
7835 * actually added below in case there is no room for
7836 * this chunk. NOTE: we overload the use of "omtu" here to account for it.
7839 if ((auth == NULL) &&
7840 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7841 stcb->asoc.peer_auth_chunks)) {
7842 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7845 /* Here we do NOT factor the r_mtu */
7846 if ((chk->send_size <= (int)(mtu - omtu)) ||
7847 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7849 * We probably should glom the mbuf chain
7850 * from the chk->data for control but the
7851 * problem is it becomes yet one more level
7852 * of tracking to do if for some reason
7853 * output fails. Then I have got to
7854 * reconstruct the merged control chain.. el
7855 * yucko.. for now we take the easy way and do the copy.
7859 * Add an AUTH chunk, if chunk requires it;
7860 * save the offset into the chain for AUTH
7862 if ((auth == NULL) &&
7863 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7864 stcb->asoc.peer_auth_chunks))) {
7865 outchain = sctp_add_auth_chunk(outchain,
7870 chk->rec.chunk_id.id);
7871 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7873 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
7874 (int)chk->rec.chunk_id.can_take_data,
7875 chk->send_size, chk->copy_by_ref);
7876 if (outchain == NULL) {
7878 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
7881 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7882 /* update our MTU size */
7883 if (mtu > (chk->send_size + omtu))
7884 mtu -= (chk->send_size + omtu);
7887 to_out += (chk->send_size + omtu);
7888 /* Do clear IP_DF ? */
7889 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7892 if (chk->rec.chunk_id.can_take_data)
7894 /* Mark things to be removed, if needed */
7895 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7896 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
7897 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
7898 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
7899 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
7900 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
7901 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
7902 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
7903 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
7904 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
7905 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
7907 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
7910 * JRS 5/14/07 - Set the
7911 * flag to say a heartbeat is being sent.
7916 /* remove these chunks at the end */
7917 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
7918 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
7919 /* turn off the timer */
7920 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
7921 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7922 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
7928 * Other chunks, since they have
7929 * timers running (i.e. COOKIE) we
7930 * just "trust" that it gets sent or
7934 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7938 chk->sent = SCTP_DATAGRAM_SENT;
7943 * Ok we are out of room but we can
7944 * output without affecting the
7945 * flight size since this little guy
7946 * is a control only packet.
7949 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
7951 * do NOT clear the asconf
7952 * flag as it is used to do
7953 * appropriate source
7954 * address selection.
7958 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
7961 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
7962 (struct sockaddr *)&net->ro._l_addr,
7965 stcb->asoc.authinfo.active_keyid,
7966 no_fragmentflg, 0, NULL, asconf,
7967 inp->sctp_lport, stcb->rport,
7968 htonl(stcb->asoc.peer_vtag),
7969 net->port, so_locked, NULL))) {
7970 if (error == ENOBUFS) {
7971 asoc->ifp_had_enobuf = 1;
7972 SCTP_STAT_INCR(sctps_lowlevelerr);
7974 if (from_where == 0) {
7975 SCTP_STAT_INCR(sctps_lowlevelerrusr);
7977 /* error, could not output */
7979 if (*now_filled == 0) {
7980 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7982 *now = net->last_sent_time;
7984 net->last_sent_time = *now;
7988 if (error == EHOSTUNREACH) {
7994 sctp_move_to_an_alt(stcb, asoc, net);
7999 asoc->ifp_had_enobuf = 0;
8000 /* Only HB or ASCONF advances time */
8002 if (*now_filled == 0) {
8003 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8005 *now = net->last_sent_time;
8007 net->last_sent_time = *now;
8012 * increase the number we sent, if a
8013 * cookie is sent we don't tell them any was sent out.
8016 outchain = endoutchain = NULL;
8020 *num_out += ctl_cnt;
8021 /* recalc a clean slate and setup */
8022 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
8023 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
8025 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
8032 /* JRI: if dest is in PF state, do not send data to it */
8033 if ((asoc->sctp_cmt_on_off == 1) &&
8034 (asoc->sctp_cmt_pf > 0) &&
8035 (net->dest_state & SCTP_ADDR_PF)) {
8038 if (net->flight_size >= net->cwnd) {
8041 if ((asoc->sctp_cmt_on_off == 1) &&
8042 (net->flight_size > max_rwnd_per_dest)) {
8045 /*********************/
8046 /* Data transmission */
8047 /*********************/
8049 * if AUTH for DATA is required and no AUTH has been added
8050 * yet, account for this in the mtu now... if no data can be
8051 * bundled, this adjustment won't matter anyway since the
8052 * packet will be going out...
8054 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
8055 stcb->asoc.peer_auth_chunks);
8056 if (data_auth_reqd && (auth == NULL)) {
8057 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8059 /* now let's add any data within the MTU constraints */
8060 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
8062 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
8063 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
8069 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
8070 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
8080 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) &&
8081 (skip_data_for_this_net == 0)) ||
8083 for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
8084 if (no_data_chunks) {
8085 /* let only control go out */
8089 if (net->flight_size >= net->cwnd) {
8090 /* skip this net, no room for data */
8094 nchk = TAILQ_NEXT(chk, sctp_next);
8095 if (asoc->sctp_cmt_on_off == 1) {
8096 if (chk->whoTo != net) {
8098 * For CMT, steal the data
8099 * to this network if it's not already set to it.
8102 sctp_free_remote_addr(chk->whoTo);
8104 atomic_add_int(&chk->whoTo->ref_count, 1);
8106 } else if (chk->whoTo != net) {
8107 /* No, not sent to this net */
8110 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
8112 * strange, we have a chunk that is
8113 * too big for its destination and
8114 * yet no fragment ok flag.
8115 * Something went wrong when the
8116 * PMTU changed...we did not mark
8117 * this chunk for some reason?? I
8118 * will fix it here by letting IP
8119 * fragment it for now and printing
8120 * a warning. This really should not happen.
8123 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
8124 chk->send_size, mtu);
8125 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
8127 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
8128 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
8129 struct sctp_data_chunk *dchkh;
8131 dchkh = mtod(chk->data, struct sctp_data_chunk *);
8132 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
8134 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
8135 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
8136 /* ok we will add this one */
8139 * Add an AUTH chunk, if chunk
8140 * requires it, save the offset into
8141 * the chain for AUTH
8143 if (data_auth_reqd) {
8145 outchain = sctp_add_auth_chunk(outchain,
8151 auth_keyid = chk->auth_keyid;
8153 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8154 } else if (override_ok) {
8159 auth_keyid = chk->auth_keyid;
8161 } else if (auth_keyid != chk->auth_keyid) {
8169 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
8170 chk->send_size, chk->copy_by_ref);
8171 if (outchain == NULL) {
8172 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
8173 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8174 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8177 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
8180 /* update our MTU size */
8181 /* Do clear IP_DF ? */
8182 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8185 /* unsigned subtraction of mtu */
8186 if (mtu > chk->send_size)
8187 mtu -= chk->send_size;
8190 /* unsigned subtraction of r_mtu */
8191 if (r_mtu > chk->send_size)
8192 r_mtu -= chk->send_size;
8196 to_out += chk->send_size;
8197 if ((to_out > mx_mtu) && no_fragmentflg) {
8199 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
8201 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
8205 chk->window_probe = 0;
8206 data_list[bundle_at++] = chk;
8207 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8211 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
8212 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
8213 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
8215 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
8217 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
8218 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
8228 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
8230 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
8231 if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
8232 data_list[0]->window_probe = 1;
8233 net->window_probe = 1;
8239 * Must be sent in order of the
8240 * TSNs (on a network).
8244 } /* for (chunk gather loop for this net) */
8245 } /* if asoc.state OPEN */
8247 /* Is there something to send for this destination? */
8249 /* We may need to start a control timer or two */
8251 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
8254 * do NOT clear the asconf flag as it is
8255 * used to do appropriate source address
8260 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
8263 /* must start a send timer if data is being sent */
8264 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8266 * no timer running on this destination; restart it.
8269 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8270 } else if ((asoc->sctp_cmt_on_off == 1) &&
8271 (asoc->sctp_cmt_pf > 0) &&
8273 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) &&
8274 (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
8276 * JRS 5/14/07 - If a HB has been sent to a
8277 * PF destination and no T3 timer is
8278 * currently running, start the T3 timer to
8279 * track the HBs that were sent.
8281 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8283 /* Now send it, if there is anything to send :> */
8284 if ((error = sctp_lowlevel_chunk_output(inp,
8287 (struct sockaddr *)&net->ro._l_addr,
8296 inp->sctp_lport, stcb->rport,
8297 htonl(stcb->asoc.peer_vtag),
8298 net->port, so_locked, NULL))) {
8299 /* error, we could not output */
8300 if (error == ENOBUFS) {
8301 SCTP_STAT_INCR(sctps_lowlevelerr);
8302 asoc->ifp_had_enobuf = 1;
8304 if (from_where == 0) {
8305 SCTP_STAT_INCR(sctps_lowlevelerrusr);
8307 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
8309 if (*now_filled == 0) {
8310 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8312 *now = net->last_sent_time;
8314 net->last_sent_time = *now;
8318 if (error == EHOSTUNREACH) {
8320 * Destination went unreachable
8323 sctp_move_to_an_alt(stcb, asoc, net);
8327 * I add this to be paranoid. As far as
8328 * I can tell the continue takes us back to
8329 * the top of the for loop over the nets, but
8330 * I reset these again here just to make sure.
8332 ctl_cnt = bundle_at = 0;
8333 continue; /* This takes us back to the
8334 * for() for the nets. */
8336 asoc->ifp_had_enobuf = 0;
8338 outchain = endoutchain = NULL;
8341 if (bundle_at || hbflag) {
8342 /* For data/asconf and hb set time */
8343 if (*now_filled == 0) {
8344 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
8346 *now = net->last_sent_time;
8348 net->last_sent_time = *now;
8352 *num_out += (ctl_cnt + bundle_at);
8355 /* setup for a RTO measurement */
8356 tsns_sent = data_list[0]->rec.data.TSN_seq;
8357 /* fill time if not already filled */
8358 if (*now_filled == 0) {
8359 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8361 *now = asoc->time_last_sent;
8363 asoc->time_last_sent = *now;
8365 data_list[0]->do_rtt = 1;
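			/*
			 * Only the first chunk of the burst is timed for RTT;
			 * one sample per flight is enough for the RTO estimator.
			 */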
8366 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
8367 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
8368 if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
8369 if (net->flight_size < net->cwnd) {
8370 /* start or restart it */
8371 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8372 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
8373 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
8375 SCTP_STAT_INCR(sctps_earlyfrstrout);
8376 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
8378 /* stop it if it's running */
8379 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
8380 SCTP_STAT_INCR(sctps_earlyfrstpout);
8381 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
8382 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
8391 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8392 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
8395 if (old_start_at == NULL) {
8396 old_start_at = start_at;
8397 start_at = TAILQ_FIRST(&asoc->nets);
8399 goto again_one_more_time;
8402 * At the end there should be no NON timed chunks hanging on this queue.
8405 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
8406 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
8408 if ((*num_out == 0) && (*reason_code == 0)) {
8413 sctp_clean_up_ctl(stcb, asoc);
8418 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
8421 * Prepend an OPERATIONAL_ERROR chunk header and put it on the end of
8422 * the control chunk queue.
8424 struct sctp_chunkhdr *hdr;
8425 struct sctp_tmit_chunk *chk;
8428 SCTP_TCB_LOCK_ASSERT(stcb);
8429 sctp_alloc_a_chunk(stcb, chk);
8432 sctp_m_freem(op_err);
8435 chk->copy_by_ref = 0;
8436 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
8437 if (op_err == NULL) {
8438 sctp_free_a_chunk(stcb, chk);
8443 while (mat != NULL) {
8444 chk->send_size += SCTP_BUF_LEN(mat);
8445 mat = SCTP_BUF_NEXT(mat);
8447 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
8448 chk->rec.chunk_id.can_take_data = 1;
8449 chk->sent = SCTP_DATAGRAM_UNSENT;
8452 chk->asoc = &stcb->asoc;
8454 chk->whoTo = chk->asoc->primary_destination;
8455 atomic_add_int(&chk->whoTo->ref_count, 1);
8456 hdr = mtod(op_err, struct sctp_chunkhdr *);
8457 hdr->chunk_type = SCTP_OPERATION_ERROR;
8458 hdr->chunk_flags = 0;
8459 hdr->chunk_length = htons(chk->send_size);
8460 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
8463 chk->asoc->ctrl_queue_cnt++;
8467 sctp_send_cookie_echo(struct mbuf *m,
8469 struct sctp_tcb *stcb,
8470 struct sctp_nets *net)
8473 * pull out the cookie and put it at the front of the control chunk queue.
8477 struct mbuf *cookie;
8478 struct sctp_paramhdr parm, *phdr;
8479 struct sctp_chunkhdr *hdr;
8480 struct sctp_tmit_chunk *chk;
8481 uint16_t ptype, plen;
8483 /* First find the cookie in the param area */
8485 at = offset + sizeof(struct sctp_init_chunk);
8487 SCTP_TCB_LOCK_ASSERT(stcb);
8489 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
8493 ptype = ntohs(phdr->param_type);
8494 plen = ntohs(phdr->param_length);
8495 if (ptype == SCTP_STATE_COOKIE) {
8498 /* found the cookie */
8499 if ((pad = (plen % 4))) {
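				/*
				 * parameters are padded to a 4-byte boundary on
				 * the wire; round plen up so the copy spans the
				 * pad as well
				 */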
8502 cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
8503 if (cookie == NULL) {
8507 #ifdef SCTP_MBUF_LOGGING
8508 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8513 if (SCTP_BUF_IS_EXTENDED(mat)) {
8514 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8516 mat = SCTP_BUF_NEXT(mat);
8522 at += SCTP_SIZE32(plen);
8524 if (cookie == NULL) {
8525 /* Did not find the cookie */
8528 /* ok, we got the cookie; let's change it into a cookie echo chunk */
8530 /* first the change from param to cookie */
8531 hdr = mtod(cookie, struct sctp_chunkhdr *);
8532 hdr->chunk_type = SCTP_COOKIE_ECHO;
8533 hdr->chunk_flags = 0;
8534 /* get the chunk stuff now and place it in the FRONT of the queue */
8535 sctp_alloc_a_chunk(stcb, chk);
8538 sctp_m_freem(cookie);
8541 chk->copy_by_ref = 0;
8542 chk->send_size = plen;
8543 chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
8544 chk->rec.chunk_id.can_take_data = 0;
8545 chk->sent = SCTP_DATAGRAM_UNSENT;
8547 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8548 chk->asoc = &stcb->asoc;
8550 chk->whoTo = chk->asoc->primary_destination;
8551 atomic_add_int(&chk->whoTo->ref_count, 1);
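	/*
	 * Insert at the HEAD of the control queue: RFC 4960 requires the
	 * COOKIE-ECHO to be the first chunk in the packet (DATA may only be
	 * bundled after it).
	 */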
8552 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
8553 chk->asoc->ctrl_queue_cnt++;
8558 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
8562 struct sctp_nets *net)
8565 * take a HB request and make it into a HB ack and send it.
8567 struct mbuf *outchain;
8568 struct sctp_chunkhdr *chdr;
8569 struct sctp_tmit_chunk *chk;
8573 /* must have a net pointer */
8576 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
8577 if (outchain == NULL) {
8578 /* gak out of memory */
8581 #ifdef SCTP_MBUF_LOGGING
8582 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8587 if (SCTP_BUF_IS_EXTENDED(mat)) {
8588 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8590 mat = SCTP_BUF_NEXT(mat);
8594 chdr = mtod(outchain, struct sctp_chunkhdr *);
8595 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
8596 chdr->chunk_flags = 0;
8597 if (chk_length % 4) {
8599 uint32_t cpthis = 0;
8602 padlen = 4 - (chk_length % 4);
8603 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
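	/*
	 * chunks must be padded out to a 4-byte boundary; m_copyback()
	 * appends the needed zero bytes from cpthis
	 */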
8605 sctp_alloc_a_chunk(stcb, chk);
8608 sctp_m_freem(outchain);
8611 chk->copy_by_ref = 0;
8612 chk->send_size = chk_length;
8613 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
8614 chk->rec.chunk_id.can_take_data = 1;
8615 chk->sent = SCTP_DATAGRAM_UNSENT;
8618 chk->asoc = &stcb->asoc;
8619 chk->data = outchain;
8621 atomic_add_int(&chk->whoTo->ref_count, 1);
8622 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8623 chk->asoc->ctrl_queue_cnt++;
8627 sctp_send_cookie_ack(struct sctp_tcb *stcb)
8629 /* formulate and queue a cookie-ack back to sender */
8630 struct mbuf *cookie_ack;
8631 struct sctp_chunkhdr *hdr;
8632 struct sctp_tmit_chunk *chk;
8635 SCTP_TCB_LOCK_ASSERT(stcb);
8637 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
8638 if (cookie_ack == NULL) {
8642 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
8643 sctp_alloc_a_chunk(stcb, chk);
8646 sctp_m_freem(cookie_ack);
8649 chk->copy_by_ref = 0;
8650 chk->send_size = sizeof(struct sctp_chunkhdr);
8651 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
8652 chk->rec.chunk_id.can_take_data = 1;
8653 chk->sent = SCTP_DATAGRAM_UNSENT;
8656 chk->asoc = &stcb->asoc;
8657 chk->data = cookie_ack;
8658 if (chk->asoc->last_control_chunk_from != NULL) {
8659 chk->whoTo = chk->asoc->last_control_chunk_from;
8661 chk->whoTo = chk->asoc->primary_destination;
8663 atomic_add_int(&chk->whoTo->ref_count, 1);
8664 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
8665 hdr->chunk_type = SCTP_COOKIE_ACK;
8666 hdr->chunk_flags = 0;
8667 hdr->chunk_length = htons(chk->send_size);
8668 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
8669 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8670 chk->asoc->ctrl_queue_cnt++;
8676 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
8678 /* formulate and queue a SHUTDOWN-ACK back to the sender */
8679 struct mbuf *m_shutdown_ack;
8680 struct sctp_shutdown_ack_chunk *ack_cp;
8681 struct sctp_tmit_chunk *chk;
8683 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
8684 if (m_shutdown_ack == NULL) {
8688 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
8689 sctp_alloc_a_chunk(stcb, chk);
8692 sctp_m_freem(m_shutdown_ack);
8695 chk->copy_by_ref = 0;
8696 chk->send_size = sizeof(struct sctp_chunkhdr);
8697 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
8698 chk->rec.chunk_id.can_take_data = 1;
8699 chk->sent = SCTP_DATAGRAM_UNSENT;
8702 chk->asoc = &stcb->asoc;
8703 chk->data = m_shutdown_ack;
8705 atomic_add_int(&net->ref_count, 1);
8707 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
8708 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
8709 ack_cp->ch.chunk_flags = 0;
8710 ack_cp->ch.chunk_length = htons(chk->send_size);
8711 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
8712 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8713 chk->asoc->ctrl_queue_cnt++;
8718 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
8720 /* formulate and queue a SHUTDOWN to the sender */
8721 struct mbuf *m_shutdown;
8722 struct sctp_shutdown_chunk *shutdown_cp;
8723 struct sctp_tmit_chunk *chk;
8725 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
8726 if (m_shutdown == NULL) {
8730 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
8731 sctp_alloc_a_chunk(stcb, chk);
8734 sctp_m_freem(m_shutdown);
8737 chk->copy_by_ref = 0;
8738 chk->send_size = sizeof(struct sctp_shutdown_chunk);
8739 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
8740 chk->rec.chunk_id.can_take_data = 1;
8741 chk->sent = SCTP_DATAGRAM_UNSENT;
8744 chk->asoc = &stcb->asoc;
8745 chk->data = m_shutdown;
8747 atomic_add_int(&net->ref_count, 1);
8749 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
8750 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
8751 shutdown_cp->ch.chunk_flags = 0;
8752 shutdown_cp->ch.chunk_length = htons(chk->send_size);
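	/*
	 * SHUTDOWN carries our cumulative TSN ack so the peer knows what, if
	 * anything, it still needs to retransmit before completing the
	 * shutdown sequence.
	 */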
8753 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
8754 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
8755 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8756 chk->asoc->ctrl_queue_cnt++;
8761 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
8764 * formulate and queue an ASCONF to the peer. ASCONF parameters
8765 * should be queued on the assoc queue.
8767 struct sctp_tmit_chunk *chk;
8768 struct mbuf *m_asconf;
8771 SCTP_TCB_LOCK_ASSERT(stcb);
8773 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
8774 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
8775 /* can't send a new one if there is one in flight already */
8778 /* compose an ASCONF chunk, maximum length is PMTU */
8779 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
8780 if (m_asconf == NULL) {
8783 sctp_alloc_a_chunk(stcb, chk);
8786 sctp_m_freem(m_asconf);
8789 chk->copy_by_ref = 0;
8790 chk->data = m_asconf;
8791 chk->send_size = len;
8792 chk->rec.chunk_id.id = SCTP_ASCONF;
8793 chk->rec.chunk_id.can_take_data = 0;
8794 chk->sent = SCTP_DATAGRAM_UNSENT;
8796 chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
8797 chk->asoc = &stcb->asoc;
8799 atomic_add_int(&chk->whoTo->ref_count, 1);
8800 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
8801 chk->asoc->ctrl_queue_cnt++;
8806 sctp_send_asconf_ack(struct sctp_tcb *stcb)
8809 * formulate and queue an asconf-ack back to the sender. The asconf-ack
8810 * must be stored in the tcb.
8812 struct sctp_tmit_chunk *chk;
8813 struct sctp_asconf_ack *ack, *latest_ack;
8814 struct mbuf *m_ack, *m;
8815 struct sctp_nets *net = NULL;
8817 SCTP_TCB_LOCK_ASSERT(stcb);
8818 /* Get the latest ASCONF-ACK */
8819 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
8820 if (latest_ack == NULL) {
8823 if (latest_ack->last_sent_to != NULL &&
8824 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
8825 /* we're doing a retransmission */
8826 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
8829 if (stcb->asoc.last_control_chunk_from == NULL)
8830 net = stcb->asoc.primary_destination;
8832 net = stcb->asoc.last_control_chunk_from;
8836 if (stcb->asoc.last_control_chunk_from == NULL)
8837 net = stcb->asoc.primary_destination;
8839 net = stcb->asoc.last_control_chunk_from;
8841 latest_ack->last_sent_to = net;
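	/*
	 * Remember where this ack went; if the peer retransmits its ASCONF
	 * we will pick an alternate destination above.
	 */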
8843 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
8844 if (ack->data == NULL) {
8847 /* copy the asconf_ack */
8848 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_DONTWAIT);
8849 if (m_ack == NULL) {
8850 /* couldn't copy it */
8853 #ifdef SCTP_MBUF_LOGGING
8854 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
8859 if (SCTP_BUF_IS_EXTENDED(mat)) {
8860 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
8862 mat = SCTP_BUF_NEXT(mat);
8867 sctp_alloc_a_chunk(stcb, chk);
8871 sctp_m_freem(m_ack);
8874 chk->copy_by_ref = 0;
8881 chk->send_size = ack->len;
8882 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
8883 chk->rec.chunk_id.can_take_data = 1;
8884 chk->sent = SCTP_DATAGRAM_UNSENT;
8886 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */
8887 chk->asoc = &stcb->asoc;
8888 atomic_add_int(&chk->whoTo->ref_count, 1);
8890 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
8891 chk->asoc->ctrl_queue_cnt++;
8898 sctp_chunk_retransmission(struct sctp_inpcb *inp,
8899 struct sctp_tcb *stcb,
8900 struct sctp_association *asoc,
8901 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
8902 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
8908 * send out one MTU of retransmission. If fast_retransmit is
8909 * happening we ignore the cwnd. Otherwise we obey the cwnd and
8910 * rwnd. For a Cookie or Asconf in the control chunk queue we
8911 * retransmit them by themselves.
8913 * For data chunks we will pick out the lowest TSN's in the sent_queue
8914 marked for resend and bundle them all together (up to an MTU of the
8915 destination). The address to send to should have been
8916 * selected/changed where the retransmission was marked (i.e. in FR
8917 * or t3-timeout routines).
8919 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
8920 struct sctp_tmit_chunk *chk, *fwd;
8921 struct mbuf *m, *endofchain;
8922 struct sctp_nets *net = NULL;
8923 uint32_t tsns_sent = 0;
8924 int no_fragmentflg, bundle_at, cnt_thru;
8926 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
8927 struct sctp_auth_chunk *auth = NULL;
8928 uint32_t auth_offset = 0;
8929 uint16_t auth_keyid;
8930 int override_ok = 1;
8931 int data_auth_reqd = 0;
8934 SCTP_TCB_LOCK_ASSERT(stcb);
8935 tmr_started = ctl_cnt = bundle_at = error = 0;
8940 endofchain = m = NULL;
8941 auth_keyid = stcb->asoc.authinfo.active_keyid;
8942 #ifdef SCTP_AUDITING_ENABLED
8943 sctp_audit_log(0xC3, 1);
8945 if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
8946 (TAILQ_EMPTY(&asoc->control_send_queue))) {
8947 SCTPDBG(SCTP_DEBUG_OUTPUT1, "SCTP hits empty queue with cnt set to %d?\n",
8948 asoc->sent_queue_retran_cnt);
8949 asoc->sent_queue_cnt = 0;
8950 asoc->sent_queue_cnt_removeable = 0;
8951 /* send back 0/0 so we enter normal transmission */
8955 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8956 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
8957 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
8958 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
8959 if (chk->sent != SCTP_DATAGRAM_RESEND) {
8962 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
8963 if (chk != asoc->str_reset) {
8965 * not eligible for retran if it's not ours
8972 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
8977 * Add an AUTH chunk, if chunk requires it save the
8978 * offset into the chain for AUTH
8980 if ((auth == NULL) &&
8981 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
8982 stcb->asoc.peer_auth_chunks))) {
8983 m = sctp_add_auth_chunk(m, &endofchain,
8984 &auth, &auth_offset,
8986 chk->rec.chunk_id.id);
8987 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
8989 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
8995 /* do we have control chunks to retransmit? */
8997 /* Start a timer no matter if we succeed or fail */
8998 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
8999 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
9000 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
9001 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
9002 chk->snd_count++; /* update our count */
9003 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
9004 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
9005 auth_offset, auth, stcb->asoc.authinfo.active_keyid,
9006 no_fragmentflg, 0, NULL, 0,
9007 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9008 chk->whoTo->port, so_locked, NULL))) {
9009 SCTP_STAT_INCR(sctps_lowlevelerr);
9012 m = endofchain = NULL;
9016 * We don't want to mark the net->sent time here, since
9017 * we use it for HB and retransmissions cannot measure RTT
9019 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
9021 chk->sent = SCTP_DATAGRAM_SENT;
9022 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
9026 /* Clean up the fwd-tsn list */
9027 sctp_clean_up_ctl(stcb, asoc);
9032 * Ok, it is just data retransmission we need to do or that and a
9033 * fwd-tsn with it all.
9035 if (TAILQ_EMPTY(&asoc->sent_queue)) {
9036 return (SCTP_RETRAN_DONE);
9038 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
9039 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
9040 /* not yet open, resend the cookie and that is it */
9043 #ifdef SCTP_AUDITING_ENABLED
9044 sctp_auditing(20, inp, stcb, NULL);
9046 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
9047 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
9048 if (chk->sent != SCTP_DATAGRAM_RESEND) {
9049 /* No, not sent to this net or not ready for rtx */
9052 if (chk->data == NULL) {
9053 printf("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
9054 chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
9057 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
9058 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
9059 /* Gak, we have exceeded max unlucky retran, abort! */
9060 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n",
9062 SCTP_BASE_SYSCTL(sctp_max_retran_chunk));
9063 atomic_add_int(&stcb->asoc.refcnt, 1);
9064 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, NULL, so_locked);
9065 SCTP_TCB_LOCK(stcb);
9066 atomic_subtract_int(&stcb->asoc.refcnt, 1);
9067 return (SCTP_RETRAN_EXIT);
9069 /* pick up the net */
9071 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
9072 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
9074 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
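		/*
		 * i.e. reserve room for the common SCTP header plus an IPv6
		 * or IPv4 header, respectively.
		 */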
9077 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
9078 /* No room in peers rwnd */
9081 tsn = asoc->last_acked_seq + 1;
9082 if (tsn == chk->rec.data.TSN_seq) {
9084 * we make a special exception for this
9085 * case. The peer has no rwnd but is missing
9086 * the lowest chunk.. which is probably what
9087 * is holding up the rwnd.
9089 goto one_chunk_around;
9094 if (asoc->peers_rwnd < mtu) {
9096 if ((asoc->peers_rwnd == 0) &&
9097 (asoc->total_flight == 0)) {
9098 chk->window_probe = 1;
9099 chk->whoTo->window_probe = 1;
9102 #ifdef SCTP_AUDITING_ENABLED
9103 sctp_audit_log(0xC3, 2);
9107 net->fast_retran_ip = 0;
9108 if (chk->rec.data.doing_fast_retransmit == 0) {
9110 * if no FR in progress skip destinations that have
9111 * flight_size > cwnd.
9113 if (net->flight_size >= net->cwnd) {
9118 * Mark the destination net to have FR recovery
9122 net->fast_retran_ip = 1;
9126 * if no AUTH is yet included and this chunk requires it,
9127 * make sure to account for it. We don't apply the size
9128 * until the AUTH chunk is actually added below in case
9129 * there is no room for this chunk.
9131 if (data_auth_reqd && (auth == NULL)) {
9132 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9136 if ((chk->send_size <= (mtu - dmtu)) ||
9137 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
9138 /* ok we will add this one */
9139 if (data_auth_reqd) {
9141 m = sctp_add_auth_chunk(m,
9147 auth_keyid = chk->auth_keyid;
9149 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9150 } else if (override_ok) {
9151 auth_keyid = chk->auth_keyid;
9153 } else if (chk->auth_keyid != auth_keyid) {
9154 /* different keyid, so done bundling */
9158 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
9160 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9163 /* Do clear IP_DF ? */
9164 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9167 /* update our MTU size */
9168 if (mtu > (chk->send_size + dmtu))
9169 mtu -= (chk->send_size + dmtu);
9172 data_list[bundle_at++] = chk;
9173 if (one_chunk && (asoc->total_flight <= 0)) {
9174 SCTP_STAT_INCR(sctps_windowprobed);
9177 if (one_chunk == 0) {
9179 * now, are there any more chunks forward of chk to pick up?
9182 fwd = TAILQ_NEXT(chk, sctp_next);
9184 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
9185 /* Nope, not for retran */
9186 fwd = TAILQ_NEXT(fwd, sctp_next);
9189 if (fwd->whoTo != net) {
9190 /* Nope, not the net in question */
9191 fwd = TAILQ_NEXT(fwd, sctp_next);
9194 if (data_auth_reqd && (auth == NULL)) {
9195 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
9198 if (fwd->send_size <= (mtu - dmtu)) {
9199 if (data_auth_reqd) {
9201 m = sctp_add_auth_chunk(m,
9207 auth_keyid = fwd->auth_keyid;
9209 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9210 } else if (override_ok) {
9211 auth_keyid = fwd->auth_keyid;
9213 } else if (fwd->auth_keyid != auth_keyid) {
9221 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
9223 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
9226 /* Do clear IP_DF ? */
9227 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
9230 /* update our MTU size */
9231 if (mtu > (fwd->send_size + dmtu))
9232 mtu -= (fwd->send_size + dmtu);
9235 data_list[bundle_at++] = fwd;
9236 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
9239 fwd = TAILQ_NEXT(fwd, sctp_next);
9241 /* can't fit so we are done */
9246 /* Is there something to send for this destination? */
9249 * No matter if we fail or succeed we should start a
9250 * timer. A failure is like a lost IP packet :-)
9252 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9254 * no timer running on this destination
9257 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9260 /* Now lets send it, if there is anything to send :> */
9261 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
9262 (struct sockaddr *)&net->ro._l_addr, m,
9263 auth_offset, auth, auth_keyid,
9264 no_fragmentflg, 0, NULL, 0,
9265 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
9266 net->port, so_locked, NULL))) {
9267 /* error, we could not output */
9268 SCTP_STAT_INCR(sctps_lowlevelerr);
9271 m = endofchain = NULL;
9276 * We don't want to mark the net->sent time here
9277 * since we use it for HB, and retransmissions cannot measure RTT.
9280 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
9282 /* For auto-close */
9284 if (*now_filled == 0) {
9285 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
9286 *now = asoc->time_last_sent;
9289 asoc->time_last_sent = *now;
9291 *cnt_out += bundle_at;
9292 #ifdef SCTP_AUDITING_ENABLED
9293 sctp_audit_log(0xC4, bundle_at);
9296 tsns_sent = data_list[0]->rec.data.TSN_seq;
9298 for (i = 0; i < bundle_at; i++) {
9299 SCTP_STAT_INCR(sctps_sendretransdata);
9300 data_list[i]->sent = SCTP_DATAGRAM_SENT;
9302 * When we have revoked data and we
9303 * retransmit it, we clear the revoked
9304 * flag, since this flag dictates whether we
9305 * subtracted it from the flight size (fs)
9307 if (data_list[i]->rec.data.chunk_was_revoked) {
9308 /* Deflate the cwnd */
9309 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
9310 data_list[i]->rec.data.chunk_was_revoked = 0;
9312 data_list[i]->snd_count++;
9313 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
9314 /* record the time */
9315 data_list[i]->sent_rcv_time = asoc->time_last_sent;
9316 if (data_list[i]->book_size_scale) {
9318 * need to double the book size on this chunk.
9321 data_list[i]->book_size_scale = 0;
9323 * Since we double the booksize, we
9324 * must also double the output queue
9325 * size, since this gets shrunk when
9326 * we free by this amount.
9328 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
9329 data_list[i]->book_size *= 2;
9333 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
9334 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
9335 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
9337 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
9338 (uint32_t) (data_list[i]->send_size +
9339 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
9341 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
9342 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
9343 data_list[i]->whoTo->flight_size,
9344 data_list[i]->book_size,
9345 (uintptr_t) data_list[i]->whoTo,
9346 data_list[i]->rec.data.TSN_seq);
9348 sctp_flight_size_increase(data_list[i]);
9349 sctp_total_flight_increase(stcb, data_list[i]);
9350 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
9351 /* SWS sender side engages */
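				/*
				 * i.e. classic sender-side Silly Window
				 * Syndrome avoidance: a window below the
				 * threshold is treated as closed.
				 */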
9352 asoc->peers_rwnd = 0;
9355 (data_list[i]->rec.data.doing_fast_retransmit)) {
9356 SCTP_STAT_INCR(sctps_sendfastretrans);
9357 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
9358 (tmr_started == 0)) {
9360 * ok we just fast-retrans'd
9361 * the lowest TSN, i.e. the
9362 * first on the list. In
9363 * this case we want to give
9364 * some more time to get a
9365 * SACK back without a t3 expiring.
9368 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
9369 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
9370 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
9374 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9375 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
9377 #ifdef SCTP_AUDITING_ENABLED
9378 sctp_auditing(21, inp, stcb, NULL);
9384 if (asoc->sent_queue_retran_cnt <= 0) {
9385 /* all done we have no more to retran */
9386 asoc->sent_queue_retran_cnt = 0;
9390 /* No more room in rwnd */
9393 /* stop the for loop here. we sent out a packet */
9401 sctp_timer_validation(struct sctp_inpcb *inp,
9402 struct sctp_tcb *stcb,
9403 struct sctp_association *asoc,
9406 struct sctp_nets *net;
9408 /* Validate that a timer is running somewhere */
9409 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9410 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
9411 /* Here is a timer */
9415 SCTP_TCB_LOCK_ASSERT(stcb);
9416 /* Gak, we did not have a timer somewhere */
9417 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
9418 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
9423 sctp_chunk_output(struct sctp_inpcb *inp,
9424 struct sctp_tcb *stcb,
9427 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
9433 * Ok this is the generic chunk service queue. we must do the following:
9435 * - See if there are retransmits pending, if so we must do these first.
9437 * - Service the stream queue that is next, moving any
9438 * message (note I must get a complete message i.e.
9439 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning TSN's.
9441 * - Check to see if the cwnd/rwnd allows any output, if so we
9442 * go ahead and formulate and send the low level chunks. Making sure
9443 * to combine any control in the control chunk queue also.
9445 struct sctp_association *asoc;
9446 struct sctp_nets *net;
9447 int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
9448 burst_cnt = 0, burst_limit = 0;
9452 int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
9454 int fr_done, tot_frs = 0;
9457 if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
9458 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
9464 SCTP_TCB_LOCK_ASSERT(stcb);
9466 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
9468 if ((un_sent <= 0) &&
9469 (TAILQ_EMPTY(&asoc->control_send_queue)) &&
9470 (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
9471 (asoc->sent_queue_retran_cnt == 0)) {
9472 /* Nothing to do unless there is something to be sent left */
9476 * Do we have something to send, data or control AND a sack timer
9477 * running, if so piggy-back the sack.
9479 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
9480 sctp_send_sack(stcb);
9481 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
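	/*
	 * The SACK just queued will be bundled into the packet we are about
	 * to build, so the delayed-ack timer is no longer needed.
	 */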
9483 while (asoc->sent_queue_retran_cnt) {
9485 * Ok, it is retransmission time only, we send out only ONE
9486 * packet with a single call off to the retran code.
9488 if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
9490 * Special hook for handling cookies discarded
9491 * by the peer that carried data. Send cookie-ack only
9492 * and then the next call will get the retran's.
9494 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9496 &now, &now_filled, frag_point, so_locked);
9498 } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
9499 /* if its not from a HB then do it */
9501 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
9507 * it's from any other place; we don't allow retran
9508 * output (only control)
9513 /* Can't send anymore */
9515 * now let's push out control by calling med-level
9516 * output once. This assures that we WILL send HB's
9519 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
9521 &now, &now_filled, frag_point, so_locked);
9522 #ifdef SCTP_AUDITING_ENABLED
9523 sctp_auditing(8, inp, stcb, NULL);
9525 (void)sctp_timer_validation(inp, stcb, asoc, ret);
9530 * The count was off; retran is not happening, so do
9531 * the normal transmission.
9533 #ifdef SCTP_AUDITING_ENABLED
9534 sctp_auditing(9, inp, stcb, NULL);
9536 if (ret == SCTP_RETRAN_EXIT) {
9541 if (from_where == SCTP_OUTPUT_FROM_T3) {
9542 /* Only one transmission allowed out of a timeout */
9543 #ifdef SCTP_AUDITING_ENABLED
9544 sctp_auditing(10, inp, stcb, NULL);
9546 /* Push out any control */
9547 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
9548 &now, &now_filled, frag_point, so_locked);
9551 if (tot_frs > asoc->max_burst) {
9552 /* Hit FR burst limit */
9555 if ((num_out == 0) && (ret == 0)) {
9557 /* No more retrans to send */
9561 #ifdef SCTP_AUDITING_ENABLED
9562 sctp_auditing(12, inp, stcb, NULL);
9564 /* Check for bad destinations, if they exist move chunks around. */
9565 burst_limit = asoc->max_burst;
9566 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
9567 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
9568 SCTP_ADDR_NOT_REACHABLE) {
9570 * if possible move things off of this address. We
9571 * still may send below due to the dormant state, but
9572 * we try to find an alternate address to send to,
9573 * and if we have one we move all queued data on the
9574 * out wheel to this alternate address.
9576 if (net->ref_count > 1)
9577 sctp_move_to_an_alt(stcb, asoc, net);
9578 } else if ((asoc->sctp_cmt_on_off == 1) &&
9579 (asoc->sctp_cmt_pf > 0) &&
9580 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
9582 * JRS 5/14/07 - If CMT PF is on and the current
9583 * destination is in PF state, move all queued data
9584 * to an alternate destination.
9586 if (net->ref_count > 1)
9587 sctp_move_to_an_alt(stcb, asoc, net);
9590 * if ((asoc->sat_network) || (net->addr_is_local))
9591 * { burst_limit = asoc->max_burst *
9592 * SCTP_SAT_NETWORK_BURST_INCR; }
9594 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
9595 if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
9597 * JRS - Use the congestion control
9598 * given in the congestion control
9601 asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, burst_limit);
9602 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9603 sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
9605 SCTP_STAT_INCR(sctps_maxburstqueued);
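				/*
				 * Note: with cwnd-based max burst no packets
				 * are counted; instead the CC hook above (in
				 * the default module) clamps cwnd to roughly
				 * flight_size + burst_limit * MTU.
				 */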
9607 net->fast_retran_ip = 0;
9609 if (net->flight_size == 0) {
9610 /* Should be decaying the cwnd here */
9619 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
9620 &reason_code, 0, from_where,
9621 &now, &now_filled, frag_point, so_locked);
9623 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
9624 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9625 sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
9627 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9628 sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
9629 sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
9633 SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
9637 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9638 sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
9640 sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
9645 * When Nagle is on, we look at how much is un_sent, then
9646 * if it's smaller than an MTU and we have data in flight we stop.
9649 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
9650 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
9651 if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
9652 (stcb->asoc.total_flight > 0)) {
9656 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
9657 TAILQ_EMPTY(&asoc->send_queue) &&
9658 TAILQ_EMPTY(&asoc->out_wheel)) {
9659 /* Nothing left to send */
9662 if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
9663 /* Nothing left to send */
9666 } while (num_out && (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
9667 (burst_cnt < burst_limit)));
9669 if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
9670 if (burst_cnt >= burst_limit) {
9671 SCTP_STAT_INCR(sctps_maxburstqueued);
9672 asoc->burst_limit_applied = 1;
9673 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
9674 sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
9677 asoc->burst_limit_applied = 0;
9680 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
9681 sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
9683 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
9687 * Now we need to clean up the control chunk chain if a ECNE is on
9688 * it. It must be marked as UNSENT again so next call will continue
9689 * to send it until such time that we get a CWR, to remove it.
9691 if (stcb->asoc.ecn_echo_cnt_onq)
9692 sctp_fix_ecn_echo(asoc);
9698 sctp_output(inp, m, addr, control, p, flags)
9699 struct sctp_inpcb *inp;
9701 struct sockaddr *addr;
9702 struct mbuf *control;
9707 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
9710 if (inp->sctp_socket == NULL) {
9711 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
9714 return (sctp_sosend(inp->sctp_socket,
9724 send_forward_tsn(struct sctp_tcb *stcb,
9725 struct sctp_association *asoc)
9727 struct sctp_tmit_chunk *chk;
9728 struct sctp_forward_tsn_chunk *fwdtsn;
9729 uint32_t advance_peer_ack_point;
9731 SCTP_TCB_LOCK_ASSERT(stcb);
9732 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9733 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
9734 /* mark it to unsent */
9735 chk->sent = SCTP_DATAGRAM_UNSENT;
9737 /* Do we correct its output location? */
9738 if (chk->whoTo != asoc->primary_destination) {
9739 sctp_free_remote_addr(chk->whoTo);
9740 chk->whoTo = asoc->primary_destination;
9741 atomic_add_int(&chk->whoTo->ref_count, 1);
9743 goto sctp_fill_in_rest;
9746 /* Ok if we reach here we must build one */
9747 sctp_alloc_a_chunk(stcb, chk);
9751 asoc->fwd_tsn_cnt++;
9752 chk->copy_by_ref = 0;
9753 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
9754 chk->rec.chunk_id.can_take_data = 0;
9757 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
9758 if (chk->data == NULL) {
9759 sctp_free_a_chunk(stcb, chk);
9762 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
9763 chk->sent = SCTP_DATAGRAM_UNSENT;
9765 chk->whoTo = asoc->primary_destination;
9766 atomic_add_int(&chk->whoTo->ref_count, 1);
9767 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
9768 asoc->ctrl_queue_cnt++;
9771 * Here we go through and fill out the part that deals with
9772 * stream/seq of the ones we skip.
9774 SCTP_BUF_LEN(chk->data) = 0;
9776 struct sctp_tmit_chunk *at, *tp1, *last;
9777 struct sctp_strseq *strseq;
9778 unsigned int cnt_of_space, i, ovh;
9779 unsigned int space_needed;
9780 unsigned int cnt_of_skipped = 0;
9782 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
9783 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
9784 /* no more to look at */
9787 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
9788 /* We don't report these */
9793 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
9794 (cnt_of_skipped * sizeof(struct sctp_strseq)));
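		/*
		 * i.e. the 8-byte FORWARD-TSN header (chunk header plus
		 * new_cumulative_tsn) and one 4-byte stream/sequence pair per
		 * skipped message.
		 */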
9796 cnt_of_space = M_TRAILINGSPACE(chk->data);
9798 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
9799 ovh = SCTP_MIN_OVERHEAD;
9801 ovh = SCTP_MIN_V4_OVERHEAD;
9803 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
9804 /* trim to a mtu size */
9805 cnt_of_space = asoc->smallest_mtu - ovh;
9807 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
9808 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
9809 0xff, 0, cnt_of_skipped,
9810 asoc->advanced_peer_ack_point);
9813 advance_peer_ack_point = asoc->advanced_peer_ack_point;
9814 if (cnt_of_space < space_needed) {
9816 * ok we must trim down the chunk by lowering the
9817 * advance peer ack point.
9819 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
9820 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
9821 0xff, 0xff, cnt_of_space,
9824 cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
9825 cnt_of_skipped /= sizeof(struct sctp_strseq);
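			/*
			 * i.e. keep only as many stream/seq entries as fit in
			 * cnt_of_space; the advance peer ack point is then
			 * lowered to the TSN of the last entry kept.
			 */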
9827 * Go through and find the TSN that will be the one
9830 at = TAILQ_FIRST(&asoc->sent_queue);
9831 for (i = 0; i < cnt_of_skipped; i++) {
9832 tp1 = TAILQ_NEXT(at, sctp_next);
9835 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
9836 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
9837 0xff, cnt_of_skipped, at->rec.data.TSN_seq,
9838 asoc->advanced_peer_ack_point);
9842 * last now points to the last one I can report; update the ack point.
9845 advance_peer_ack_point = last->rec.data.TSN_seq;
9846 space_needed = sizeof(struct sctp_forward_tsn_chunk) +
9847 cnt_of_skipped * sizeof(struct sctp_strseq);
9849 chk->send_size = space_needed;
9850 /* Setup the chunk */
9851 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
9852 fwdtsn->ch.chunk_length = htons(chk->send_size);
9853 fwdtsn->ch.chunk_flags = 0;
9854 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
9855 fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
9856 SCTP_BUF_LEN(chk->data) = chk->send_size;
9859 * Move pointer to after the fwdtsn and transfer to the strseq pointer.
9862 strseq = (struct sctp_strseq *)fwdtsn;
9864 * Now populate the strseq list. This is done blindly
9865 * without pulling out duplicate stream info. This is
9866 * inefficient but won't harm the process since the peer will
9867 * look at these in sequence and will thus release anything.
9868 * It could mean we exceed the PMTU and chop off some that
9869 * we could have included.. but this is unlikely (aka 1432/4
9870 * would mean 300+ stream seq's would have to be reported in
9871 * one FWD-TSN). With a bit of work we can later FIX this to
9872 * optimize and pull out duplicates.. but it does add more
9873 * overhead. So for now... not!
9875 at = TAILQ_FIRST(&asoc->sent_queue);
9876 for (i = 0; i < cnt_of_skipped; i++) {
9877 tp1 = TAILQ_NEXT(at, sctp_next);
9878 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
9879 /* We don't report these */
9884 if (at->rec.data.TSN_seq == advance_peer_ack_point) {
9885 at->rec.data.fwd_tsn_cnt = 0;
9887 strseq->stream = ntohs(at->rec.data.stream_number);
9888 strseq->sequence = ntohs(at->rec.data.stream_seq);
9898 sctp_send_sack(struct sctp_tcb *stcb)
9901 * Queue up a SACK or NR-SACK in the control queue.
9902 * We must first check to see if a SACK or NR-SACK is
9903 * somehow on the control queue.
9904 * If so, we will take and remove the old one.
9906 struct sctp_association *asoc;
9907 struct sctp_tmit_chunk *chk, *a_chk;
9908 struct sctp_sack_chunk *sack;
9909 struct sctp_nr_sack_chunk *nr_sack;
9910 struct sctp_gap_ack_block *gap_descriptor;
9911 struct sack_track *selector;
9916 int limit_reached = 0;
9917 unsigned int i, sel_start, siz, j, starting_index;
9918 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
9921 uint32_t highest_tsn;
9925 if ((stcb->asoc.sctp_nr_sack_on_off == 1) &&
9926 (stcb->asoc.peer_supports_nr_sack == 1)) {
9927 type = SCTP_NR_SELECTIVE_ACK;
9929 type = SCTP_SELECTIVE_ACK;
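	/*
	 * An NR-SACK additionally reports gaps the receiver will not renege
	 * on; it is used only when both sides have negotiated support.
	 */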
9933 SCTP_TCB_LOCK_ASSERT(stcb);
9934 if (asoc->last_data_chunk_from == NULL) {
9935 /* Hmm we never received anything */
9938 sctp_slide_mapping_arrays(stcb);
9939 sctp_set_rwnd(stcb, asoc);
9940 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9941 if (chk->rec.chunk_id.id == type) {
9942 /* Hmm, found a sack already on queue, remove it */
9943 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
9944 asoc->ctrl_queue_cnt--;
9947 sctp_m_freem(a_chk->data);
9950 sctp_free_remote_addr(a_chk->whoTo);
9951 a_chk->whoTo = NULL;
9955 if (a_chk == NULL) {
9956 sctp_alloc_a_chunk(stcb, a_chk);
9957 if (a_chk == NULL) {
9958 /* No memory so we drop the idea, and set a timer */
9959 if (stcb->asoc.delayed_ack) {
9960 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
9961 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
9962 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
9963 stcb->sctp_ep, stcb, NULL);
9965 stcb->asoc.send_sack = 1;
9969 a_chk->copy_by_ref = 0;
9970 a_chk->rec.chunk_id.id = type;
9971 a_chk->rec.chunk_id.can_take_data = 1;
9973 /* Clear our pkt counts */
9974 asoc->data_pkts_seen = 0;
9977 a_chk->snd_count = 0;
9978 a_chk->send_size = 0; /* fill in later */
9979 a_chk->sent = SCTP_DATAGRAM_UNSENT;
9980 a_chk->whoTo = NULL;
9982 if ((asoc->numduptsns) ||
9983 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
9986 * Ok, we have some duplicates or the destination for the
9987 * sack is unreachable; let's see if we can select an
9988 * alternate to asoc->last_data_chunk_from
9990 if ((!(asoc->last_data_chunk_from->dest_state &
9991 SCTP_ADDR_NOT_REACHABLE)) &&
9992 (asoc->used_alt_onsack > asoc->numnets)) {
9993 /* We used an alt last time, don't use one this time */
9994 a_chk->whoTo = NULL;
9996 asoc->used_alt_onsack++;
9997 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
9999 if (a_chk->whoTo == NULL) {
10000 /* Nope, no alternate */
10001 a_chk->whoTo = asoc->last_data_chunk_from;
10002 asoc->used_alt_onsack = 0;
10006 * No duplicates, so we use the last place we received data from.
10009 asoc->used_alt_onsack = 0;
10010 a_chk->whoTo = asoc->last_data_chunk_from;
10012 if (a_chk->whoTo) {
10013 atomic_add_int(&a_chk->whoTo->ref_count, 1);
10015 if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
10016 highest_tsn = asoc->highest_tsn_inside_map;
10018 highest_tsn = asoc->highest_tsn_inside_nr_map;
10020 if (highest_tsn == asoc->cumulative_tsn) {
10022 if (type == SCTP_SELECTIVE_ACK) {
10023 space_req = sizeof(struct sctp_sack_chunk);
10025 space_req = sizeof(struct sctp_nr_sack_chunk);
10028 /* gaps get a cluster */
10029 space_req = MCLBYTES;
10031 /* Ok now let's formulate an MBUF with our sack */
10032 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
10033 if ((a_chk->data == NULL) ||
10034 (a_chk->whoTo == NULL)) {
10035 /* rats, no mbuf memory */
10037 /* was a problem with the destination */
10038 sctp_m_freem(a_chk->data);
10039 a_chk->data = NULL;
10041 sctp_free_a_chunk(stcb, a_chk);
10042 /* sa_ignore NO_NULL_CHK */
10043 if (stcb->asoc.delayed_ack) {
10044 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
10045 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
10046 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
10047 stcb->sctp_ep, stcb, NULL);
10049 stcb->asoc.send_sack = 1;
10053 /* ok, let's go through and fill it in */
10054 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
10055 space = M_TRAILINGSPACE(a_chk->data);
10056 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
10057 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
10059 limit = mtod(a_chk->data, caddr_t);
10062 /* 0x01 is used by nonce for ecn */
10063 if ((SCTP_BASE_SYSCTL(sctp_ecn_enable)) &&
10064 (SCTP_BASE_SYSCTL(sctp_ecn_nonce)) &&
10065 (asoc->peer_supports_ecn_nonce))
10066 flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
10070 if ((asoc->sctp_cmt_on_off == 1) &&
10071 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
10073 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
10074 * received, then set high bit to 1, else 0. Reset pkts_rcvd.
10077 flags |= (asoc->cmt_dac_pkts_rcvd << 6);
10078 asoc->cmt_dac_pkts_rcvd = 0;
10080 #ifdef SCTP_ASOCLOG_OF_TSNS
10081 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
10082 stcb->asoc.cumack_log_atsnt++;
10083 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
10084 stcb->asoc.cumack_log_atsnt = 0;
10087 /* reset the readers interpretation */
10088 stcb->freed_by_sorcv_sincelast = 0;
10090 if (type == SCTP_SELECTIVE_ACK) {
10091 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
10093 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
10094 if (highest_tsn > asoc->mapping_array_base_tsn) {
10095 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10097 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
10101 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
10102 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
10103 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
10104 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10106 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
10110 if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
10113 * The base TSN is initialized to be the first TSN the peer
10114 * will send us. If the cum-ack is behind this then when they
10115 * send us the next in sequence it will mark the base_tsn bit.
10116 * Thus we need to use the very first selector and the offset
10117 * is 1. Our table is built for this case.
10119 starting_index = 0;
10123 * we skip the first selector when the cum-ack is at or above the
10124 * mapping array base. This is because the bits at the base or above
10125 * are turned on and our first selector in the table assumes they are
10126 * off. We thus will use the second selector (first is 0). We use
10127 * the reverse of our macro to fix the offset, in bits, that our
10128 * table is at. Note that this method assumes that the cum-tsn is
10129 * within the first bit, i.e. its value is 0-7 which means the
10130 * result of our offset will be either 0 to -7. If the cumack
10131 * is NOT in the first byte (0) (which it should be since we did
10132 * a mapping array slide above) then we need to calculate the starting
10133 * index i.e. which byte of the mapping array we should start at. We
10134 * do this by dividing by 8 and pushing the remainder (mod) into offset.
10135 * then we make the offset negative, since we need a negative
10136 * offset into the selector table.
10138 SCTP_CALC_TSN_TO_GAP(offset, asoc->cumulative_tsn, asoc->mapping_array_base_tsn);
10140 starting_index = offset / 8;
10141 offset = offset % 8;
10142 printf("Strange starting index is %d offset:%d (not 0/x)\n",
10143 starting_index, offset);
10145 starting_index = 0;
10147 /* We need a negative offset in our table */
10151 if (((type == SCTP_SELECTIVE_ACK) &&
10152 compare_with_wrap(highest_tsn, asoc->cumulative_tsn, MAX_TSN)) ||
10153 ((type == SCTP_NR_SELECTIVE_ACK) &&
10154 compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN))) {
10155 /* we have a gap .. maybe */
10156 for (i = starting_index; i < siz; i++) {
10157 if (type == SCTP_SELECTIVE_ACK) {
10158 selector = &sack_array[asoc->mapping_array[i] | asoc->nr_mapping_array[i]];
10160 selector = &sack_array[asoc->mapping_array[i]];
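			/*
			 * Each byte of the mapping array indexes the 256-entry
			 * sack_array of precomputed gap blocks; for a plain
			 * SACK the nr map is ORed in so non-renegable data is
			 * reported as received as well.
			 */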
10162 if (mergeable && selector->right_edge) {
10164 * Backup, left and right edges were ok to merge.
10170 if (selector->num_entries == 0)
10173 for (j = sel_start; j < selector->num_entries; j++) {
10174 if (mergeable && selector->right_edge) {
10176 * do a merge by NOT setting the left edge
10182 * no merge, set the left edge
10186 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10188 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10191 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10197 if (selector->left_edge) {
10201 if (limit_reached) {
10202 /* Reached the limit, stop */
10209 if ((type == SCTP_NR_SELECTIVE_ACK) &&
10210 (limit_reached == 0)) {
10214 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn)
10215 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
10217 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
10219 if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
10222 * cum-ack behind the mapping array, so we start and use all entries.
10227 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
10229 * we skip the first one when the cum-ack is at or above the
10230 * mapping array base. Note this only works if the cum-ack is within the first byte.
10234 if (compare_with_wrap(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn, MAX_TSN)) {
10235 /* we have a gap .. maybe */
10236 for (i = 0; i < siz; i++) {
10237 selector = &sack_array[asoc->nr_mapping_array[i]];
10238 if (mergeable && selector->right_edge) {
10240 * Backup, left and right edges were ok to merge.
10243 num_nr_gap_blocks--;
10246 if (selector->num_entries == 0)
10249 for (j = sel_start; j < selector->num_entries; j++) {
10250 if (mergeable && selector->right_edge) {
10252 * do a merge by NOT setting the left edge
10259 * no merge, set the left edge
10263 gap_descriptor->start = htons((selector->gaps[j].start + offset));
10265 gap_descriptor->end = htons((selector->gaps[j].end + offset));
10266 num_nr_gap_blocks++;
10268 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
10274 if (selector->left_edge) {
10278 if (limit_reached) {
10279 /* Reached the limit, stop */
10287 /* now we must add any dups we are going to report. */
10288 if ((limit_reached == 0) && (asoc->numduptsns)) {
10289 dup = (uint32_t *) gap_descriptor;
10290 for (i = 0; i < asoc->numduptsns; i++) {
10291 *dup = htonl(asoc->dup_tsns[i]);
10294 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
10299 asoc->numduptsns = 0;
10302 * now that the chunk is prepared, queue it to the control chunk queue.
10305 if (type == SCTP_SELECTIVE_ACK) {
10306 a_chk->send_size = sizeof(struct sctp_sack_chunk) +
10307 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10308 num_dups * sizeof(int32_t);
10309 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10310 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10311 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
10312 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
10313 sack->sack.num_dup_tsns = htons(num_dups);
10314 sack->ch.chunk_type = type;
10315 sack->ch.chunk_flags = flags;
10316 sack->ch.chunk_length = htons(a_chk->send_size);
10318 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
10319 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
10320 num_dups * sizeof(int32_t);
10321 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
10322 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
10323 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
10324 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
10325 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
10326 nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
10327 nr_sack->nr_sack.reserved = 0;
10328 nr_sack->ch.chunk_type = type;
10329 nr_sack->ch.chunk_flags = flags;
10330 nr_sack->ch.chunk_length = htons(a_chk->send_size);
10332 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
10333 asoc->my_last_reported_rwnd = asoc->my_rwnd;
10334 asoc->ctrl_queue_cnt++;
10335 asoc->send_sack = 0;
10336 SCTP_STAT_INCR(sctps_sendsacks);
10341 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
10342 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
10347 struct mbuf *m_abort;
10348 struct mbuf *m_out = NULL, *m_end = NULL;
10349 struct sctp_abort_chunk *abort = NULL;
10351 uint32_t auth_offset = 0;
10352 struct sctp_auth_chunk *auth = NULL;
10355 * Add an AUTH chunk, if chunk requires it and save the offset into
10356 * the chain for AUTH
10358 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
10359 stcb->asoc.peer_auth_chunks)) {
10360 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
10361 stcb, SCTP_ABORT_ASSOCIATION);
10362 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10364 SCTP_TCB_LOCK_ASSERT(stcb);
10365 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
10366 if (m_abort == NULL) {
10369 sctp_m_freem(m_out);
10372 /* link in any error */
10373 SCTP_BUF_NEXT(m_abort) = operr;
10380 sz += SCTP_BUF_LEN(n);
10381 n = SCTP_BUF_NEXT(n);
10384 SCTP_BUF_LEN(m_abort) = sizeof(*abort);
10385 if (m_out == NULL) {
10386 /* NO Auth chunk prepended, so reserve space in front */
10387 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
10390 /* Put AUTH chunk at the front of the chain */
10391 SCTP_BUF_NEXT(m_end) = m_abort;
10394 /* fill in the ABORT chunk */
10395 abort = mtod(m_abort, struct sctp_abort_chunk *);
10396 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
10397 abort->ch.chunk_flags = 0;
10398 abort->ch.chunk_length = htons(sizeof(*abort) + sz);
10400 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
10401 stcb->asoc.primary_destination,
10402 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
10403 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, NULL, 0,
10404 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
10405 stcb->asoc.primary_destination->port, so_locked, NULL);
10406 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10410 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
10411 struct sctp_nets *net,
10414 /* formulate and SEND a SHUTDOWN-COMPLETE */
10415 struct mbuf *m_shutdown_comp;
10416 struct sctp_shutdown_complete_chunk *shutdown_complete;
10420 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
10421 if (m_shutdown_comp == NULL) {
10425 if (reflect_vtag) {
10426 flags = SCTP_HAD_NO_TCB;
10427 vtag = stcb->asoc.my_vtag;
10430 vtag = stcb->asoc.peer_vtag;
10432 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
10433 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10434 shutdown_complete->ch.chunk_flags = flags;
10435 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10436 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
10437 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
10438 (struct sockaddr *)&net->ro._l_addr,
10439 m_shutdown_comp, 0, NULL, 0, 1, 0, NULL, 0,
10440 stcb->sctp_ep->sctp_lport, stcb->rport,
10442 net->port, SCTP_SO_NOT_LOCKED, NULL);
10443 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
10448 sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh,
10449 uint32_t vrf_id, uint16_t port)
10451 /* formulate and SEND a SHUTDOWN-COMPLETE */
10452 struct mbuf *o_pak;
10454 struct ip *iph, *iph_out;
10455 struct udphdr *udp = NULL;
10458 struct ip6_hdr *ip6, *ip6_out;
10461 int offset_out, len, mlen;
10462 struct sctp_shutdown_complete_msg *comp_cp;
10464 iph = mtod(m, struct ip *);
10465 switch (iph->ip_v) {
10467 len = (sizeof(struct ip) + sizeof(struct sctp_shutdown_complete_msg));
10470 case IPV6_VERSION >> 4:
10471 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
10478 len += sizeof(struct udphdr);
10480 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
10481 if (mout == NULL) {
10484 SCTP_BUF_RESV_UF(mout, max_linkhdr);
10485 SCTP_BUF_LEN(mout) = len;
10486 SCTP_BUF_NEXT(mout) = NULL;
10493 switch (iph->ip_v) {
10495 iph_out = mtod(mout, struct ip *);
10497 /* Fill in the IP header for the SHUTDOWN-COMPLETE */
10498 iph_out->ip_v = IPVERSION;
10499 iph_out->ip_hl = (sizeof(struct ip) / 4);
10500 iph_out->ip_tos = (u_char)0;
10501 iph_out->ip_id = 0;
10502 iph_out->ip_off = 0;
10503 iph_out->ip_ttl = MAXTTL;
10505 iph_out->ip_p = IPPROTO_UDP;
10507 iph_out->ip_p = IPPROTO_SCTP;
10509 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
10510 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
10512 /* let IP layer calculate this */
10513 iph_out->ip_sum = 0;
10514 offset_out += sizeof(*iph_out);
10515 comp_cp = (struct sctp_shutdown_complete_msg *)(
10516 (caddr_t)iph_out + offset_out);
10519 case IPV6_VERSION >> 4:
10520 ip6 = (struct ip6_hdr *)iph;
10521 ip6_out = mtod(mout, struct ip6_hdr *);
10523 /* Fill in the IPv6 header for the SHUTDOWN-COMPLETE */
10524 ip6_out->ip6_flow = ip6->ip6_flow;
10525 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
10527 ip6_out->ip6_nxt = IPPROTO_UDP;
10529 ip6_out->ip6_nxt = IPPROTO_SCTP;
10531 ip6_out->ip6_src = ip6->ip6_dst;
10532 ip6_out->ip6_dst = ip6->ip6_src;
10534 * ?? The old code set this to the IP header length plus the
10535 * payload; that looks wrong and would never have worked
10537 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
10538 offset_out += sizeof(*ip6_out);
10539 comp_cp = (struct sctp_shutdown_complete_msg *)(
10540 (caddr_t)ip6_out + offset_out);
10544 /* Currently not supported. */
10545 sctp_m_freem(mout);
10549 udp = (struct udphdr *)comp_cp;
10550 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
10551 udp->uh_dport = port;
10552 udp->uh_ulen = htons(sizeof(struct sctp_shutdown_complete_msg) + sizeof(struct udphdr));
10553 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
10554 offset_out += sizeof(struct udphdr);
10555 comp_cp = (struct sctp_shutdown_complete_msg *)((caddr_t)comp_cp + sizeof(struct udphdr));
10557 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
10559 sctp_m_freem(mout);
10562 /* Now copy in and fill in the SHUTDOWN-COMPLETE tags etc. */
10563 comp_cp->sh.src_port = sh->dest_port;
10564 comp_cp->sh.dest_port = sh->src_port;
10565 comp_cp->sh.checksum = 0;
10566 comp_cp->sh.v_tag = sh->v_tag;
10567 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
10568 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
10569 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
10571 if (iph_out != NULL) {
10574 struct sctp_tcb *stcb = NULL;
10576 mlen = SCTP_BUF_LEN(mout);
10577 bzero(&ro, sizeof ro);
10578 /* set IPv4 length */
10579 iph_out->ip_len = mlen;
10580 #ifdef SCTP_PACKET_LOGGING
10581 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
10582 sctp_packet_log(mout, mlen);
10585 comp_cp->sh.checksum = sctp_calculate_cksum(mout, offset_out);
10586 SCTP_STAT_INCR(sctps_sendswcrc);
10587 SCTP_ENABLE_UDP_CSUM(mout);
10589 mout->m_pkthdr.csum_flags = CSUM_SCTP;
10590 mout->m_pkthdr.csum_data = 0;
10591 SCTP_STAT_INCR(sctps_sendhwcrc);
10593 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
10595 SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
10597 /* Free the route if we got one back */
10602 if (ip6_out != NULL) {
10603 struct route_in6 ro;
10605 struct sctp_tcb *stcb = NULL;
10606 struct ifnet *ifp = NULL;
10608 bzero(&ro, sizeof(ro));
10609 mlen = SCTP_BUF_LEN(mout);
10610 #ifdef SCTP_PACKET_LOGGING
10611 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
10612 sctp_packet_log(mout, mlen);
10614 SCTP_ATTACH_CHAIN(o_pak, mout, mlen);
10616 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
10618 (stcb->asoc.loopback_scope))) {
10619 comp_cp->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
10620 SCTP_STAT_INCR(sctps_sendswcrc);
10622 SCTP_STAT_INCR(sctps_sendnocrc);
10624 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), mlen - sizeof(struct ip6_hdr))) == 0) {
10625 udp->uh_sum = 0xffff;
10628 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
10630 (stcb->asoc.loopback_scope))) {
10631 mout->m_pkthdr.csum_flags = CSUM_SCTP;
10632 mout->m_pkthdr.csum_data = 0;
10633 SCTP_STAT_INCR(sctps_sendhwcrc);
10635 SCTP_STAT_INCR(sctps_sendnocrc);
10638 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
10640 /* Free the route if we got one back */
10645 SCTP_STAT_INCR(sctps_sendpackets);
10646 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
10647 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
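/*
 * The IPv4 branch above simply mirrors the incoming header back at the
 * sender. A compact sketch of that reflection, assuming a simplified
 * header struct (reflect_ip4 and struct ip4 are hypothetical names):
 */
#if 0
#include <stdint.h>

#define PROTO_UDP	17
#define PROTO_SCTP	132

struct ip4 {
	uint32_t src, dst;	/* already in network byte order */
	uint8_t ttl, proto;
};

static void
reflect_ip4(const struct ip4 *in, struct ip4 *out, int udp_encaps)
{
	out->src = in->dst;	/* reply goes back the way the packet came */
	out->dst = in->src;
	out->ttl = 255;		/* MAXTTL */
	out->proto = udp_encaps ? PROTO_UDP : PROTO_SCTP;
}
#endif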
10652 static struct sctp_nets *
10653 sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
10655 struct sctp_nets *net, *hnet;
10656 int ms_goneby, highest_ms, state_overide = 0;
10658 (void)SCTP_GETTIME_TIMEVAL(now);
10661 SCTP_TCB_LOCK_ASSERT(stcb);
10662 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
10664 ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
10665 (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
10668 * Skip this guy from consideration if HB is off AND it is confirmed
10673 if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
10674 /* skip this dest net from consideration */
10677 if (net->last_sent_time.tv_sec) {
10678 /* Sent to so we subtract */
10679 ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
10681 /* Never been sent to */
10682 ms_goneby = 0x7fffffff;
10684 * When the address state is unconfirmed but still
10685 * considered reachable, we HB at a higher rate. Once it
10686 * goes confirmed OR reaches the "unreachable" state, then
10687 * we cut it back to HB at a more normal pace.
10689 if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
10695 if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
10696 (ms_goneby > highest_ms)) {
10697 highest_ms = ms_goneby;
10702 ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
10708 if (hnet && highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
10710 * Found the one with the longest delay bounds OR it is
10711 * unconfirmed and still not marked unreachable.
10713 SCTPDBG(SCTP_DEBUG_OUTPUT4, "net:%p is the hb winner -", hnet);
10716 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT4,
10717 (struct sockaddr *)&hnet->ro._l_addr);
10719 SCTPDBG(SCTP_DEBUG_OUTPUT4, " none\n");
10722 /* update the timer now */
10723 hnet->last_sent_time = *now;
10726 /* Nothing to HB */
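/*
 * The selection loop above boils down to: a path is a heartbeat candidate
 * if it is overdue (ms_goneby >= RTO) or unconfirmed-but-reachable, and
 * the most overdue candidate wins. A self-contained sketch of that policy
 * (struct hb_cand and pick_hb_dest are illustrative names only):
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct hb_cand {
	uint32_t rto_ms;	/* current RTO for the path */
	uint32_t ms_goneby;	/* ms since last send; huge if never sent */
	int unconfirmed;	/* SCTP_ADDR_UNCONFIRMED analogue */
	int reachable;		/* !SCTP_ADDR_NOT_REACHABLE analogue */
};

static int
pick_hb_dest(const struct hb_cand *c, size_t n)
{
	uint32_t best_ms = 0;
	int best = -1;
	size_t i;

	for (i = 0; i < n; i++) {
		/* unconfirmed-but-reachable paths get HBs at a higher rate */
		int override = c[i].unconfirmed && c[i].reachable;

		if ((c[i].ms_goneby >= c[i].rto_ms || override) &&
		    c[i].ms_goneby > best_ms) {
			best_ms = c[i].ms_goneby;
			best = (int)i;
		}
	}
	return (best);	/* -1 means nothing to HB */
}
#endif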
10731 sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
10733 struct sctp_tmit_chunk *chk;
10734 struct sctp_nets *net;
10735 struct sctp_heartbeat_chunk *hb;
10736 struct timeval now;
10737 struct sockaddr_in *sin;
10738 struct sockaddr_in6 *sin6;
10740 SCTP_TCB_LOCK_ASSERT(stcb);
10741 if (user_req == 0) {
10742 net = sctp_select_hb_destination(stcb, &now);
10745 * All are busy, none to send to; just start the
10748 if (stcb->asoc.state == 0) {
10751 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
10762 (void)SCTP_GETTIME_TIMEVAL(&now);
10764 sin = (struct sockaddr_in *)&net->ro._l_addr;
10765 if (sin->sin_family != AF_INET) {
10766 if (sin->sin_family != AF_INET6) {
10771 sctp_alloc_a_chunk(stcb, chk);
10773 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
10776 chk->copy_by_ref = 0;
10777 chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
10778 chk->rec.chunk_id.can_take_data = 1;
10779 chk->asoc = &stcb->asoc;
10780 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
10782 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
10783 if (chk->data == NULL) {
10784 sctp_free_a_chunk(stcb, chk);
10787 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10788 SCTP_BUF_LEN(chk->data) = chk->send_size;
10789 chk->sent = SCTP_DATAGRAM_UNSENT;
10790 chk->snd_count = 0;
10792 atomic_add_int(&chk->whoTo->ref_count, 1);
10793 /* Now we have a mbuf that we can fill in with the details */
10794 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
10795 memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
10796 /* fill out chunk header */
10797 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
10798 hb->ch.chunk_flags = 0;
10799 hb->ch.chunk_length = htons(chk->send_size);
10800 /* Fill out hb parameter */
10801 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
10802 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
10803 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
10804 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
10805 /* Did our user request this one? Put it in */
10806 hb->heartbeat.hb_info.user_req = user_req;
10807 hb->heartbeat.hb_info.addr_family = sin->sin_family;
10808 hb->heartbeat.hb_info.addr_len = sin->sin_len;
10809 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
10811 * we only take from the entropy pool if the address is not confirmed
10814 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
10815 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
10817 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
10818 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
10820 if (sin->sin_family == AF_INET) {
10821 memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
10822 } else if (sin->sin_family == AF_INET6) {
10823 /* We leave the scope the way it is in our lookup table. */
10824 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
10825 memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
10827 /* huh compiler bug */
10832 * JRS 5/14/07 - In CMT PF, the T3 timer is used to track
10833 * PF-heartbeats. Because of this, threshold management is done by
10834 * the t3 timer handler, and does not need to be done upon the send
10835 * of a PF-heartbeat. If CMT PF is on and the destination to which a
10836 * heartbeat is being sent is in PF state, do NOT do threshold
10839 if ((stcb->asoc.sctp_cmt_pf == 0) ||
10840 ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF)) {
10841 /* ok we have a destination that needs a beat */
10842 /* let's do the threshold management Qiaobing style */
10843 if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
10844 stcb->asoc.max_send_times)) {
10846 * we have lost the association; in a way this is
10847 * quite bad since we are charged one attempt even
10848 * though we did not actually send yet. This is the
10849 * downside to the Q's style as defined in the RFC,
10850 * as opposed to my alternate style.
10852 if (chk->data != NULL) {
10853 sctp_m_freem(chk->data);
10857 * Here we do NOT use the macro since the
10858 * association is now gone.
10861 sctp_free_remote_addr(chk->whoTo);
10864 sctp_free_a_chunk((struct sctp_tcb *)NULL, chk);
10868 net->hb_responded = 0;
10869 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
10870 stcb->asoc.ctrl_queue_cnt++;
10871 SCTP_STAT_INCR(sctps_sendheartbeat);
10873 * Call directly med level routine to put out the chunk. It will
10874 * always tumble out control chunks aka HB but it may even tumble out data too.
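/*
 * The HB info parameter filled in above carries the send time so the
 * HEARTBEAT-ACK can be turned into an RTT sample. A sketch of that round
 * trip; struct hb_info mirrors the two time_value fields, the helper
 * names are hypothetical:
 */
#if 0
#include <stdint.h>
#include <sys/time.h>

struct hb_info {
	uint32_t time_value_1;	/* tv_sec at send */
	uint32_t time_value_2;	/* tv_usec at send */
};

static void
hb_stamp(struct hb_info *hb, const struct timeval *now)
{
	hb->time_value_1 = (uint32_t)now->tv_sec;
	hb->time_value_2 = (uint32_t)now->tv_usec;
}

/* On the ACK, the echoed stamp yields one RTT sample for the path. */
static uint32_t
hb_rtt_ms(const struct hb_info *hb, const struct timeval *now)
{
	int64_t then_us, now_us;

	then_us = (int64_t)hb->time_value_1 * 1000000 + hb->time_value_2;
	now_us = (int64_t)now->tv_sec * 1000000 + now->tv_usec;
	return ((uint32_t)((now_us - then_us) / 1000));
}
#endif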
10881 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
10884 struct sctp_association *asoc;
10885 struct sctp_ecne_chunk *ecne;
10886 struct sctp_tmit_chunk *chk;
10888 asoc = &stcb->asoc;
10889 SCTP_TCB_LOCK_ASSERT(stcb);
10890 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
10891 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
10892 /* found a previous ECN_ECHO update it if needed */
10893 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
10894 ecne->tsn = htonl(high_tsn);
10898 /* nope could not find one to update so we must build one */
10899 sctp_alloc_a_chunk(stcb, chk);
10903 chk->copy_by_ref = 0;
10904 SCTP_STAT_INCR(sctps_sendecne);
10905 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
10906 chk->rec.chunk_id.can_take_data = 0;
10907 chk->asoc = &stcb->asoc;
10908 chk->send_size = sizeof(struct sctp_ecne_chunk);
10909 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
10910 if (chk->data == NULL) {
10911 sctp_free_a_chunk(stcb, chk);
10914 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
10915 SCTP_BUF_LEN(chk->data) = chk->send_size;
10916 chk->sent = SCTP_DATAGRAM_UNSENT;
10917 chk->snd_count = 0;
10919 atomic_add_int(&chk->whoTo->ref_count, 1);
10920 stcb->asoc.ecn_echo_cnt_onq++;
10921 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
10922 ecne->ch.chunk_type = SCTP_ECN_ECHO;
10923 ecne->ch.chunk_flags = 0;
10924 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
10925 ecne->tsn = htonl(high_tsn);
10926 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
10927 asoc->ctrl_queue_cnt++;
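/*
 * Note the update-or-queue pattern above: an ECN-ECHO already sitting in
 * the control queue is simply re-stamped with the newer TSN instead of
 * queueing a duplicate. The same idea in miniature (all names
 * hypothetical):
 */
#if 0
#include <stdint.h>
#include <arpa/inet.h>

struct ctrl_chunk {
	int type;
	uint32_t tsn_net;	/* network byte order, as on the wire */
	struct ctrl_chunk *next;
};

/* Returns 1 if an existing chunk was updated, 0 if one must be queued. */
static int
update_queued(struct ctrl_chunk *head, int type, uint32_t high_tsn)
{
	struct ctrl_chunk *c;

	for (c = head; c != NULL; c = c->next) {
		if (c->type == type) {
			c->tsn_net = htonl(high_tsn);
			return (1);
		}
	}
	return (0);
}
#endif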
10931 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
10932 struct mbuf *m, int iphlen, int bad_crc)
10934 struct sctp_association *asoc;
10935 struct sctp_pktdrop_chunk *drp;
10936 struct sctp_tmit_chunk *chk;
10943 struct ip6_hdr *ip6h;
10946 int fullsz = 0, extra = 0;
10949 struct sctp_chunkhdr *ch, chunk_buf;
10950 unsigned int chk_length;
10955 asoc = &stcb->asoc;
10956 SCTP_TCB_LOCK_ASSERT(stcb);
10957 if (asoc->peer_supports_pktdrop == 0) {
10959 * peer must declare support before I send one.
10963 if (stcb->sctp_socket == NULL) {
10966 sctp_alloc_a_chunk(stcb, chk);
10970 chk->copy_by_ref = 0;
10971 iph = mtod(m, struct ip *);
10973 sctp_free_a_chunk(stcb, chk);
10976 switch (iph->ip_v) {
10979 len = chk->send_size = iph->ip_len;
10982 case IPV6_VERSION >> 4:
10984 ip6h = mtod(m, struct ip6_hdr *);
10985 len = chk->send_size = htons(ip6h->ip6_plen);
10991 /* Validate that we do not have an ABORT in here. */
10992 offset = iphlen + sizeof(struct sctphdr);
10993 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
10994 sizeof(*ch), (uint8_t *) & chunk_buf);
10995 while (ch != NULL) {
10996 chk_length = ntohs(ch->chunk_length);
10997 if (chk_length < sizeof(*ch)) {
10998 /* break to abort land */
11001 switch (ch->chunk_type) {
11002 case SCTP_PACKET_DROPPED:
11003 case SCTP_ABORT_ASSOCIATION:
11005 * we don't respond with a PKT-DROP to an ABORT
11008 sctp_free_a_chunk(stcb, chk);
11013 offset += SCTP_SIZE32(chk_length);
11014 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
11015 sizeof(*ch), (uint8_t *) & chunk_buf);
11018 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
11019 min(stcb->asoc.smallest_mtu, MCLBYTES)) {
11021 * only send 1 mtu worth, trim off the excess on the end.
11023 fullsz = len - extra;
11024 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
11027 chk->asoc = &stcb->asoc;
11028 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11029 if (chk->data == NULL) {
11031 sctp_free_a_chunk(stcb, chk);
11034 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11035 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
11037 sctp_m_freem(chk->data);
11041 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
11042 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
11043 chk->book_size_scale = 0;
11045 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
11046 drp->trunc_len = htons(fullsz);
11048 * Len is already adjusted to size minus overhead above; take
11049 * out the pkt_drop chunk itself from it.
11051 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk);
11052 len = chk->send_size;
11054 /* no truncation needed */
11055 drp->ch.chunk_flags = 0;
11056 drp->trunc_len = htons(0);
11059 drp->ch.chunk_flags |= SCTP_BADCRC;
11061 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
11062 SCTP_BUF_LEN(chk->data) = chk->send_size;
11063 chk->sent = SCTP_DATAGRAM_UNSENT;
11064 chk->snd_count = 0;
11066 /* we should hit here */
11069 chk->whoTo = asoc->primary_destination;
11071 atomic_add_int(&chk->whoTo->ref_count, 1);
11072 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
11073 chk->rec.chunk_id.can_take_data = 1;
11074 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
11075 drp->ch.chunk_length = htons(chk->send_size);
11076 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
11080 drp->bottle_bw = htonl(spc);
11081 if (asoc->my_rwnd) {
11082 drp->current_onq = htonl(asoc->size_on_reasm_queue +
11083 asoc->size_on_all_streams +
11084 asoc->my_rwnd_control_len +
11085 stcb->sctp_socket->so_rcv.sb_cc);
11088 * If my rwnd is 0, possibly from mbuf depletion as well as
11089 * space used, tell the peer there is NO space aka onq == bw
11091 drp->current_onq = htonl(spc);
11095 m_copydata(m, iphlen, len, (caddr_t)datap);
11096 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11097 asoc->ctrl_queue_cnt++;
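/*
 * The bottle_bw/current_onq report above tells the peer how full our
 * receive side is; when rwnd is zero we report onq == bw, i.e. no space
 * at all. A sketch of that report (struct drop_report and the helper are
 * illustrative):
 */
#if 0
#include <stdint.h>
#include <arpa/inet.h>

struct drop_report {
	uint32_t bottle_bw;	/* receive buffer limit */
	uint32_t current_onq;	/* bytes currently held */
};

static void
fill_drop_report(struct drop_report *r, uint32_t rcv_limit, uint32_t rwnd,
    uint32_t onq_bytes)
{
	r->bottle_bw = htonl(rcv_limit);
	if (rwnd != 0)
		r->current_onq = htonl(onq_bytes);
	else
		r->current_onq = htonl(rcv_limit);	/* no space: onq == bw */
}
#endif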
11101 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
11103 struct sctp_association *asoc;
11104 struct sctp_cwr_chunk *cwr;
11105 struct sctp_tmit_chunk *chk;
11107 asoc = &stcb->asoc;
11108 SCTP_TCB_LOCK_ASSERT(stcb);
11109 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
11110 if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
11111 /* found a previous ECN_CWR update it if needed */
11112 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11113 if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
11115 cwr->tsn = htonl(high_tsn);
11120 /* nope could not find one to update so we must build one */
11121 sctp_alloc_a_chunk(stcb, chk);
11125 chk->copy_by_ref = 0;
11126 chk->rec.chunk_id.id = SCTP_ECN_CWR;
11127 chk->rec.chunk_id.can_take_data = 1;
11128 chk->asoc = &stcb->asoc;
11129 chk->send_size = sizeof(struct sctp_cwr_chunk);
11130 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
11131 if (chk->data == NULL) {
11132 sctp_free_a_chunk(stcb, chk);
11135 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11136 SCTP_BUF_LEN(chk->data) = chk->send_size;
11137 chk->sent = SCTP_DATAGRAM_UNSENT;
11138 chk->snd_count = 0;
11140 atomic_add_int(&chk->whoTo->ref_count, 1);
11141 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
11142 cwr->ch.chunk_type = SCTP_ECN_CWR;
11143 cwr->ch.chunk_flags = 0;
11144 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
11145 cwr->tsn = htonl(high_tsn);
11146 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
11147 asoc->ctrl_queue_cnt++;
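/*
 * The compare_with_wrap() test above keeps the queued CWR's TSN monotone
 * under 32-bit wrap-around. One common formulation of that serial-number
 * comparison (tsn_gt is a hypothetical name; the real helper also takes
 * an explicit wrap bound):
 */
#if 0
#include <stdint.h>

/* Nonzero when a > b in 32-bit wrap-around (serial) arithmetic. */
static int
tsn_gt(uint32_t a, uint32_t b)
{
	return ((a != b) && ((uint32_t)(a - b) < 0x80000000U));
}
#endif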
11151 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
11152 int number_entries, uint16_t * list,
11153 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
11155 int len, old_len, i;
11156 struct sctp_stream_reset_out_request *req_out;
11157 struct sctp_chunkhdr *ch;
11159 ch = mtod(chk->data, struct sctp_chunkhdr *);
11162 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11164 /* get to new offset for the param. */
11165 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
11166 /* now how long will this param be? */
11167 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
11168 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
11169 req_out->ph.param_length = htons(len);
11170 req_out->request_seq = htonl(seq);
11171 req_out->response_seq = htonl(resp_seq);
11172 req_out->send_reset_at_tsn = htonl(last_sent);
11173 if (number_entries) {
11174 for (i = 0; i < number_entries; i++) {
11175 req_out->list_of_streams[i] = htons(list[i]);
11178 if (SCTP_SIZE32(len) > len) {
11180 * Need to worry about the pad we may end up adding to the
11181 * end. This is easy since the struct is either aligned to 4
11182 * bytes or 2 bytes off.
11184 req_out->list_of_streams[number_entries] = 0;
11186 /* now fix the chunk length */
11187 ch->chunk_length = htons(len + old_len);
11188 chk->book_size = len + old_len;
11189 chk->book_size_scale = 0;
11190 chk->send_size = SCTP_SIZE32(chk->book_size);
11191 SCTP_BUF_LEN(chk->data) = chk->send_size;
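/*
 * All of the reset-parameter builders rely on the same alignment rule: a
 * parameter carrying a list of 16-bit stream ids is either already 4-byte
 * aligned or exactly two bytes short, so at most one zero entry pads it.
 * A sketch of that math (size32 mirrors SCTP_SIZE32; names hypothetical):
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static size_t
size32(size_t len)
{
	return ((len + 3) & ~(size_t)3);	/* round up to multiple of 4 */
}

/* Wire length of a reset parameter carrying nstreams 16-bit entries. */
static size_t
padded_param_len(size_t hdr_len, size_t nstreams)
{
	return (size32(hdr_len + nstreams * sizeof(uint16_t)));
}
#endif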
11197 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
11198 int number_entries, uint16_t * list,
11201 int len, old_len, i;
11202 struct sctp_stream_reset_in_request *req_in;
11203 struct sctp_chunkhdr *ch;
11205 ch = mtod(chk->data, struct sctp_chunkhdr *);
11208 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11210 /* get to new offset for the param. */
11211 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
11212 /* now how long will this param be? */
11213 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
11214 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
11215 req_in->ph.param_length = htons(len);
11216 req_in->request_seq = htonl(seq);
11217 if (number_entries) {
11218 for (i = 0; i < number_entries; i++) {
11219 req_in->list_of_streams[i] = htons(list[i]);
11222 if (SCTP_SIZE32(len) > len) {
11224 * Need to worry about the pad we may end up adding to the
11225 * end. This is easy since the struct is either aligned to 4
11226 * bytes or 2 bytes off.
11228 req_in->list_of_streams[number_entries] = 0;
11230 /* now fix the chunk length */
11231 ch->chunk_length = htons(len + old_len);
11232 chk->book_size = len + old_len;
11233 chk->book_size_scale = 0;
11234 chk->send_size = SCTP_SIZE32(chk->book_size);
11235 SCTP_BUF_LEN(chk->data) = chk->send_size;
11241 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
11245 struct sctp_stream_reset_tsn_request *req_tsn;
11246 struct sctp_chunkhdr *ch;
11248 ch = mtod(chk->data, struct sctp_chunkhdr *);
11251 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11253 /* get to new offset for the param. */
11254 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
11255 /* now how long will this param be? */
11256 len = sizeof(struct sctp_stream_reset_tsn_request);
11257 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
11258 req_tsn->ph.param_length = htons(len);
11259 req_tsn->request_seq = htonl(seq);
11261 /* now fix the chunk length */
11262 ch->chunk_length = htons(len + old_len);
11263 chk->send_size = len + old_len;
11264 chk->book_size = SCTP_SIZE32(chk->send_size);
11265 chk->book_size_scale = 0;
11266 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11271 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
11272 uint32_t resp_seq, uint32_t result)
11275 struct sctp_stream_reset_response *resp;
11276 struct sctp_chunkhdr *ch;
11278 ch = mtod(chk->data, struct sctp_chunkhdr *);
11281 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11283 /* get to new offset for the param. */
11284 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
11285 /* now how long will this param be? */
11286 len = sizeof(struct sctp_stream_reset_response);
11287 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11288 resp->ph.param_length = htons(len);
11289 resp->response_seq = htonl(resp_seq);
11290 resp->result = htonl(result);
11292 /* now fix the chunk length */
11293 ch->chunk_length = htons(len + old_len);
11294 chk->book_size = len + old_len;
11295 chk->book_size_scale = 0;
11296 chk->send_size = SCTP_SIZE32(chk->book_size);
11297 SCTP_BUF_LEN(chk->data) = chk->send_size;
11304 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
11305 uint32_t resp_seq, uint32_t result,
11306 uint32_t send_una, uint32_t recv_next)
11309 struct sctp_stream_reset_response_tsn *resp;
11310 struct sctp_chunkhdr *ch;
11312 ch = mtod(chk->data, struct sctp_chunkhdr *);
11315 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11317 /* get to new offset for the param. */
11318 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
11319 /* now how long will this param be? */
11320 len = sizeof(struct sctp_stream_reset_response_tsn);
11321 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
11322 resp->ph.param_length = htons(len);
11323 resp->response_seq = htonl(resp_seq);
11324 resp->result = htonl(result);
11325 resp->senders_next_tsn = htonl(send_una);
11326 resp->receivers_next_tsn = htonl(recv_next);
11328 /* now fix the chunk length */
11329 ch->chunk_length = htons(len + old_len);
11330 chk->book_size = len + old_len;
11331 chk->send_size = SCTP_SIZE32(chk->book_size);
11332 chk->book_size_scale = 0;
11333 SCTP_BUF_LEN(chk->data) = chk->send_size;
11338 sctp_add_a_stream(struct sctp_tmit_chunk *chk,
11343 struct sctp_chunkhdr *ch;
11344 struct sctp_stream_reset_add_strm *addstr;
11346 ch = mtod(chk->data, struct sctp_chunkhdr *);
11347 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
11349 /* get to new offset for the param. */
11350 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
11351 /* now how long will this param be? */
11352 len = sizeof(struct sctp_stream_reset_add_strm);
11355 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_STREAMS);
11356 addstr->ph.param_length = htons(len);
11357 addstr->request_seq = htonl(seq);
11358 addstr->number_of_streams = htons(adding);
11359 addstr->reserved = 0;
11361 /* now fix the chunk length */
11362 ch->chunk_length = htons(len + old_len);
11363 chk->send_size = len + old_len;
11364 chk->book_size = SCTP_SIZE32(chk->send_size);
11365 chk->book_size_scale = 0;
11366 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
11371 sctp_send_str_reset_req(struct sctp_tcb *stcb,
11372 int number_entries, uint16_t * list,
11373 uint8_t send_out_req,
11375 uint8_t send_in_req,
11376 uint8_t send_tsn_req,
11377 uint8_t add_stream,
11382 struct sctp_association *asoc;
11383 struct sctp_tmit_chunk *chk;
11384 struct sctp_chunkhdr *ch;
11387 asoc = &stcb->asoc;
11388 if (asoc->stream_reset_outstanding) {
11390 * Already one pending, must get ACK back to clear the flag.
11392 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
11395 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) &&
11396 (add_stream == 0)) {
11397 /* nothing to do */
11398 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11401 if (send_tsn_req && (send_out_req || send_in_req)) {
11402 /* error, can't do that */
11403 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
11406 sctp_alloc_a_chunk(stcb, chk);
11408 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11411 chk->copy_by_ref = 0;
11412 chk->rec.chunk_id.id = SCTP_STREAM_RESET;
11413 chk->rec.chunk_id.can_take_data = 0;
11414 chk->asoc = &stcb->asoc;
11415 chk->book_size = sizeof(struct sctp_chunkhdr);
11416 chk->send_size = SCTP_SIZE32(chk->book_size);
11417 chk->book_size_scale = 0;
11419 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
11420 if (chk->data == NULL) {
11421 sctp_free_a_chunk(stcb, chk);
11422 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11425 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
11427 /* setup chunk parameters */
11428 chk->sent = SCTP_DATAGRAM_UNSENT;
11429 chk->snd_count = 0;
11430 chk->whoTo = asoc->primary_destination;
11431 atomic_add_int(&chk->whoTo->ref_count, 1);
11433 ch = mtod(chk->data, struct sctp_chunkhdr *);
11434 ch->chunk_type = SCTP_STREAM_RESET;
11435 ch->chunk_flags = 0;
11436 ch->chunk_length = htons(chk->book_size);
11437 SCTP_BUF_LEN(chk->data) = chk->send_size;
11439 seq = stcb->asoc.str_reset_seq_out;
11440 if (send_out_req) {
11441 sctp_add_stream_reset_out(chk, number_entries, list,
11442 seq, resp_seq, (stcb->asoc.sending_seq - 1));
11443 asoc->stream_reset_out_is_outstanding = 1;
11445 asoc->stream_reset_outstanding++;
11448 sctp_add_a_stream(chk, seq, adding);
11450 asoc->stream_reset_outstanding++;
11453 sctp_add_stream_reset_in(chk, number_entries, list, seq);
11454 asoc->stream_reset_outstanding++;
11456 if (send_tsn_req) {
11457 sctp_add_stream_reset_tsn(chk, seq);
11458 asoc->stream_reset_outstanding++;
11460 asoc->str_reset = chk;
11462 /* insert the chunk for sending */
11463 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
11466 asoc->ctrl_queue_cnt++;
11467 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
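/*
 * The guards above enforce the stream-reset state machine: one
 * outstanding request at a time, at least one sub-request, and a TSN
 * reset may not be combined with stream resets. A compact sketch of
 * those checks (struct reset_state and start_reset are stand-ins):
 */
#if 0
#include <errno.h>

struct reset_state {
	unsigned outstanding;	/* cleared when the response arrives */
};

static int
start_reset(struct reset_state *rs, int out_req, int in_req, int tsn_req,
    int add_strm)
{
	if (rs->outstanding)
		return (EBUSY);		/* must get the ACK back first */
	if (!out_req && !in_req && !tsn_req && !add_strm)
		return (EINVAL);	/* nothing to do */
	if (tsn_req && (out_req || in_req))
		return (EINVAL);	/* can't mix TSN and stream resets */
	rs->outstanding += (unsigned)(out_req + in_req + tsn_req + add_strm);
	return (0);
}
#endif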
11472 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
11473 struct mbuf *err_cause, uint32_t vrf_id, uint16_t port)
11476 * Formulate the abort message, and send it back down.
11478 struct mbuf *o_pak;
11480 struct sctp_abort_msg *abm;
11481 struct ip *iph, *iph_out;
11482 struct udphdr *udp;
11485 struct ip6_hdr *ip6, *ip6_out;
11488 int iphlen_out, len;
11490 /* don't respond to ABORT with ABORT */
11491 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
11493 sctp_m_freem(err_cause);
11496 iph = mtod(m, struct ip *);
11497 switch (iph->ip_v) {
11499 len = (sizeof(struct ip) + sizeof(struct sctp_abort_msg));
11502 case IPV6_VERSION >> 4:
11503 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg));
11508 sctp_m_freem(err_cause);
11513 len += sizeof(struct udphdr);
11515 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
11516 if (mout == NULL) {
11518 sctp_m_freem(err_cause);
11522 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11523 SCTP_BUF_LEN(mout) = len;
11524 SCTP_BUF_NEXT(mout) = err_cause;
11529 switch (iph->ip_v) {
11531 iph_out = mtod(mout, struct ip *);
11533 /* Fill in the IP header for the ABORT */
11534 iph_out->ip_v = IPVERSION;
11535 iph_out->ip_hl = (sizeof(struct ip) / 4);
11536 iph_out->ip_tos = (u_char)0;
11537 iph_out->ip_id = 0;
11538 iph_out->ip_off = 0;
11539 iph_out->ip_ttl = MAXTTL;
11541 iph_out->ip_p = IPPROTO_UDP;
11543 iph_out->ip_p = IPPROTO_SCTP;
11545 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
11546 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
11547 /* let IP layer calculate this */
11548 iph_out->ip_sum = 0;
11550 iphlen_out = sizeof(*iph_out);
11551 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
11554 case IPV6_VERSION >> 4:
11555 ip6 = (struct ip6_hdr *)iph;
11556 ip6_out = mtod(mout, struct ip6_hdr *);
11558 /* Fill in the IP6 header for the ABORT */
11559 ip6_out->ip6_flow = ip6->ip6_flow;
11560 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11562 ip6_out->ip6_nxt = IPPROTO_UDP;
11564 ip6_out->ip6_nxt = IPPROTO_SCTP;
11566 ip6_out->ip6_src = ip6->ip6_dst;
11567 ip6_out->ip6_dst = ip6->ip6_src;
11569 iphlen_out = sizeof(*ip6_out);
11570 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
11574 /* Currently not supported */
11575 sctp_m_freem(mout);
11579 udp = (struct udphdr *)abm;
11581 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11582 udp->uh_dport = port;
11583 /* set udp->uh_ulen later */
11585 iphlen_out += sizeof(struct udphdr);
11586 abm = (struct sctp_abort_msg *)((caddr_t)abm + sizeof(struct udphdr));
11588 abm->sh.src_port = sh->dest_port;
11589 abm->sh.dest_port = sh->src_port;
11590 abm->sh.checksum = 0;
11592 abm->sh.v_tag = sh->v_tag;
11593 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
11595 abm->sh.v_tag = htonl(vtag);
11596 abm->msg.ch.chunk_flags = 0;
11598 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
11601 struct mbuf *m_tmp = err_cause;
11604 /* get length of the err_cause chain */
11605 while (m_tmp != NULL) {
11606 err_len += SCTP_BUF_LEN(m_tmp);
11607 m_tmp = SCTP_BUF_NEXT(m_tmp);
11609 len = SCTP_BUF_LEN(mout) + err_len;
11611 /* need pad at end of chunk */
11612 uint32_t cpthis = 0;
11615 padlen = 4 - (len % 4);
11616 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
11619 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
11621 len = SCTP_BUF_LEN(mout);
11622 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
11625 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11627 sctp_m_freem(mout);
11630 if (iph_out != NULL) {
11632 struct sctp_tcb *stcb = NULL;
11635 /* zap the stack pointer to the route */
11636 bzero(&ro, sizeof ro);
11638 udp->uh_ulen = htons(len - sizeof(struct ip));
11639 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11641 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip_output:\n");
11642 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, iph_out, &abm->sh);
11643 /* set IPv4 length */
11644 iph_out->ip_len = len;
11646 #ifdef SCTP_PACKET_LOGGING
11647 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11648 sctp_packet_log(mout, len);
11650 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11652 abm->sh.checksum = sctp_calculate_cksum(mout, iphlen_out);
11653 SCTP_STAT_INCR(sctps_sendswcrc);
11654 SCTP_ENABLE_UDP_CSUM(o_pak);
11656 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11657 mout->m_pkthdr.csum_data = 0;
11658 SCTP_STAT_INCR(sctps_sendhwcrc);
11660 SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
11662 /* Free the route if we got one back */
11667 if (ip6_out != NULL) {
11668 struct route_in6 ro;
11670 struct sctp_tcb *stcb = NULL;
11671 struct ifnet *ifp = NULL;
11673 /* zap the stack pointer to the route */
11674 bzero(&ro, sizeof(ro));
11676 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
11678 SCTPDBG(SCTP_DEBUG_OUTPUT2, "sctp_send_abort calling ip6_output:\n");
11679 SCTPDBG_PKT(SCTP_DEBUG_OUTPUT2, (struct ip *)ip6_out, &abm->sh);
11680 ip6_out->ip6_plen = len - sizeof(*ip6_out);
11681 #ifdef SCTP_PACKET_LOGGING
11682 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11683 sctp_packet_log(mout, len);
11685 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11687 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
11689 (stcb->asoc.loopback_scope))) {
11690 abm->sh.checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11691 SCTP_STAT_INCR(sctps_sendswcrc);
11693 SCTP_STAT_INCR(sctps_sendnocrc);
11695 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11696 udp->uh_sum = 0xffff;
11699 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
11701 (stcb->asoc.loopback_scope))) {
11702 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11703 mout->m_pkthdr.csum_data = 0;
11704 SCTP_STAT_INCR(sctps_sendhwcrc);
11706 SCTP_STAT_INCR(sctps_sendnocrc);
11709 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
11711 /* Free the route if we got one back */
11716 SCTP_STAT_INCR(sctps_sendpackets);
11717 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11718 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
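/*
 * The vtag handling above is the out-of-the-blue ABORT rule: with no TCB
 * (vtag == 0) we echo the arriving tag and set the T bit; otherwise we
 * use our own tag. A sketch of just that rule (T_BIT stands in for
 * SCTP_HAD_NO_TCB):
 */
#if 0
#include <stdint.h>
#include <arpa/inet.h>

#define T_BIT 0x01

static void
set_abort_tag(uint32_t vtag_host, uint32_t arriving_vtag_wire,
    uint32_t *vtag_out, uint8_t *flags_out)
{
	if (vtag_host == 0) {
		*vtag_out = arriving_vtag_wire;	/* reflect; already wire order */
		*flags_out = T_BIT;
	} else {
		*vtag_out = htonl(vtag_host);
		*flags_out = 0;
	}
}
#endif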
11722 sctp_send_operr_to(struct mbuf *m, int iphlen, struct mbuf *scm, uint32_t vtag,
11723 uint32_t vrf_id, uint16_t port)
11725 struct mbuf *o_pak;
11726 struct sctphdr *sh, *sh_out;
11727 struct sctp_chunkhdr *ch;
11728 struct ip *iph, *iph_out;
11729 struct udphdr *udp = NULL;
11733 struct ip6_hdr *ip6, *ip6_out;
11736 int iphlen_out, len;
11738 iph = mtod(m, struct ip *);
11739 sh = (struct sctphdr *)((caddr_t)iph + iphlen);
11740 switch (iph->ip_v) {
11742 len = (sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
11745 case IPV6_VERSION >> 4:
11746 len = (sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr));
11756 len += sizeof(struct udphdr);
11758 mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_DONTWAIT, 1, MT_DATA);
11759 if (mout == NULL) {
11765 SCTP_BUF_RESV_UF(mout, max_linkhdr);
11766 SCTP_BUF_LEN(mout) = len;
11767 SCTP_BUF_NEXT(mout) = scm;
11772 switch (iph->ip_v) {
11774 iph_out = mtod(mout, struct ip *);
11776 /* Fill in the IP header for the ABORT */
11777 iph_out->ip_v = IPVERSION;
11778 iph_out->ip_hl = (sizeof(struct ip) / 4);
11779 iph_out->ip_tos = (u_char)0;
11780 iph_out->ip_id = 0;
11781 iph_out->ip_off = 0;
11782 iph_out->ip_ttl = MAXTTL;
11784 iph_out->ip_p = IPPROTO_UDP;
11786 iph_out->ip_p = IPPROTO_SCTP;
11788 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
11789 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
11790 /* let IP layer calculate this */
11791 iph_out->ip_sum = 0;
11793 iphlen_out = sizeof(struct ip);
11794 sh_out = (struct sctphdr *)((caddr_t)iph_out + iphlen_out);
11797 case IPV6_VERSION >> 4:
11798 ip6 = (struct ip6_hdr *)iph;
11799 ip6_out = mtod(mout, struct ip6_hdr *);
11801 /* Fill in the IP6 header for the ABORT */
11802 ip6_out->ip6_flow = ip6->ip6_flow;
11803 ip6_out->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
11805 ip6_out->ip6_nxt = IPPROTO_UDP;
11807 ip6_out->ip6_nxt = IPPROTO_SCTP;
11809 ip6_out->ip6_src = ip6->ip6_dst;
11810 ip6_out->ip6_dst = ip6->ip6_src;
11812 iphlen_out = sizeof(struct ip6_hdr);
11813 sh_out = (struct sctphdr *)((caddr_t)ip6_out + iphlen_out);
11817 /* Currently not supported */
11818 sctp_m_freem(mout);
11822 udp = (struct udphdr *)sh_out;
11824 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
11825 udp->uh_dport = port;
11826 /* set udp->uh_ulen later */
11828 iphlen_out += sizeof(struct udphdr);
11829 sh_out = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
11831 sh_out->src_port = sh->dest_port;
11832 sh_out->dest_port = sh->src_port;
11833 sh_out->v_tag = vtag;
11834 sh_out->checksum = 0;
11836 ch = (struct sctp_chunkhdr *)((caddr_t)sh_out + sizeof(struct sctphdr));
11837 ch->chunk_type = SCTP_OPERATION_ERROR;
11838 ch->chunk_flags = 0;
11841 struct mbuf *m_tmp = scm;
11844 /* get length of the err_cause chain */
11845 while (m_tmp != NULL) {
11846 cause_len += SCTP_BUF_LEN(m_tmp);
11847 m_tmp = SCTP_BUF_NEXT(m_tmp);
11849 len = SCTP_BUF_LEN(mout) + cause_len;
11850 if (cause_len % 4) {
11851 /* need pad at end of chunk */
11852 uint32_t cpthis = 0;
11855 padlen = 4 - (len % 4);
11856 m_copyback(mout, len, padlen, (caddr_t)&cpthis);
11859 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
11861 len = SCTP_BUF_LEN(mout);
11862 ch->chunk_length = htons(sizeof(struct sctp_chunkhdr));
11865 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
11867 sctp_m_freem(mout);
11870 if (iph_out != NULL) {
11872 struct sctp_tcb *stcb = NULL;
11875 /* zap the stack pointer to the route */
11876 bzero(&ro, sizeof ro);
11878 udp->uh_ulen = htons(len - sizeof(struct ip));
11879 udp->uh_sum = in_pseudo(iph_out->ip_src.s_addr, iph_out->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
11881 /* set IPv4 length */
11882 iph_out->ip_len = len;
11884 #ifdef SCTP_PACKET_LOGGING
11885 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11886 sctp_packet_log(mout, len);
11888 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11890 sh_out->checksum = sctp_calculate_cksum(mout, iphlen_out);
11891 SCTP_STAT_INCR(sctps_sendswcrc);
11892 SCTP_ENABLE_UDP_CSUM(o_pak);
11894 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11895 mout->m_pkthdr.csum_data = 0;
11896 SCTP_STAT_INCR(sctps_sendhwcrc);
11898 SCTP_IP_OUTPUT(ret, o_pak, &ro, stcb, vrf_id);
11900 /* Free the route if we got one back */
11905 if (ip6_out != NULL) {
11906 struct route_in6 ro;
11908 struct sctp_tcb *stcb = NULL;
11909 struct ifnet *ifp = NULL;
11911 /* zap the stack pointer to the route */
11912 bzero(&ro, sizeof(ro));
11914 udp->uh_ulen = htons(len - sizeof(struct ip6_hdr));
11916 ip6_out->ip6_plen = len - sizeof(*ip6_out);
11917 #ifdef SCTP_PACKET_LOGGING
11918 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
11919 sctp_packet_log(mout, len);
11921 SCTP_ATTACH_CHAIN(o_pak, mout, len);
11923 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
11925 (stcb->asoc.loopback_scope))) {
11926 sh_out->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
11927 SCTP_STAT_INCR(sctps_sendswcrc);
11929 SCTP_STAT_INCR(sctps_sendnocrc);
11931 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
11932 udp->uh_sum = 0xffff;
11935 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
11937 (stcb->asoc.loopback_scope))) {
11938 mout->m_pkthdr.csum_flags = CSUM_SCTP;
11939 mout->m_pkthdr.csum_data = 0;
11940 SCTP_STAT_INCR(sctps_sendhwcrc);
11942 SCTP_STAT_INCR(sctps_sendnocrc);
11945 SCTP_IP6_OUTPUT(ret, o_pak, &ro, &ifp, stcb, vrf_id);
11947 /* Free the route if we got one back */
11952 SCTP_STAT_INCR(sctps_sendpackets);
11953 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
11954 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
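/*
 * Both IPv6 paths above apply the classic UDP rule after in6_cksum(): a
 * computed checksum of zero must go out as 0xffff, because zero on the
 * wire means "no checksum". The fix-up in isolation:
 */
#if 0
#include <stdint.h>

static uint16_t
udp_cksum_fixup(uint16_t sum)
{
	return ((uint16_t)(sum == 0 ? 0xffff : sum));
}
#endif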
11957 static struct mbuf *
11958 sctp_copy_resume(struct sctp_stream_queue_pending *sp,
11960 struct sctp_sndrcvinfo *srcv,
11962 int user_marks_eor,
11965 struct mbuf **new_tail)
11969 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
11970 (M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
11972 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11975 *sndout = m_length(m, NULL);
11976 *new_tail = m_last(m);
11982 sctp_copy_one(struct sctp_stream_queue_pending *sp,
11989 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
11991 if (sp->data == NULL) {
11992 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
11995 sp->tail_mbuf = m_last(sp->data);
12001 static struct sctp_stream_queue_pending *
12002 sctp_copy_it_in(struct sctp_tcb *stcb,
12003 struct sctp_association *asoc,
12004 struct sctp_sndrcvinfo *srcv,
12006 struct sctp_nets *net,
12008 int user_marks_eor,
12013 * This routine must be very careful in its work. Protocol
12014 * processing is up and running so care must be taken to spl...()
12015 * when you need to do something that may affect the stcb/asoc. The
12016 * sb is locked, however. When data is copied the protocol processing
12017 * should be enabled since this is a slower operation...
12019 struct sctp_stream_queue_pending *sp = NULL;
12023 /* Now can we send this? */
12024 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12025 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12026 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12027 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12028 /* got data while shutting down */
12029 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12030 *error = ECONNRESET;
12033 sctp_alloc_a_strmoq(stcb, sp);
12035 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
12040 sp->sender_all_done = 0;
12041 sp->sinfo_flags = srcv->sinfo_flags;
12042 sp->timetolive = srcv->sinfo_timetolive;
12043 sp->ppid = srcv->sinfo_ppid;
12044 sp->context = srcv->sinfo_context;
12046 (void)SCTP_GETTIME_TIMEVAL(&sp->ts);
12048 sp->stream = srcv->sinfo_stream;
12049 sp->length = min(uio->uio_resid, max_send_len);
12050 if ((sp->length == (uint32_t) uio->uio_resid) &&
12051 ((user_marks_eor == 0) ||
12052 (srcv->sinfo_flags & SCTP_EOF) ||
12053 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
12054 sp->msg_is_complete = 1;
12056 sp->msg_is_complete = 0;
12058 sp->sender_all_done = 0;
12059 sp->some_taken = 0;
12060 sp->put_last_out = 0;
12061 resv_in_first = sizeof(struct sctp_data_chunk);
12062 sp->data = sp->tail_mbuf = NULL;
12063 if (sp->length == 0) {
12067 sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
12068 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
12069 sctp_auth_key_acquire(stcb, stcb->asoc.authinfo.active_keyid);
12070 sp->holds_key_ref = 1;
12072 *error = sctp_copy_one(sp, uio, resv_in_first);
12075 sctp_free_a_strmoq(stcb, sp);
12078 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
12081 sp->net = asoc->primary_destination;
12083 atomic_add_int(&sp->net->ref_count, 1);
12084 sctp_set_prsctp_policy(sp);
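/*
 * The msg_is_complete test above marks a message complete only when the
 * whole user buffer was consumed and, in explicit-EOR mode, only when the
 * sender actually flagged EOF or EOR. The same predicate, stand-alone
 * (flag values are stand-ins for SCTP_EOF/SCTP_EOR):
 */
#if 0
#include <stdint.h>

#define F_EOF 0x0100
#define F_EOR 0x0200

static int
msg_complete(uint32_t copied, uint32_t resid, int explicit_eor, int flags)
{
	return ((copied == resid) &&
	    ((explicit_eor == 0) ||
	    (flags & F_EOF) ||
	    (explicit_eor && (flags & F_EOR))));
}
#endif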
12092 sctp_sosend(struct socket *so,
12093 struct sockaddr *addr,
12096 struct mbuf *control,
12101 struct sctp_inpcb *inp;
12102 int error, use_rcvinfo = 0;
12103 struct sctp_sndrcvinfo srcv;
12104 struct sockaddr *addr_to_use;
12106 #if defined(INET) && defined(INET6)
12107 struct sockaddr_in sin;
12111 inp = (struct sctp_inpcb *)so->so_pcb;
12113 /* process cmsg snd/rcv info (maybe an assoc-id) */
12114 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
12120 addr_to_use = addr;
12121 #if defined(INET) && defined(INET6)
12122 if ((addr) && (addr->sa_family == AF_INET6)) {
12123 struct sockaddr_in6 *sin6;
12125 sin6 = (struct sockaddr_in6 *)addr;
12126 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
12127 in6_sin6_2_sin(&sin, sin6);
12128 addr_to_use = (struct sockaddr *)&sin;
12132 error = sctp_lower_sosend(so, addr_to_use, uio, top,
12143 sctp_lower_sosend(struct socket *so,
12144 struct sockaddr *addr,
12146 struct mbuf *i_pak,
12147 struct mbuf *control,
12150 struct sctp_sndrcvinfo *srcv
12155 unsigned int sndlen = 0, max_len;
12157 struct mbuf *top = NULL;
12158 int queue_only = 0, queue_only_for_init = 0;
12159 int free_cnt_applied = 0;
12161 int now_filled = 0;
12162 unsigned int inqueue_bytes = 0;
12163 struct sctp_block_entry be;
12164 struct sctp_inpcb *inp;
12165 struct sctp_tcb *stcb = NULL;
12166 struct timeval now;
12167 struct sctp_nets *net;
12168 struct sctp_association *asoc;
12169 struct sctp_inpcb *t_inp;
12170 int user_marks_eor;
12171 int create_lock_applied = 0;
12172 int nagle_applies = 0;
12173 int some_on_control = 0;
12174 int got_all_of_the_send = 0;
12175 int hold_tcblock = 0;
12176 int non_blocking = 0;
12177 int temp_flags = 0;
12178 uint32_t local_add_more, local_soresv = 0;
12185 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
12187 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12190 SCTP_RELEASE_PKT(i_pak);
12194 if ((uio == NULL) && (i_pak == NULL)) {
12195 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12198 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
12199 atomic_add_int(&inp->total_sends, 1);
12201 if (uio->uio_resid < 0) {
12202 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12205 sndlen = uio->uio_resid;
12207 top = SCTP_HEADER_TO_CHAIN(i_pak);
12208 sndlen = SCTP_HEADER_LEN(i_pak);
12210 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
12214 * Pre-screen address; if one is given, the sin_len
12215 * must be set correctly!
12218 switch (addr->sa_family) {
12221 if (addr->sa_len != sizeof(struct sockaddr_in)) {
12222 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12230 if (addr->sa_len != sizeof(struct sockaddr_in6)) {
12231 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12238 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
12239 error = EAFNOSUPPORT;
12245 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
12246 (inp->sctp_socket->so_qlimit)) {
12247 /* The listener can NOT send */
12248 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12252 if ((use_rcvinfo) && srcv) {
12253 if (INVALID_SINFO_FLAG(srcv->sinfo_flags) ||
12254 PR_SCTP_INVALID_POLICY(srcv->sinfo_flags)) {
12255 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12259 if (srcv->sinfo_flags)
12260 SCTP_STAT_INCR(sctps_sends_with_flags);
12262 if (srcv->sinfo_flags & SCTP_SENDALL) {
12263 /* it's a sendall */
12264 error = sctp_sendall(inp, uio, top, srcv);
12269 /* now we must find the assoc */
12270 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
12271 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12272 SCTP_INP_RLOCK(inp);
12273 stcb = LIST_FIRST(&inp->sctp_asoc_list);
12274 if (stcb == NULL) {
12275 SCTP_INP_RUNLOCK(inp);
12276 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12280 SCTP_TCB_LOCK(stcb);
12282 SCTP_INP_RUNLOCK(inp);
12284 /* Must locate the net structure if addr given */
12285 net = sctp_findnet(stcb, addr);
12287 /* validate port was 0 or correct */
12288 struct sockaddr_in *sin;
12290 sin = (struct sockaddr_in *)addr;
12291 if ((sin->sin_port != 0) &&
12292 (sin->sin_port != stcb->rport)) {
12296 temp_flags |= SCTP_ADDR_OVER;
12298 net = stcb->asoc.primary_destination;
12299 if (addr && (net == NULL)) {
12300 /* Could not find the address; was it legal? */
12301 if (addr->sa_family == AF_INET) {
12302 struct sockaddr_in *sin;
12304 sin = (struct sockaddr_in *)addr;
12305 if (sin->sin_addr.s_addr == 0) {
12306 if ((sin->sin_port == 0) ||
12307 (sin->sin_port == stcb->rport)) {
12308 net = stcb->asoc.primary_destination;
12312 struct sockaddr_in6 *sin6;
12314 sin6 = (struct sockaddr_in6 *)addr;
12315 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
12316 if ((sin6->sin6_port == 0) ||
12317 (sin6->sin6_port == stcb->rport)) {
12318 net = stcb->asoc.primary_destination;
12324 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12328 } else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) {
12329 stcb = sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0);
12333 * Must locate the net structure if addr given
12336 net = sctp_findnet(stcb, addr);
12338 net = stcb->asoc.primary_destination;
12339 if ((srcv->sinfo_flags & SCTP_ADDR_OVER) &&
12340 ((net == NULL) || (addr == NULL))) {
12341 struct sockaddr_in *sin;
12343 if (addr == NULL) {
12344 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12348 sin = (struct sockaddr_in *)addr;
12349 /* Validate port is 0 or correct */
12350 if ((sin->sin_port != 0) &&
12351 (sin->sin_port != stcb->rport)) {
12359 * Since we did not use findep we must
12360 * increment it, and if we don't find a tcb we must decrement it.
12363 SCTP_INP_WLOCK(inp);
12364 SCTP_INP_INCR_REF(inp);
12365 SCTP_INP_WUNLOCK(inp);
12366 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12367 if (stcb == NULL) {
12368 SCTP_INP_WLOCK(inp);
12369 SCTP_INP_DECR_REF(inp);
12370 SCTP_INP_WUNLOCK(inp);
12375 if ((stcb == NULL) && (addr)) {
12376 /* Possible implicit send? */
12377 SCTP_ASOC_CREATE_LOCK(inp);
12378 create_lock_applied = 1;
12379 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
12380 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
12381 /* Should I really unlock ? */
12382 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12387 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
12388 (addr->sa_family == AF_INET6)) {
12389 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
12393 SCTP_INP_WLOCK(inp);
12394 SCTP_INP_INCR_REF(inp);
12395 SCTP_INP_WUNLOCK(inp);
12396 /* With the lock applied look again */
12397 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
12398 if (stcb == NULL) {
12399 SCTP_INP_WLOCK(inp);
12400 SCTP_INP_DECR_REF(inp);
12401 SCTP_INP_WUNLOCK(inp);
12405 if (t_inp != inp) {
12406 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12411 if (stcb == NULL) {
12412 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
12413 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
12414 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
12418 if (addr == NULL) {
12419 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12424 * UDP style, we must go ahead and start the INIT process
12429 if ((use_rcvinfo) && (srcv) &&
12430 ((srcv->sinfo_flags & SCTP_ABORT) ||
12431 ((srcv->sinfo_flags & SCTP_EOF) &&
12434 * User asks to abort a non-existent assoc,
12435 * or EOF a non-existent assoc with no data
12437 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
12441 /* get an asoc/stcb struct */
12442 vrf_id = inp->def_vrf_id;
12444 if (create_lock_applied == 0) {
12445 panic("Error, should hold create lock and I don't?");
12448 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
12451 if (stcb == NULL) {
12452 /* Error is setup for us in the call */
12455 if (create_lock_applied) {
12456 SCTP_ASOC_CREATE_UNLOCK(inp);
12457 create_lock_applied = 0;
12459 SCTP_PRINTF("Huh-3? create lock should have been on??\n");
12462 * Turn on queue only flag to prevent data from being sent
12466 asoc = &stcb->asoc;
12467 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
12468 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
12470 /* initialize authentication params for the assoc */
12471 sctp_initialize_auth_params(inp, stcb);
12475 * see if an init structure exists in the cmsg
12478 struct sctp_initmsg initm;
12481 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
12484 * we have an INIT override of the default
12487 if (initm.sinit_max_attempts)
12488 asoc->max_init_times = initm.sinit_max_attempts;
12489 if (initm.sinit_num_ostreams)
12490 asoc->pre_open_streams = initm.sinit_num_ostreams;
12491 if (initm.sinit_max_instreams)
12492 asoc->max_inbound_streams = initm.sinit_max_instreams;
12493 if (initm.sinit_max_init_timeo)
12494 asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
12495 if (asoc->streamoutcnt < asoc->pre_open_streams) {
12496 struct sctp_stream_out *tmp_str;
12499 /* Default is NOT correct */
12500 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, defout:%d pre_open:%d\n",
12501 asoc->streamoutcnt, asoc->pre_open_streams);
12503 * What happens if this
12504 * fails? we panic ...
12507 if (hold_tcblock) {
12509 SCTP_TCB_UNLOCK(stcb);
12511 SCTP_MALLOC(tmp_str,
12512 struct sctp_stream_out *,
12513 (asoc->pre_open_streams *
12514 sizeof(struct sctp_stream_out)),
12517 SCTP_TCB_LOCK(stcb);
12519 if (tmp_str != NULL) {
12520 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
12521 asoc->strmout = tmp_str;
12522 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;
12524 asoc->pre_open_streams = asoc->streamoutcnt;
12526 for (i = 0; i < asoc->streamoutcnt; i++) {
12528 * inbound side must be set
12529 * to 0xffff, also NOTE when
12530 * we get the INIT-ACK back
12531 * (for INIT sender) we MUST
12533 * reduce the count (streamoutcnt) but first
12534 * check if we sent to any
12535 * of the upper streams that
12536 * were dropped (if some
12537 * were). Those that were
12538 * dropped must be notified
12539 * to the upper layer as
12542 asoc->strmout[i].next_sequence_sent = 0x0;
12543 TAILQ_INIT(&asoc->strmout[i].outqueue);
12544 asoc->strmout[i].stream_no = i;
12545 asoc->strmout[i].last_msg_incomplete = 0;
12546 asoc->strmout[i].next_spoke.tqe_next = 0;
12547 asoc->strmout[i].next_spoke.tqe_prev = 0;
12553 /* out with the INIT */
12554 queue_only_for_init = 1;
12556 * we may want to dig in after this call and adjust the MTU
12557 * value. It defaulted to 1500 (constant) but the ro
12558 * structure may now have an update and thus we may need to
12559 * change it BEFORE we append the message.
12561 net = stcb->asoc.primary_destination;
12562 asoc = &stcb->asoc;
12565 if ((SCTP_SO_IS_NBIO(so)
12566 || (flags & MSG_NBIO)
12570 asoc = &stcb->asoc;
12571 atomic_add_int(&stcb->total_sends, 1);
12573 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
12574 if (sndlen > asoc->smallest_mtu) {
12575 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
12580 /* would we block? */
12581 if (non_blocking) {
12582 if (hold_tcblock == 0) {
12583 SCTP_TCB_LOCK(stcb);
12586 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
12587 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
12588 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
12589 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
12590 if (sndlen > SCTP_SB_LIMIT_SND(so))
12593 error = EWOULDBLOCK;
12596 stcb->asoc.sb_send_resv += sndlen;
12597 SCTP_TCB_UNLOCK(stcb);
12600 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
12602 local_soresv = sndlen;
12603 /* Keep the stcb from being freed under our feet */
12604 if (free_cnt_applied) {
12606 panic("refcnt already incremented");
12608 printf("refcnt:1 already incremented?\n");
12611 atomic_add_int(&stcb->asoc.refcnt, 1);
12612 free_cnt_applied = 1;
12614 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
12615 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12616 error = ECONNRESET;
12619 if (create_lock_applied) {
12620 SCTP_ASOC_CREATE_UNLOCK(inp);
12621 create_lock_applied = 0;
12623 if (asoc->stream_reset_outstanding) {
12625 * Can't queue any data while stream reset is underway.
12627 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN);
12631 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
12632 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
12635 if ((use_rcvinfo == 0) || (srcv == NULL)) {
12636 /* Grab the default stuff from the asoc */
12637 srcv = (struct sctp_sndrcvinfo *)&stcb->asoc.def_send;
12639 /* we are now done with all control */
12641 sctp_m_freem(control);
12644 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
12645 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
12646 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
12647 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
12648 if ((use_rcvinfo) &&
12649 (srcv->sinfo_flags & SCTP_ABORT)) {
12652 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
12653 error = ECONNRESET;
12657 /* Ok, we will attempt a msgsnd :> */
12659 p->td_ru.ru_msgsnd++;
12662 if (((srcv->sinfo_flags | temp_flags) & SCTP_ADDR_OVER) == 0) {
12663 net = stcb->asoc.primary_destination;
12667 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
    if ((net->flight_size > net->cwnd) &&
        (asoc->sctp_cmt_on_off == 0)) {
        /*
         * CMT: Added check for CMT above. net above is the primary
         * dest. If CMT is ON, sender should always attempt to send
         * with the output routine sctp_fill_outqueue() that loops
         * through all destination addresses. Therefore, if CMT is
         * ON, queue_only is NOT set to 1 here, so that
         * sctp_chunk_output() can be called below.
         */
        queue_only = 1;
    } else if (asoc->ifp_had_enobuf) {
        SCTP_STAT_INCR(sctps_ifnomemqueued);
        if (net->flight_size > (net->mtu * 2))
            queue_only = 1;
        asoc->ifp_had_enobuf = 0;
    }
    un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
        (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
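    /*
     * Added note (not in the original): un_sent estimates the bytes queued
     * but not yet in flight, charging each message still on a stream queue
     * one extra sctp_data_chunk header. For example, 10000 bytes queued
     * with 7000 in flight and 3 queued messages gives
     * un_sent = 3000 + 3 * sizeof(struct sctp_data_chunk) = 3048 bytes.
     */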
    /* Are we aborting? */
    if (srcv->sinfo_flags & SCTP_ABORT) {
        struct mbuf *mm;
        int tot_demand, tot_out = 0, max_out;

        SCTP_STAT_INCR(sctps_sends_with_abort);
        if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
            (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
            /* It has to be up before we abort */
            /* how big is the user initiated abort? */
            SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
            error = EINVAL;
            goto out;
        }
        if (hold_tcblock) {
            SCTP_TCB_UNLOCK(stcb);
            hold_tcblock = 0;
        }
        if (top) {
            struct mbuf *cntm = NULL;

            mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);
            if (sndlen != 0) {
                cntm = top;
                while (cntm) {
                    tot_out += SCTP_BUF_LEN(cntm);
                    cntm = SCTP_BUF_NEXT(cntm);
                }
            }
            tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
        } else {
            /* Must fit in a MTU */
            tot_out = sndlen;
            tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
            if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
                /* Too big */
                SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
                error = EMSGSIZE;
                goto out;
            }
            mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
        }
        if (mm == NULL) {
            SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
            error = ENOMEM;
            goto out;
        }
        max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
        max_out -= sizeof(struct sctp_abort_msg);
        if (tot_out > max_out) {
            tot_out = max_out;
        }
        if (mm) {
            struct sctp_paramhdr *ph;

            /* now move forward the data pointer */
            ph = mtod(mm, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
            ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
            ph++;
            SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
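            /*
             * Added note (not in the original): the reason is laid out as
             * a standard SCTP error cause: a 4-byte cause header (cause
             * code 12, User-Initiated Abort, per RFC 4960) followed by the
             * user's data, so a 100-byte note gives param_length 104.
             */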
            if (top == NULL) {
                error = uiomove((caddr_t)ph, (int)tot_out, uio);
                if (error) {
                    /*
                     * Here, if we can't get the user's data we still
                     * abort; we just don't get to send the user's note.
                     */
                    sctp_m_freem(mm);
                    mm = NULL;
                }
            } else {
                SCTP_BUF_NEXT(mm) = top;
            }
        }
        if (hold_tcblock == 0) {
            SCTP_TCB_LOCK(stcb);
            hold_tcblock = 1;
        }
        atomic_add_int(&stcb->asoc.refcnt, -1);
        free_cnt_applied = 0;
        /* release this lock, otherwise we hang on ourselves */
        sctp_abort_an_association(stcb->sctp_ep, stcb,
            SCTP_RESPONSE_TO_USER_REQ,
            mm, SCTP_SO_LOCKED);
        /* now relock the stcb so everything is sane */
        hold_tcblock = 0;
        stcb = NULL;
        /*
         * In this case top is already chained to mm, so avoid a double
         * free: we free top below when it is non-NULL, and the driver
         * would free it after sending the packet out.
         */
        if (sndlen != 0) {
            top = NULL;
        }
        goto out_unlocked;
    }
    /* Calculate the maximum we can send */
    inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
    if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
        if (non_blocking) {
            /* we already checked for non-blocking above. */
            max_len = sndlen;
        } else {
            max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
        }
    } else {
        max_len = 0;
    }
    if (hold_tcblock) {
        SCTP_TCB_UNLOCK(stcb);
        hold_tcblock = 0;
    }
    /* Is the stream number valid? */
    if (srcv->sinfo_stream >= asoc->streamoutcnt) {
        /* Invalid stream number */
        SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
        error = EINVAL;
        goto out_unlocked;
    }
    if (asoc->strmout == NULL) {
        /* huh? software error */
        SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
        error = EFAULT;
        goto out_unlocked;
    }
    /* Unless E_EOR mode is on, we must make a send FIT in one call. */
    if ((user_marks_eor == 0) &&
        (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
        /* It will NEVER fit */
        SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
        error = EMSGSIZE;
        goto out_unlocked;
    }
    if ((uio == NULL) && user_marks_eor) {
        /*
         * We do not support eeor mode for
         * sending with mbuf chains (like sendfile).
         */
        SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
        error = EINVAL;
        goto out_unlocked;
    }
    if (user_marks_eor) {
        local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
    } else {
        /*
         * For non-eeor the whole message must fit in
         * the socket send buffer.
         */
        local_add_more = sndlen;
    }
    if (non_blocking) {
        goto skip_preblock;
    }
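    /*
     * Added note (not in the original): local_add_more is how much send
     * buffer space must be available before we copy more data in. With
     * explicit EOR it is capped by the sctp_add_more_threshold sysctl;
     * otherwise the entire message must fit, so a blocking sender of a
     * 64000-byte message sleeps below until all 64000 bytes fit at once.
     */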
    if (((max_len <= local_add_more) &&
        (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
        (max_len == 0) ||
        ((stcb->asoc.chunks_on_out_queue + stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
        /* No room right now! */
        SOCKBUF_LOCK(&so->so_snd);
        inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
        while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
            ((stcb->asoc.stream_queue_cnt + stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
            SCTPDBG(SCTP_DEBUG_OUTPUT1, "pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
                (unsigned int)SCTP_SB_LIMIT_SND(so),
                inqueue_bytes,
                local_add_more,
                stcb->asoc.stream_queue_cnt,
                stcb->asoc.chunks_on_out_queue,
                SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, so, asoc, sndlen);
            }
            be.error = 0;
            stcb->block_entry = &be;
            error = sbwait(&so->so_snd);
            stcb->block_entry = NULL;
            if (error || so->so_error || be.error) {
                if (error == 0) {
                    if (so->so_error)
                        error = so->so_error;
                    if (be.error) {
                        error = be.error;
                    }
                }
                SOCKBUF_UNLOCK(&so->so_snd);
                goto out_unlocked;
            }
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
                    so, asoc, stcb->asoc.total_output_queue_size);
            }
            if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
                goto out_unlocked;
            }
            inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
        }
        inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
        if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
            max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
        } else {
            max_len = 0;
        }
        SOCKBUF_UNLOCK(&so->so_snd);
    }
skip_preblock:
    if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
        goto out_unlocked;
    }
    /*
     * sndlen covers the mbuf case; uio_resid covers the non-mbuf case.
     * NOTE: uio will be NULL when top (an mbuf chain) is passed.
     */
    if (sndlen == 0) {
        if (srcv->sinfo_flags & SCTP_EOF) {
            got_all_of_the_send = 1;
            goto dataless_eof;
        } else {
            SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
            error = EINVAL;
            goto out;
        }
    }
    if (top == NULL) {
        struct sctp_stream_queue_pending *sp;
        struct sctp_stream_out *strm;
        uint32_t sndout, initial_out;

        initial_out = uio->uio_resid;

        SCTP_TCB_SEND_LOCK(stcb);
        if ((asoc->stream_locked) &&
            (asoc->stream_locked_on != srcv->sinfo_stream)) {
            SCTP_TCB_SEND_UNLOCK(stcb);
            SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
            error = EINVAL;
            goto out;
        }
        SCTP_TCB_SEND_UNLOCK(stcb);

        strm = &stcb->asoc.strmout[srcv->sinfo_stream];
        if (strm->last_msg_incomplete == 0) {
    do_a_copy_in:
            sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
            if ((sp == NULL) || (error)) {
                goto out;
            }
            SCTP_TCB_SEND_LOCK(stcb);
            if (sp->msg_is_complete) {
                strm->last_msg_incomplete = 0;
                asoc->stream_locked = 0;
            } else {
                /*
                 * Just got locked to this guy in case of an
                 * interrupted uio.
                 */
                strm->last_msg_incomplete = 1;
                asoc->stream_locked = 1;
                asoc->stream_locked_on = srcv->sinfo_stream;
                sp->sender_all_done = 0;
            }
            sctp_snd_sb_alloc(stcb, sp->length);
            atomic_add_int(&asoc->stream_queue_cnt, 1);
            if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
                sp->strseq = strm->next_sequence_sent;
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_SCTP) {
                    sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
                        (uintptr_t) stcb, sp->length,
                        (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
                }
                strm->next_sequence_sent++;
            } else {
                SCTP_STAT_INCR(sctps_sends_with_unord);
            }
            TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
            if ((strm->next_spoke.tqe_next == NULL) &&
                (strm->next_spoke.tqe_prev == NULL)) {
                /* Not on wheel, insert */
                sctp_insert_on_wheel(stcb, asoc, strm, 1);
            }
            SCTP_TCB_SEND_UNLOCK(stcb);
        } else {
            SCTP_TCB_SEND_LOCK(stcb);
            sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
            SCTP_TCB_SEND_UNLOCK(stcb);
            if (sp == NULL) {
                /* ???? Huh ??? last msg is gone */
#ifdef INVARIANTS
                panic("Warning: Last msg marked incomplete, yet nothing left?");
#else
                SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
                strm->last_msg_incomplete = 0;
#endif
                goto do_a_copy_in;
            }
        }
        while (uio->uio_resid > 0) {
            /* How much room do we have? */
            struct mbuf *new_tail, *mm;

            if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
                max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
            else
                max_len = 0;
            if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
                (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
                (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
                sndout = 0;
                new_tail = NULL;
                if (hold_tcblock) {
                    SCTP_TCB_UNLOCK(stcb);
                    hold_tcblock = 0;
                }
                mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
                if ((mm == NULL) || error) {
                    if (mm) {
                        sctp_m_freem(mm);
                    }
                    goto out;
                }
                /* Update the mbuf and count */
                SCTP_TCB_SEND_LOCK(stcb);
                if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
                    /*
                     * We need to get out; the peer
                     * probably aborted.
                     */
                    sctp_m_freem(mm);
                    if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
                        SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
                        error = ECONNRESET;
                    }
                    SCTP_TCB_SEND_UNLOCK(stcb);
                    goto out;
                }
                if (sp->tail_mbuf) {
                    /* tack it to the end */
                    SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
                    sp->tail_mbuf = new_tail;
                } else {
                    /* A stolen mbuf */
                    sp->data = mm;
                    sp->tail_mbuf = new_tail;
                }
                sctp_snd_sb_alloc(stcb, sndout);
                atomic_add_int(&sp->length, sndout);

                /* Did we reach EOR? */
                if ((uio->uio_resid == 0) &&
                    ((user_marks_eor == 0) ||
                    (srcv->sinfo_flags & SCTP_EOF) ||
                    (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
                    sp->msg_is_complete = 1;
                } else {
                    sp->msg_is_complete = 0;
                }
                SCTP_TCB_SEND_UNLOCK(stcb);
            }
            if (uio->uio_resid == 0) {
                /* got it all? */
                continue;
            }
            if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
                /*
                 * This is ugly, but we must assure locking
                 * order here.
                 */
                if (hold_tcblock == 0) {
                    SCTP_TCB_LOCK(stcb);
                    hold_tcblock = 1;
                }
                sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
                inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
                if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
                    max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
                else
                    max_len = 0;
                if (max_len > 0) {
                    continue;
                }
                SCTP_TCB_UNLOCK(stcb);
                hold_tcblock = 0;
            }
            /* wait for space now */
            if (non_blocking) {
                /* Non-blocking I/O: skip straight to the EOF handling */
                goto skip_out_eof;
            }
            if ((net->flight_size > net->cwnd) &&
                (asoc->sctp_cmt_on_off == 0)) {
                queue_only = 1;
            } else if (asoc->ifp_had_enobuf) {
                SCTP_STAT_INCR(sctps_ifnomemqueued);
                if (net->flight_size > (net->mtu * 2)) {
                    queue_only = 1;
                } else {
                    queue_only = 0;
                }
                asoc->ifp_had_enobuf = 0;
                un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
                    (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
            } else {
                un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
                    (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
                if (net->flight_size > net->cwnd) {
                    queue_only = 1;
                    SCTP_STAT_INCR(sctps_send_cwnd_avoid);
                } else {
                    queue_only = 0;
                }
            }
            if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
                (stcb->asoc.total_flight > 0) &&
                (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
                (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
                /*
                 * Ok, Nagle is set on and we have data outstanding.
                 * Don't send anything and let SACKs drive out the
                 * data unless we have a "full" segment to send.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
                    sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
                }
                SCTP_STAT_INCR(sctps_naglequeued);
                nagle_applies = 1;
            } else {
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
                    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
                        sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
                }
                SCTP_STAT_INCR(sctps_naglesent);
                nagle_applies = 0;
            }
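            /*
             * Added note (not in the original): the Nagle test above holds
             * data back only while everything unsent amounts to less than
             * one full payload (un_sent < smallest_mtu - SCTP_MIN_OVERHEAD),
             * so with a 1500-byte MTU a 200-byte send with data already in
             * flight waits for a SACK rather than leaving as a small packet.
             */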
            /* What about the INIT, send it maybe */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
                    nagle_applies, un_sent);
                sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
                    stcb->asoc.total_flight,
                    stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
            }
            if (queue_only_for_init) {
                if (hold_tcblock == 0) {
                    SCTP_TCB_LOCK(stcb);
                    hold_tcblock = 1;
                }
                if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
                    /* a collision took us forward? */
                    queue_only_for_init = 0;
                    queue_only = 0;
                } else {
                    sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
                    SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
                    queue_only_for_init = 0;
                    queue_only = 1;
                }
            }
            if ((queue_only == 0) && (nagle_applies == 0)) {
                /*
                 * need to start chunk output before blocking.
                 * Note that if a lock is already applied, then
                 * the input via the net is happening and I
                 * don't need to start output :-D
                 */
                if (hold_tcblock == 0) {
                    if (SCTP_TCB_TRYLOCK(stcb)) {
                        hold_tcblock = 1;
                        sctp_chunk_output(inp,
                            stcb,
                            SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
                    }
                } else {
                    sctp_chunk_output(inp,
                        stcb,
                        SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
                }
                if (hold_tcblock == 1) {
                    SCTP_TCB_UNLOCK(stcb);
                    hold_tcblock = 0;
                }
            }
            SOCKBUF_LOCK(&so->so_snd);
            /*
             * This is a bit strange, but I think it will work.
             * total_output_queue_size is locked and protected by
             * the TCB_LOCK, which we just released. There is a race
             * that can occur between releasing it above and getting
             * the socket lock, where SACKs come in but we have not
             * yet put the SB_WAIT on the so_snd buffer to get the
             * wakeup. After the LOCK is applied, the sack processing
             * will also need to LOCK so->so_snd to do the actual
             * sowwakeup(). So once we have the socket buffer lock,
             * if we recheck the size we KNOW we will get to sleep
             * safely with the wakeup flag in place.
             */
            if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
                min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                    sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
                        so, asoc, uio->uio_resid);
                }
                be.error = 0;
                stcb->block_entry = &be;
                error = sbwait(&so->so_snd);
                stcb->block_entry = NULL;

                if (error || so->so_error || be.error) {
                    if (error == 0) {
                        if (so->so_error)
                            error = so->so_error;
                        if (be.error) {
                            error = be.error;
                        }
                    }
                    SOCKBUF_UNLOCK(&so->so_snd);
                    goto out_unlocked;
                }
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
                    sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
                        so, asoc, stcb->asoc.total_output_queue_size);
                }
            }
            SOCKBUF_UNLOCK(&so->so_snd);
            if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
                goto out_unlocked;
            }
        }
        SCTP_TCB_SEND_LOCK(stcb);
        if (sp) {
            if (sp->msg_is_complete == 0) {
                strm->last_msg_incomplete = 1;
                asoc->stream_locked = 1;
                asoc->stream_locked_on = srcv->sinfo_stream;
            } else {
                sp->sender_all_done = 1;
                strm->last_msg_incomplete = 0;
                asoc->stream_locked = 0;
            }
        } else {
            SCTP_PRINTF("Huh no sp TSNH?\n");
            strm->last_msg_incomplete = 0;
            asoc->stream_locked = 0;
        }
        SCTP_TCB_SEND_UNLOCK(stcb);
        if (uio->uio_resid == 0) {
            got_all_of_the_send = 1;
        }
    } else if (top) {
        /* We send in a 0, since we do NOT have any locks */
        error = sctp_msg_append(stcb, net, top, srcv, 0);
        top = NULL;
        if (srcv->sinfo_flags & SCTP_EOF) {
            /*
             * This should only happen for Panda for the mbuf
             * send case, which does NOT yet support EEOR mode.
             * Thus, we can just set this flag to do the proper
             * EOF handling.
             */
            got_all_of_the_send = 1;
        }
    }
    if (error) {
        goto out;
    }

dataless_eof:
    if ((srcv->sinfo_flags & SCTP_EOF) &&
        (got_all_of_the_send == 1) &&
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
        int cnt;

        SCTP_STAT_INCR(sctps_sends_with_eof);
        error = 0;
        if (hold_tcblock == 0) {
            SCTP_TCB_LOCK(stcb);
            hold_tcblock = 1;
        }
        cnt = sctp_is_there_unsent_data(stcb);
        if (TAILQ_EMPTY(&asoc->send_queue) &&
            TAILQ_EMPTY(&asoc->sent_queue) &&
            (cnt == 0)) {
            if (asoc->locked_on_sending) {
                goto abort_anyway;
            }
            /* there is nothing queued to send, so I'm done... */
            if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
                /* only send SHUTDOWN the first time through */
                sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
                if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
                    SCTP_STAT_DECR_GAUGE32(sctps_currestab);
                }
                SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
                SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
                    asoc->primary_destination);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
                    asoc->primary_destination);
            }
        } else {
            /*
             * we still got (or just got) data to send, so set
             * SHUTDOWN_PENDING.
             */
            /*
             * XXX sockets draft says that SCTP_EOF should be
             * sent with no data. Currently, we will allow user
             * data to be sent first and move to
             * SHUTDOWN-PENDING.
             */
            if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
                (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
                if (hold_tcblock == 0) {
                    SCTP_TCB_LOCK(stcb);
                    hold_tcblock = 1;
                }
                if (asoc->locked_on_sending) {
                    /* Locked to send out the data */
                    struct sctp_stream_queue_pending *sp;

                    sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
                    if (sp) {
                        if ((sp->length == 0) && (sp->msg_is_complete == 0))
                            asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
                    }
                }
                asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
                if (TAILQ_EMPTY(&asoc->send_queue) &&
                    TAILQ_EMPTY(&asoc->sent_queue) &&
                    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
            abort_anyway:
                    if (free_cnt_applied) {
                        atomic_add_int(&stcb->asoc.refcnt, -1);
                        free_cnt_applied = 0;
                    }
                    sctp_abort_an_association(stcb->sctp_ep, stcb,
                        SCTP_RESPONSE_TO_USER_REQ,
                        NULL, SCTP_SO_LOCKED);
                    /*
                     * now relock the stcb so everything
                     * is sane
                     */
                    hold_tcblock = 0;
                    stcb = NULL;
                    goto out;
                }
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
                    asoc->primary_destination);
                sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
            }
        }
    }
skip_out_eof:
    if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
        some_on_control = 1;
    }
    if ((net->flight_size > net->cwnd) &&
        (stcb->asoc.sctp_cmt_on_off == 0)) {
        queue_only = 1;
    } else if (asoc->ifp_had_enobuf) {
        SCTP_STAT_INCR(sctps_ifnomemqueued);
        if (net->flight_size > (net->mtu * 2)) {
            queue_only = 1;
        } else {
            queue_only = 0;
        }
        asoc->ifp_had_enobuf = 0;
        un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
            (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
    } else {
        un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
            (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
        if (net->flight_size > net->cwnd) {
            queue_only = 1;
            SCTP_STAT_INCR(sctps_send_cwnd_avoid);
        } else {
            queue_only = 0;
        }
    }
    if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
        (stcb->asoc.total_flight > 0) &&
        (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
        (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
        /*
         * Ok, Nagle is set on and we have data outstanding.
         * Don't send anything and let SACKs drive out the
         * data unless we have a "full" segment to send.
         */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
            sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
        }
        SCTP_STAT_INCR(sctps_naglequeued);
        nagle_applies = 1;
    } else {
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
            if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
                sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
        }
        SCTP_STAT_INCR(sctps_naglesent);
        nagle_applies = 0;
    }
    if (queue_only_for_init) {
        if (hold_tcblock == 0) {
            SCTP_TCB_LOCK(stcb);
            hold_tcblock = 1;
        }
        if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
            /* a collision took us forward? */
            queue_only_for_init = 0;
            queue_only = 0;
        } else {
            sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
            SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
            queue_only_for_init = 0;
            queue_only = 1;
        }
    }
    if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
        /* we can attempt to send too. */
        if (hold_tcblock == 0) {
            /*
             * If there is activity recv'ing sacks, no need to
             * send out anything; we will be called again.
             */
            if (SCTP_TCB_TRYLOCK(stcb)) {
                sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
                hold_tcblock = 1;
            }
        } else {
            sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
        }
    } else if ((queue_only == 0) &&
        (stcb->asoc.peers_rwnd == 0) &&
        (stcb->asoc.total_flight == 0)) {
        /* We get to have a probe outstanding */
        if (hold_tcblock == 0) {
            hold_tcblock = 1;
            SCTP_TCB_LOCK(stcb);
        }
        sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
    } else if (some_on_control) {
        int num_out, reason, frag_point;

        /* Here we do control only */
        if (hold_tcblock == 0) {
            hold_tcblock = 1;
            SCTP_TCB_LOCK(stcb);
        }
        frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
        (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
            &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
        queue_only, stcb->asoc.peers_rwnd, un_sent,
        stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
        stcb->asoc.total_output_queue_size, error);
out:
out_unlocked:

    if (local_soresv && stcb) {
        atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
        local_soresv = 0;
    }
    if (create_lock_applied) {
        SCTP_ASOC_CREATE_UNLOCK(inp);
        create_lock_applied = 0;
    }
    if ((stcb) && hold_tcblock) {
        SCTP_TCB_UNLOCK(stcb);
    }
    if (stcb && free_cnt_applied) {
        atomic_add_int(&stcb->asoc.refcnt, -1);
    }
#ifdef INVARIANTS
    if (stcb) {
        if (mtx_owned(&stcb->tcb_mtx)) {
            panic("Leaving with tcb mtx owned?");
        }
        if (mtx_owned(&stcb->tcb_send_mtx)) {
            panic("Leaving with tcb send mtx owned?");
        }
    }
#endif
#ifdef INVARIANTS
    if (inp) {
        sctp_validate_no_locks(inp);
    } else {
        printf("Warning - inp is NULL so can't validate locks\n");
    }
#endif
    if (top) {
        sctp_m_freem(top);
    }
    if (control) {
        sctp_m_freem(control);
    }
    return (error);
}
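
/*
 * Illustrative sketch (added for exposition; not part of the original
 * file): sctp_lower_sosend derives the usable send-buffer space the same
 * way in several places above. The arithmetic, isolated into a
 * hypothetical helper:
 */
static inline uint32_t
sctp_sosend_space_sketch(struct socket *so, struct sctp_tcb *stcb)
{
    uint32_t inqueue_bytes;

    /* Queued bytes, less the data-chunk header overhead already counted. */
    inqueue_bytes = stcb->asoc.total_output_queue_size -
        (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
    if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
        return (SCTP_SB_LIMIT_SND(so) - inqueue_bytes);
    }
    return (0);
}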
/*
 * generate an AUTHentication chunk, if required
 */
static struct mbuf *
sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
    struct sctp_tcb *stcb, uint8_t chunk)
{
    struct mbuf *m_auth;
    struct sctp_auth_chunk *auth;
    int chunk_len;

    if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
        (stcb == NULL))
        return (m);

    /* sysctl disabled auth? */
    if (SCTP_BASE_SYSCTL(sctp_auth_disable))
        return (m);
    /* peer doesn't do auth... */
    if (!stcb->asoc.peer_supports_auth) {
        return (m);
    }
    /* does the requested chunk require auth? */
    if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
        return (m);
    }
    m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
    if (m_auth == NULL) {
        /* no mbuf's */
        return (m);
    }
    /* reserve some space if this will be the first mbuf */
    if (m == NULL)
        SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
    /* fill in the AUTH chunk details */
    auth = mtod(m_auth, struct sctp_auth_chunk *);
    bzero(auth, sizeof(*auth));
    auth->ch.chunk_type = SCTP_AUTHENTICATION;
    auth->ch.chunk_flags = 0;
    chunk_len = sizeof(*auth) +
        sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
    auth->ch.chunk_length = htons(chunk_len);
    auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
    /* key id and hmac digest will be computed and filled in upon send */

    /* save the offset where the auth was inserted into the chain */
    if (m != NULL) {
        struct mbuf *cn;

        *offset = 0;
        cn = m;
        while (cn) {
            *offset += SCTP_BUF_LEN(cn);
            cn = SCTP_BUF_NEXT(cn);
        }
    } else
        *offset = 0;

    /* update length and return pointer to the auth chunk */
    SCTP_BUF_LEN(m_auth) = chunk_len;
    m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
    if (auth_ret != NULL)
        *auth_ret = auth;

    return (m);
}
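
/*
 * Illustrative sketch (added for exposition; not part of the original
 * file): a caller building an outgoing packet can reserve an AUTH chunk
 * ahead of a chunk type the peer requires to be authenticated. The helper
 * name and the "outchain"/"endofchain" locals are hypothetical.
 */
static inline struct mbuf *
sctp_add_auth_sketch(struct mbuf *outchain, struct mbuf **endofchain,
    uint32_t *auth_offset, struct sctp_tcb *stcb)
{
    struct sctp_auth_chunk *auth = NULL;

    /* Queue an AUTH chunk in front of an ASCONF if the peer demands it. */
    if (sctp_auth_is_required_chunk(SCTP_ASCONF,
        stcb->asoc.peer_auth_chunks)) {
        outchain = sctp_add_auth_chunk(outchain, endofchain, &auth,
            auth_offset, stcb, SCTP_ASCONF);
    }
    return (outchain);
}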
static int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t * ro)
{
    struct nd_prefix *pfx = NULL;
    struct nd_pfxrouter *pfxrtr = NULL;
    struct sockaddr_in6 gw6;

    if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
        return (0);

    /* get prefix entry of address */
    LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
        if (pfx->ndpr_stateflags & NDPRF_DETACHED)
            continue;
        if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
            &src6->sin6_addr, &pfx->ndpr_mask))
            break;
    }
    /* no prefix entry in the prefix list */
    if (pfx == NULL) {
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
        return (0);
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);

    /* search installed gateway from prefix entry */
    for (pfxrtr = pfx->ndpr_advrtrs.lh_first; pfxrtr; pfxrtr =
        pfxrtr->pfr_next) {
        memset(&gw6, 0, sizeof(struct sockaddr_in6));
        gw6.sin6_family = AF_INET6;
        gw6.sin6_len = sizeof(struct sockaddr_in6);
        memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
            sizeof(struct in6_addr));
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
        SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
        SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
        if (sctp_cmpaddr((struct sockaddr *)&gw6,
            ro->ro_rt->rt_gateway)) {
            SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
            return (1);
        }
    }
    SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
    return (0);
}
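
/*
 * Illustrative sketch (added for exposition; not part of the original
 * file): the IPv4 check below reduces to comparing two addresses under the
 * interface netmask; e.g. 10.1.2.3 and 10.1.2.254 with mask 255.255.255.0
 * both reduce to 10.1.2.0 and therefore match.
 */
static inline int
sctp_same_subnet_sketch(struct in_addr a, struct in_addr b, struct in_addr mask)
{
    return ((a.s_addr & mask.s_addr) == (b.s_addr & mask.s_addr));
}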
static int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t * ro)
{
    struct sockaddr_in *sin, *mask;
    struct ifaddr *ifa;
    struct in_addr srcnetaddr, gwnetaddr;

    if (ro == NULL || ro->ro_rt == NULL ||
        sifa->address.sa.sa_family != AF_INET) {
        return (0);
    }
    ifa = (struct ifaddr *)sifa->ifa;
    mask = (struct sockaddr_in *)(ifa->ifa_netmask);
    sin = (struct sockaddr_in *)&sifa->address.sin;
    srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);

    sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
    gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
    SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
    SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
    if (srcnetaddr.s_addr == gwnetaddr.s_addr) {