/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <sys/proc.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it ... for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int lock_held);
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-2-m socket, since
	 * sb_cc is the count that everyone has put up. When we re-write
	 * sctp_soreceive we will fix this so that ONLY this association's
	 * data is taken into account.
	 */
	if (stcb->sctp_socket == NULL) {
		return (calc);
	}
	KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
	    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
	KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
	    ("size_on_all_streams is %u", asoc->size_on_all_streams));
	if (stcb->asoc.sb_cc == 0 &&
	    asoc->cnt_on_reasm_queue == 0 &&
	    asoc->cnt_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
	/*
	 * take out what has NOT been put on socket queue and we yet hold
	 * for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));
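	/*
	 * Note: every chunk still held on the reassembly or stream queues
	 * is charged MSIZE of mbuf overhead on top of its payload above,
	 * so the advertised window shrinks a bit faster than raw data
	 * accumulates; presumably this keeps a flood of tiny chunks from
	 * exhausting mbufs before the window closes.
	 */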
	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;
	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		return (NULL);
	}
	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
	read_queue_e->sinfo_stream = sid;
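	/*
	 * The chunk flags are kept in the upper byte of sinfo_flags;
	 * readers elsewhere recover them with (sinfo_flags >> 8) &
	 * SCTP_DATA_*.
	 */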
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->mid = mid;
	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
	TAILQ_INIT(&read_queue_e->reasm);
	read_queue_e->whoFrom = net;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	return (read_queue_e);
}
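/*
 * Build the ancillary data mbuf (SCTP_RCVINFO, SCTP_NXTINFO and
 * SCTP_SNDRCV/SCTP_EXTRCV cmsgs) handed to the application by recvmsg(),
 * honoring whichever socket options the user has enabled. Returns NULL if
 * the user asked for no ancillary data or no mbuf could be allocated.
 */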
static struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}
	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}
	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	/*
	 * Make sure that there is no un-initialized padding between the
	 * cmsg header and cmsg data and after the cmsg data.
	 */
	memset(cmh, 0, len);
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->serinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
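/*
 * Once a TSN has been handed to the application it may no longer be
 * reneged: move its bit from the regular mapping_array to the
 * non-renegable nr_mapping_array. Only relevant when the sctp_do_drain
 * sysctl allows reneging at all.
 */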
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;
	int in_r = 0, in_nr = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if ((in_r == 0) && (in_nr == 0)) {
#ifdef INVARIANTS
		panic("Things are really messed up now");
#else
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#endif
	}
	if (in_nr == 0)
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	if (in_r)
		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
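/*
 * Insert a control into its stream's ordered or unordered queue, keeping
 * the queue sorted by MID (SSN for old DATA). Returns -1 on a duplicate
 * MID so the caller can abort the association.
 */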
static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
	struct sctp_queued_to_read *at;
	struct sctp_readhead *q;
	uint8_t flags, unordered;

	flags = (control->sinfo_flags >> 8);
	unordered = flags & SCTP_DATA_UNORDERED;
	if (unordered) {
		q = &strm->uno_inqueue;
		if (asoc->idata_supported == 0) {
			if (!TAILQ_EMPTY(q)) {
				/*
				 * Only one stream can be here in old style
				 * -- abort.
				 */
				return (-1);
			}
			TAILQ_INSERT_TAIL(q, control, next_instrm);
			control->on_strm_q = SCTP_ON_UNORDERED;
			return (0);
		}
	} else {
		q = &strm->inqueue;
	}
	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
		control->end_added = 1;
		control->first_frag_seen = 1;
		control->last_frag_seen = 1;
	}
	if (TAILQ_EMPTY(q)) {
		/* Empty queue */
		TAILQ_INSERT_HEAD(q, control, next_instrm);
		if (unordered) {
			control->on_strm_q = SCTP_ON_UNORDERED;
		} else {
			control->on_strm_q = SCTP_ON_ORDERED;
		}
	} else {
		TAILQ_FOREACH(at, q, next_instrm) {
			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * one in queue is bigger than the new one,
				 * insert before this one
				 */
				TAILQ_INSERT_BEFORE(at, control, next_instrm);
				if (unordered) {
					control->on_strm_q = SCTP_ON_UNORDERED;
				} else {
					control->on_strm_q = SCTP_ON_ORDERED;
				}
				break;
			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
				/*
				 * Gak, he sent me a duplicate msg id
				 * number? return -1 to abort.
				 */
				return (-1);
			} else {
				if (TAILQ_NEXT(at, next_instrm) == NULL) {
					/*
					 * We are at the end, insert it
					 * after this one.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
					}
					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
					if (unordered) {
						control->on_strm_q = SCTP_ON_UNORDERED;
					} else {
						control->on_strm_q = SCTP_ON_ORDERED;
					}
					break;
				}
			}
		}
	}
	return (0);
}
static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
	char msg[SCTP_DIAG_INFO_LEN];
	struct mbuf *oper;

	if (stcb->asoc.idata_supported) {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn, chk->rec.data.mid);
	} else {
		snprintf(msg, sizeof(msg),
		    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
		    opspot,
		    control->fsn_included,
		    chk->rec.data.tsn,
		    chk->rec.data.sid,
		    chk->rec.data.fsn,
		    (uint16_t)chk->rec.data.mid);
	}
	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
	sctp_m_freem(chk->data);
	chk->data = NULL;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
	*abort_flag = 1;
}
static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
	/*
	 * The control could not be placed and must be cleaned.
	 */
	struct sctp_tmit_chunk *chk, *nchk;

	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		sctp_m_freem(chk->data);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	sctp_free_a_readq(stcb, control);
}
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order, as long as
 * the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint32_t nxt_todel;
	struct mbuf *op_err;
	struct sctp_stream_in *strm;
	char msg[SCTP_DIAG_INFO_LEN];

	strm = &asoc->strmin[control->sinfo_stream];
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	if (SCTP_MID_GT(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
		    strm->last_mid_delivered, control->mid);
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    strm->last_mid_delivered, control->sinfo_tsn,
			    control->sinfo_stream, control->mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)strm->last_mid_delivered,
			    control->sinfo_tsn,
			    control->sinfo_stream,
			    (uint16_t)control->mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	nxt_todel = strm->last_mid_delivered + 1;
	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_mid_delivered++;
		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
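		/*
		 * This delivery may have unblocked messages queued behind
		 * it: keep handing the next in-order, non-fragmented
		 * entries to the read queue until we hit a gap or a
		 * fragmented entry.
		 */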
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
			/* all delivered */
			nxt_todel = strm->last_mid_delivered + 1;
			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
				if (control->on_strm_q == SCTP_ON_ORDERED) {
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
				} else {
					panic("Huh control: %p is on_strm_q: %d",
					    control, control->on_strm_q);
#endif
				}
				control->on_strm_q = 0;
				strm->last_mid_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_LOCKED);
				continue;
			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
				*need_reasm = 1;
			}
			break;
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			snprintf(msg, sizeof(msg),
			    "Queue to str MID: %u duplicate", control->mid);
			sctp_clean_up_control(stcb, control);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			*abort_flag = 1;
		}
	}
}
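/*
 * Walk control->data pruning zero-length mbufs, recompute control->length
 * and remember the last mbuf so later appends are O(1); sballoc is charged
 * here when the control already sits on the read queue.
 */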
static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
	struct mbuf *m, *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	control->held_length = 0;
	control->length = 0;
	m = control->data;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
	struct mbuf *prev = NULL;
	struct sctp_tcb *stcb;

	stcb = control->stcb;
	if (stcb == NULL) {
#ifdef INVARIANTS
		panic("Control broken");
#else
		return;
#endif
	}
	if (control->tail_mbuf == NULL) {
		/* TSNH */
		control->data = m;
		sctp_setup_tail_pointer(control);
		return;
	}
	control->tail_mbuf->m_next = m;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->tail_mbuf->m_next = sctp_m_free(m);
				m = control->tail_mbuf->m_next;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (control->on_read_q) {
			/*
			 * On read queue so we must increment the SB stuff,
			 * we assume caller has done any locks of SB.
			 */
			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
		}
		*added += SCTP_BUF_LEN(m);
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev) {
		control->tail_mbuf = prev;
	}
}
static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
	memset(nc, 0, sizeof(struct sctp_queued_to_read));
	nc->sinfo_stream = control->sinfo_stream;
	nc->mid = control->mid;
	TAILQ_INIT(&nc->reasm);
	nc->top_fsn = control->top_fsn;
	nc->sinfo_flags = control->sinfo_flags;
	nc->sinfo_ppid = control->sinfo_ppid;
	nc->sinfo_context = control->sinfo_context;
	nc->fsn_included = 0xffffffff;
	nc->sinfo_tsn = control->sinfo_tsn;
	nc->sinfo_cumtsn = control->sinfo_cumtsn;
	nc->sinfo_assoc_id = control->sinfo_assoc_id;
	nc->whoFrom = control->whoFrom;
	atomic_add_int(&nc->whoFrom->ref_count, 1);
	nc->stcb = control->stcb;
	nc->port_from = control->port_from;
}
static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
	control->fsn_included = tsn;
	if (control->on_read_q) {
		/*
		 * We have to purge it from there, hopefully this will work
		 * (TSNH).
		 */
		TAILQ_REMOVE(&inp->read_queue, control, next);
		control->on_read_q = 0;
	}
}
static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
	/*
	 * Special handling for the old un-ordered data chunk. All the
	 * chunks/TSN's go to mid 0. So we have to do the old style watching
	 * to see if we have it all. If you return one, no other control
	 * entries on the un-ordered queue will be looked at. In theory
	 * there should be no other entries in reality, unless the guy is
	 * sending both unordered NDATA and unordered DATA...
	 */
	struct sctp_tmit_chunk *chk, *lchk, *tchk;
	uint32_t fsn;
	struct sctp_queued_to_read *nc;
	int cnt_added;

	if (control->first_frag_seen == 0) {
		/* Nothing we can do, we have not seen the first piece yet */
		return (1);
	}
	/* Collapse any we can */
	cnt_added = 0;
restart:
	fsn = control->fsn_included + 1;
	/* Now what can we add? */
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
		if (chk->rec.data.fsn == fsn) {
			/* Ok lets add it */
			sctp_alloc_a_readq(stcb, nc);
			if (nc == NULL) {
				break;
			}
			memset(nc, 0, sizeof(struct sctp_queued_to_read));
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
			fsn++;
			cnt_added++;
			chk = NULL;
			if (control->end_added) {
				/* We are done */
				if (!TAILQ_EMPTY(&control->reasm)) {
					/*
					 * Ok we have to move anything left
					 * on the control queue to a new
					 * control.
					 */
					sctp_build_readq_entry_from_ctl(nc, control);
					tchk = TAILQ_FIRST(&control->reasm);
					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						if (asoc->size_on_reasm_queue >= tchk->send_size) {
							asoc->size_on_reasm_queue -= tchk->send_size;
						} else {
#ifdef INVARIANTS
							panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
							asoc->size_on_reasm_queue = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
						nc->first_frag_seen = 1;
						nc->fsn_included = tchk->rec.data.fsn;
						nc->data = tchk->data;
						nc->sinfo_ppid = tchk->rec.data.ppid;
						nc->sinfo_tsn = tchk->rec.data.tsn;
						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
						tchk->data = NULL;
						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
						sctp_setup_tail_pointer(nc);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/* Spin the rest onto the queue */
					while (tchk) {
						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
						tchk = TAILQ_FIRST(&control->reasm);
					}
					/*
					 * Now lets add it to the queue
					 * after removing control.
					 */
					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
					nc->on_strm_q = SCTP_ON_UNORDERED;
					if (control->on_strm_q) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
						control->on_strm_q = 0;
					}
				}
				if (control->pdapi_started) {
					strm->pd_api_started = 0;
					control->pdapi_started = 0;
				}
				if (control->on_strm_q) {
					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
					control->on_strm_q = 0;
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				}
				if (control->on_read_q == 0) {
					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
					    &stcb->sctp_socket->so_rcv, control->end_added,
					    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				}
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
					/*
					 * Switch to the new guy and
					 * continue.
					 */
					control = nc;
					goto restart;
				} else {
					if (nc->on_strm_q == 0) {
						sctp_free_a_readq(stcb, nc);
					}
				}
				return (1);
			} else {
				sctp_free_a_readq(stcb, nc);
			}
		} else {
			/* Can't add more */
			break;
		}
	}
	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
		strm->pd_api_started = 1;
		control->pdapi_started = 1;
		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
		    &stcb->sctp_socket->so_rcv, control->end_added,
		    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	} else {
		return (1);
	}
}
static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
	struct sctp_tmit_chunk *at;
	int inserted;

	/*
	 * Here we need to place the chunk into the control structure sorted
	 * in the correct order.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * In old un-ordered we can reassemble multiple
			 * messages on one control. As long as the next
			 * FIRST is greater than the old first (TSN, i.e.
			 * FSN, wise) this is fine.
			 */
			struct mbuf *tdata;
			uint32_t tmp;

			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
				/*
				 * Easy way: the start of a new guy beyond
				 * the lowest.
				 */
				goto place_chunk;
			}
			if ((chk->rec.data.fsn == control->fsn_included) ||
			    (control->pdapi_started)) {
				/*
				 * Ok this should not happen, if it does we
				 * started the pd-api on the higher TSN
				 * (since the equals part is a TSN failure
				 * it must be that).
				 *
				 * We are completely hosed in that case since
				 * I have no way to recover. This really
				 * will only happen if we can get more TSN's
				 * higher before the pd-api-point.
				 */
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
				return;
			}
			/*
			 * Ok we have two firsts and the one we just got is
			 * smaller than the one we previously placed... yuck!
			 * We must swap them out.
			 */
			/* swap the mbufs */
			tdata = control->data;
			control->data = chk->data;
			chk->data = tdata;
			/* Save the lengths */
			chk->send_size = control->length;
			/* Recompute length of control and tail pointer */
			sctp_setup_tail_pointer(control);
			/* Fix the FSN included */
			tmp = control->fsn_included;
			control->fsn_included = chk->rec.data.fsn;
			chk->rec.data.fsn = tmp;
			/* Fix the TSN included */
			tmp = control->sinfo_tsn;
			control->sinfo_tsn = chk->rec.data.tsn;
			chk->rec.data.tsn = tmp;
			/* Fix the PPID included */
			tmp = control->sinfo_ppid;
			control->sinfo_ppid = chk->rec.data.ppid;
			chk->rec.data.ppid = tmp;
			/* Fix tail pointer */
			goto place_chunk;
		}
		control->first_frag_seen = 1;
		control->fsn_included = chk->rec.data.fsn;
		control->top_fsn = chk->rec.data.fsn;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		return;
	}
place_chunk:
	inserted = 0;
	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
			/*
			 * This one in queue is bigger than the new one,
			 * insert the new one before at.
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			inserted = 1;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
			/*
			 * They sent a duplicate fsn number. This really
			 * should not happen since the FSN is a TSN and it
			 * should have been dropped earlier.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
			return;
		}
	}
	if (inserted == 0) {
		/* Its at the end */
		asoc->size_on_reasm_queue += chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		control->top_fsn = chk->rec.data.fsn;
		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
	}
}
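/*
 * Note on partial delivery: pd_point below is the threshold at which a
 * still-incomplete message is pushed up to the read queue anyway. It is
 * the smaller of the endpoint's configured partial_delivery_point and a
 * fraction of the receive buffer (SCTP_SB_LIMIT_RCV >>
 * SCTP_PARTIAL_DELIVERY_SHIFT).
 */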
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
	/*
	 * Given a stream, strm, see if any of the SSN's on it that are
	 * fragmented are ready to deliver. If so go ahead and place them on
	 * the read queue. In so placing if we have hit the end, then we
	 * need to remove them from the stream's queue.
	 */
	struct sctp_queued_to_read *control, *nctl = NULL;
	uint32_t next_to_del;
	uint32_t pd_point;
	int ret = 0;

	if (stcb->sctp_socket) {
		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
		    stcb->sctp_ep->partial_delivery_point);
	} else {
		pd_point = stcb->sctp_ep->partial_delivery_point;
	}
	control = TAILQ_FIRST(&strm->uno_inqueue);

	if ((control != NULL) &&
	    (asoc->idata_supported == 0)) {
		/* Special handling needed for "old" data format */
		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
			goto done_un;
		}
	}
	if (strm->pd_api_started) {
		/* Can't add more */
		return;
	}
	while (control) {
		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (control->end_added) {
			/* We just put the last bit on */
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_UNORDERED) {
					panic("Huh control: %p on_q: %d -- not unordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* Can we do a PD-API for this un-ordered guy? */
			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
				strm->pd_api_started = 1;
				control->pdapi_started = 1;
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
				break;
			}
		}
		control = nctl;
	}
done_un:
	control = TAILQ_FIRST(&strm->inqueue);
	if (strm->pd_api_started) {
		/* Can't add more */
		return;
	}
	if (control == NULL) {
		return;
	}
	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
		/*
		 * Ok the guy at the top was being partially delivered and
		 * has now completed, so we remove it. Note the pd_api flag
		 * was taken off when the chunk was merged on in
		 * sctp_queue_data_for_reasm below.
		 */
		nctl = TAILQ_NEXT(control, next_instrm);
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
		    control, control->end_added, control->mid,
		    control->top_fsn, control->fsn_included,
		    strm->last_mid_delivered);
		if (control->end_added) {
			if (control->on_strm_q) {
#ifdef INVARIANTS
				if (control->on_strm_q != SCTP_ON_ORDERED) {
					panic("Huh control: %p on_q: %d -- not ordered?",
					    control, control->on_strm_q);
				}
#endif
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
			}
			if (strm->pd_api_started && control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_read_q == 0) {
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			control = nctl;
		}
	}
	if (strm->pd_api_started) {
		/*
		 * Can't add more, must have gotten an un-ordered above
		 * being partially delivered.
		 */
		return;
	}
deliver_more:
	next_to_del = strm->last_mid_delivered + 1;
	if (control) {
		SCTPDBG(SCTP_DEBUG_XXX,
		    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
		    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
		    next_to_del);
		nctl = TAILQ_NEXT(control, next_instrm);
		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
		    (control->first_frag_seen)) {
			int done;

			/* Ok we can deliver it onto the stream. */
			if (control->end_added) {
				/* We are done with it afterwards */
				if (control->on_strm_q) {
#ifdef INVARIANTS
					if (control->on_strm_q != SCTP_ON_ORDERED) {
						panic("Huh control: %p on_q: %d -- not ordered?",
						    control, control->on_strm_q);
					}
#endif
					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					control->on_strm_q = 0;
				}
				ret++;
			}
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/*
				 * A singleton now slipping through - mark
				 * it non-revokable too
				 */
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			} else if (control->end_added == 0) {
				/*
				 * Check if we can defer adding until its
				 * all there
				 */
				if ((control->length < pd_point) || (strm->pd_api_started)) {
					/*
					 * Don't need it or cannot add more
					 * (one being delivered that way)
					 */
					goto out;
				}
			}
			done = (control->end_added) && (control->last_frag_seen);
			if (control->on_read_q == 0) {
				if (!done) {
					if (asoc->size_on_all_streams >= control->length) {
						asoc->size_on_all_streams -= control->length;
					} else {
#ifdef INVARIANTS
						panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
						asoc->size_on_all_streams = 0;
#endif
					}
					strm->pd_api_started = 1;
					control->pdapi_started = 1;
				}
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, control->end_added,
				    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
			}
			strm->last_mid_delivered = next_to_del;
			if (done) {
				control = nctl;
				goto deliver_more;
			}
		}
	}
out:
	return;
}
static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
	/*
	 * Given a control and a chunk, merge the data from the chk onto the
	 * control and free up the chunk resources.
	 */
	uint32_t added = 0;
	int i_locked = 0;

	if (control->on_read_q && (hold_rlock == 0)) {
		/*
		 * Its being pd-api'd so we must do some locks.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		i_locked = 1;
	}
	if (control->data == NULL) {
		control->data = chk->data;
		sctp_setup_tail_pointer(control);
	} else {
		sctp_add_to_tail_pointer(control, chk->data, &added);
	}
	control->fsn_included = chk->rec.data.fsn;
	asoc->size_on_reasm_queue -= chk->send_size;
	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
	chk->data = NULL;
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		control->first_frag_seen = 1;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->sinfo_ppid = chk->rec.data.ppid;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		/* Its complete */
		if ((control->on_strm_q) && (control->on_read_q)) {
			if (control->pdapi_started) {
				control->pdapi_started = 0;
				strm->pd_api_started = 0;
			}
			if (control->on_strm_q == SCTP_ON_UNORDERED) {
				/* Unordered */
				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
				control->on_strm_q = 0;
			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
				/* Ordered */
				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
				/*
				 * Don't need to decrement
				 * size_on_all_streams, since control is on
				 * the read queue.
				 */
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				control->on_strm_q = 0;
#ifdef INVARIANTS
			} else if (control->on_strm_q) {
				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
				    control->on_strm_q);
#endif
			}
		}
		control->end_added = 1;
		control->last_frag_seen = 1;
	}
	if (i_locked) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	return (added);
}
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
	uint32_t next_fsn;
	struct sctp_tmit_chunk *at, *nat;
	struct sctp_stream_in *strm;
	int do_wakeup, unordered;
	uint32_t lenadded;

	strm = &asoc->strmin[control->sinfo_stream];
	/*
	 * For old un-ordered data chunks.
	 */
	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
		unordered = 1;
	} else {
		unordered = 0;
	}
	/* Must be added to the stream-in queue */
	if (created_control) {
		if (unordered == 0) {
			sctp_ucount_incr(asoc->cnt_on_all_streams);
		}
		if (sctp_place_control_in_stream(strm, asoc, control)) {
			/* Duplicate SSN? */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
			sctp_clean_up_control(stcb, control);
			return;
		}
		if ((tsn == (asoc->cumulative_tsn + 1)) && (asoc->idata_supported == 0)) {
			/*
			 * Ok we created this control and now lets validate
			 * that its legal, i.e. there is a B bit set; if not
			 * and we have up to the cum-ack then its invalid.
			 */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
				return;
			}
		}
	}
	if ((asoc->idata_supported == 0) && (unordered == 1)) {
		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
		return;
	}
	/*
	 * Ok we must queue the chunk into the reassembly portion: o if its
	 * the first it goes to the control mbuf. o if its not first but the
	 * next in sequence it goes to the control, and each succeeding one
	 * in order also goes. o if its not in order we place it on the list
	 * in its place.
	 */
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		/* Its the very first one. */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a first fsn: %u becomes fsn_included\n",
		    chk->rec.data.fsn);
		if (control->first_frag_seen) {
			/*
			 * Error on senders part, they either sent us two
			 * data chunks with FIRST, or they sent two
			 * un-ordered chunks that were fragmented at the
			 * same time in the same stream.
			 */
			sctp_abort_in_reasm(stcb, control, chk,
			    abort_flag,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
			return;
		}
		control->first_frag_seen = 1;
		control->sinfo_ppid = chk->rec.data.ppid;
		control->sinfo_tsn = chk->rec.data.tsn;
		control->fsn_included = chk->rec.data.fsn;
		control->data = chk->data;
		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		sctp_setup_tail_pointer(control);
		asoc->size_on_all_streams += control->length;
	} else {
		/* Place the chunk in our list */
		int inserted = 0;

		if (control->last_frag_seen == 0) {
			/* Still willing to raise highest FSN seen */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "We have a new top_fsn: %u\n",
				    chk->rec.data.fsn);
				control->top_fsn = chk->rec.data.fsn;
			}
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "The last fsn is now in place fsn: %u\n",
				    chk->rec.data.fsn);
				control->last_frag_seen = 1;
				if (asoc->idata_supported || control->first_frag_seen) {
					/*
					 * For IDATA we always check since we know
					 * that the first fragment is 0. For old
					 * DATA we have to receive the first before
					 * we know the first FSN (which is the TSN).
					 */
					if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
						/*
						 * We have already delivered up to
						 * this so its a dup.
						 */
						sctp_abort_in_reasm(stcb, control, chk,
						    abort_flag,
						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
						return;
					}
				}
			}
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				/* Second last? huh? */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate last fsn: %u (top: %u) -- abort\n",
				    chk->rec.data.fsn, control->top_fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
				return;
			}
			if (asoc->idata_supported || control->first_frag_seen) {
				/*
				 * For IDATA we always check since we know
				 * that the first fragment is 0. For old
				 * DATA we have to receive the first before
				 * we know the first FSN (which is the TSN).
				 */
				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
					/*
					 * We have already delivered up to
					 * this so its a dup.
					 */
					SCTPDBG(SCTP_DEBUG_XXX,
					    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
					    chk->rec.data.fsn, control->fsn_included);
					sctp_abort_in_reasm(stcb, control, chk,
					    abort_flag,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
					return;
				}
			}
			/*
			 * validate not beyond top FSN if we have seen last
			 * one
			 */
			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
				SCTPDBG(SCTP_DEBUG_XXX,
				    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
				    chk->rec.data.fsn,
				    control->top_fsn);
				sctp_abort_in_reasm(stcb, control, chk,
				    abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
				return;
			}
		}
		/*
		 * If we reach here, we need to place the new chunk in the
		 * reassembly for this control.
		 */
		SCTPDBG(SCTP_DEBUG_XXX,
		    "chunk is a not first fsn: %u needs to be inserted\n",
		    chk->rec.data.fsn);
		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
				/*
				 * This one in queue is bigger than the new
				 * one, insert the new one before at.
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Insert it before fsn: %u\n",
				    at->rec.data.fsn);
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
				inserted = 1;
				break;
			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
				/*
				 * Gak, he sent me a duplicate str seq
				 * number? I guess I will just free this new
				 * guy, should we abort too? FIX ME MAYBE?
				 * Or it COULD be that the SSN's have
				 * wrapped. Maybe I should compare to TSN
				 * somehow... sigh, for now just blow away
				 * the chunk!
				 */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Duplicate to fsn: %u -- abort\n",
				    at->rec.data.fsn);
				sctp_abort_in_reasm(stcb, control,
				    chk, abort_flag,
				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
				return;
			}
		}
		if (inserted == 0) {
			/* Goes on the end */
			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
			    chk->rec.data.fsn);
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
		}
	}
	/*
	 * Ok lets see if we can suck any up into the control structure that
	 * are in seq, if it makes sense.
	 */
	do_wakeup = 0;
	/*
	 * If the first fragment has not been seen there is no sense in
	 * looking.
	 */
	if (control->first_frag_seen) {
		next_fsn = control->fsn_included + 1;
		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
			if (at->rec.data.fsn == next_fsn) {
				/* We can add this one now to the control */
				SCTPDBG(SCTP_DEBUG_XXX,
				    "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
				    control, at,
				    at->rec.data.fsn,
				    next_fsn, control->fsn_included);
				TAILQ_REMOVE(&control->reasm, at, sctp_next);
				lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
				if (control->on_read_q) {
					do_wakeup = 1;
				} else {
					/*
					 * We only add to the
					 * size-on-all-streams if its not on
					 * the read q. The read q flag will
					 * cause a sballoc so its accounted
					 * for there.
					 */
					asoc->size_on_all_streams += lenadded;
				}
				next_fsn++;
				if (control->end_added && control->pdapi_started) {
					if (strm->pd_api_started) {
						strm->pd_api_started = 0;
						control->pdapi_started = 0;
					}
					if (control->on_read_q == 0) {
						sctp_add_to_readq(stcb->sctp_ep, stcb,
						    control,
						    &stcb->sctp_socket->so_rcv, control->end_added,
						    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
					}
					break;
				}
			} else {
				break;
			}
		}
	}
	if (do_wakeup) {
		/* Need to wakeup the reader */
		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
	}
}
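/*
 * Find an existing reassembly control for the given MID. Ordered messages
 * are looked up in the stream's in-queue, unordered ones in its unordered
 * queue; for old DATA the unordered queue holds at most one control, so
 * the first entry is returned.
 */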
static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
	struct sctp_queued_to_read *control;

	if (ordered) {
		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
				break;
			}
		}
	} else {
		if (idata_supported) {
			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
					break;
				}
			}
		} else {
			control = TAILQ_FIRST(&strm->uno_inqueue);
		}
	}
	return (control);
}
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk, uint8_t chk_type)
{
	struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
	uint32_t tsn, fsn, gap, mid;
	struct mbuf *dmbuf;
	int the_len;
	int need_reasm_check = 0;
	uint16_t sid;
	struct mbuf *op_err;
	char msg[SCTP_DIAG_INFO_LEN];
	struct sctp_queued_to_read *control, *ncontrol;
	uint32_t ppid;
	uint8_t chk_flags;
	struct sctp_stream_reset_list *liste;
	int ordered;
	size_t clen;
	int created_control = 0;
	if (chk_type == SCTP_IDATA) {
		struct sctp_idata_chunk *chunk, chunk_buf;

		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
		    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
		chk_flags = chunk->ch.chunk_flags;
		clen = sizeof(struct sctp_idata_chunk);
		tsn = ntohl(chunk->dp.tsn);
		sid = ntohs(chunk->dp.sid);
		mid = ntohl(chunk->dp.mid);
		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
			fsn = 0;
			ppid = chunk->dp.ppid_fsn.ppid;
		} else {
			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
			ppid = 0xffffffff;	/* Use as an invalid value. */
		}
	} else {
		struct sctp_data_chunk *chunk, chunk_buf;

		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
		    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
		chk_flags = chunk->ch.chunk_flags;
		clen = sizeof(struct sctp_data_chunk);
		tsn = ntohl(chunk->dp.tsn);
		sid = ntohs(chunk->dp.sid);
		mid = (uint32_t)(ntohs(chunk->dp.ssn));
		fsn = tsn;
		ppid = chunk->dp.ppid;
	}
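	/*
	 * Note: old DATA carries no MID/FSN of its own, so the 16-bit SSN
	 * doubles as the MID and the TSN doubles as the FSN. I-DATA has a
	 * real 32-bit MID; its first fragment has an implicit FSN of 0 and
	 * carries the PPID in that field instead.
	 */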
	if ((size_t)chk_length == clen) {
		/*
		 * Need to send an abort since we had an empty data chunk.
		 */
		op_err = sctp_generate_no_user_data_cause(tsn);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
		asoc->send_sack = 1;
	}
	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
	}
	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
		/* It is a duplicate */
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_expand_mapping_array(asoc, gap)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (SCTP_TSN_GT(tsn, *high_tsn)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
		SCTP_STAT_INCR(sctps_recvdupdata);
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		asoc->send_sack = 1;
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause a sack
	 * to be sent up above
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
		/*
		 * wait a minute, this guy is gone, there is no longer a
		 * receiver. Send peer an ABORT!
		 */
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
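	/*
	 * A chunk for an unknown stream id is answered with an ERROR (not
	 * an ABORT) below, but note that its TSN is still entered into the
	 * nr-mapping array, so the cum-ack keeps advancing past it.
	 */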
	/*
	 * Now before going further we see if there is room. If NOT then we
	 * MAY let one through only IF this TSN is the one we are waiting
	 * for on a partial delivery API.
	 */

	/* Is the stream valid? */
	if (sid >= asoc->streamincnt) {
		struct sctp_error_invalid_stream *cause;

		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
		    0, M_NOWAIT, 1, MT_DATA);
		if (op_err != NULL) {
			/* add some space up front so prepend will work well */
			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
			cause = mtod(op_err, struct sctp_error_invalid_stream *);
			/*
			 * Error causes are just param's and this one has
			 * two back to back phdr, one with the error type
			 * and size, the other with the streamid and a rsvd
			 */
			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
			cause->stream_id = htons(sid);
			cause->reserved = htons(0);
			sctp_queue_op_err(stcb, op_err);
		}
		SCTP_STAT_INCR(sctps_badsid);
		SCTP_TCB_LOCK_ASSERT(stcb);
		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
			asoc->highest_tsn_inside_nr_map = tsn;
		}
		if (tsn == (asoc->cumulative_tsn + 1)) {
			/* Update cum-ack */
			asoc->cumulative_tsn = tsn;
		}
		return (0);
	}
	/*
	 * If its a fragmented message, lets see if we can find the control
	 * on the reassembly queues.
	 */
	if ((chk_type == SCTP_IDATA) &&
	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
	    (fsn == 0)) {
		/*
		 * The first *must* be fsn 0, and other (middle/end) pieces
		 * can *not* be fsn 0. XXX: This can happen in case of a
		 * wrap around. Ignored for now.
		 */
		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
		    mid, chk_flags);
		goto err_out;
	}
	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
	    chk_flags, control);
	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
		/* See if we can find the re-assembly entity */
		if (control != NULL) {
			/* We found something, does it belong? */
			if (ordered && (mid != control->mid)) {
				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
		err_out:
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
				return (0);
			}
			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
				/*
				 * We can't have a switched order with an
				 * unordered chunk
				 */
				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
				    tsn);
				goto err_out;
			}
			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
				/*
				 * We can't have a switched unordered with a
				 * ordered chunk
				 */
				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
				    tsn);
				goto err_out;
			}
		}
	} else {
		/*
		 * Its a complete segment. Lets validate we don't have a
		 * re-assembly going on with the same Stream/Seq (for
		 * ordered) or in the same Stream for unordered.
		 */
		if (control != NULL) {
			if (ordered || asoc->idata_supported) {
				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
				    chk_flags, mid);
				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
				goto err_out;
			} else {
				if ((tsn == control->fsn_included + 1) &&
				    (control->end_added == 0)) {
					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
					goto err_out;
				} else {
					control = NULL;
				}
			}
		}
	}
	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check to make sure
		 * the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.sb_cc) {
			/* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/* now is it in the mapping array of what we have accepted? */
		if (chk_type == SCTP_DATA) {
			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
				/* Nope not in the valid range dump it */
		dump_packet:
				sctp_set_rwnd(stcb, asoc);
				if ((asoc->cnt_on_all_streams +
				    asoc->cnt_on_reasm_queue +
				    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
					SCTP_STAT_INCR(sctps_datadropchklmt);
				} else {
					SCTP_STAT_INCR(sctps_datadroprwnd);
				}
				*break_flag = 1;
				return (0);
			}
		} else {
			if (control == NULL) {
				goto dump_packet;
			}
			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
				goto dump_packet;
			}
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_in_at = 0;
		asoc->tsn_in_wrapped = 1;
	}
	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
	asoc->tsn_in_at++;
#endif
	/*
	 * Before we continue lets validate that we are not being fooled by
	 * an evil attacker. We can only have Nk chunks based on our TSN
	 * spread allowed by the mapping array N * 8 bits, so there is no
	 * way our stream sequence numbers could have wrapped. We of course
	 * only validate the FIRST fragment so the bit must be set.
	 */
	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (TAILQ_EMPTY(&asoc->resetHead)) &&
	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
		    mid, asoc->strmin[sid].last_mid_delivered);

		if (asoc->idata_supported) {
			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
			    asoc->strmin[sid].last_mid_delivered,
			    tsn,
			    sid,
			    mid);
		} else {
			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
			    (uint16_t)asoc->strmin[sid].last_mid_delivered,
			    tsn,
			    sid,
			    (uint16_t)mid);
		}
		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return (0);
	}
	if (chk_type == SCTP_IDATA) {
		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
	} else {
		the_len = (chk_length - sizeof(struct sctp_data_chunk));
	}
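	/*
	 * Get the payload into its own mbuf chain: if more chunks follow in
	 * this packet we must copy it out, but for the last chunk we can
	 * steal the received chain and merely trim it.
	 */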
2022 if (last_chunk == 0) {
2023 if (chk_type == SCTP_IDATA) {
2024 dmbuf = SCTP_M_COPYM(*m,
2025 (offset + sizeof(struct sctp_idata_chunk)),
2028 dmbuf = SCTP_M_COPYM(*m,
2029 (offset + sizeof(struct sctp_data_chunk)),
2032 #ifdef SCTP_MBUF_LOGGING
2033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2034 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2038 /* We can steal the last chunk */
2042 /* lop off the top part */
2043 if (chk_type == SCTP_IDATA) {
2044 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2046 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2048 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2049 l_len = SCTP_BUF_LEN(dmbuf);
/*
* need to count up the size; hopefully we do not
* hit this too often.
*/
2058 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2059 l_len += SCTP_BUF_LEN(lat);
2062 if (l_len > the_len) {
/* Trim the excess bytes off the end too */
2064 m_adj(dmbuf, -(l_len - the_len));
2067 if (dmbuf == NULL) {
2068 SCTP_STAT_INCR(sctps_nomem);
/*
* Now, no matter what, we need a control; get one if we don't have
* one (we may have gotten it above when we found the message was
* fragmented).
*/
2076 if (control == NULL) {
2077 sctp_alloc_a_readq(stcb, control);
2078 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2083 if (control == NULL) {
2084 SCTP_STAT_INCR(sctps_nomem);
2087 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2090 control->data = dmbuf;
2091 for (mm = control->data; mm; mm = mm->m_next) {
2092 control->length += SCTP_BUF_LEN(mm);
2094 control->tail_mbuf = NULL;
2095 control->end_added = 1;
2096 control->last_frag_seen = 1;
2097 control->first_frag_seen = 1;
2098 control->fsn_included = fsn;
2099 control->top_fsn = fsn;
2101 created_control = 1;
2103 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2104 chk_flags, ordered, mid, control);
2105 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2106 TAILQ_EMPTY(&asoc->resetHead) &&
2108 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2109 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2110 /* Candidate for express delivery */
/*
* It's not fragmented, no PD-API is up, nothing is in the
* delivery queue, it's unordered OR ordered and the next to
* deliver AND nothing else is stuck on the stream queue,
* and there is room for it in the socket buffer. Let's just
* stuff it up the buffer....
*/
2118 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2119 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2120 asoc->highest_tsn_inside_nr_map = tsn;
2122 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2125 sctp_add_to_readq(stcb->sctp_ep, stcb,
2126 control, &stcb->sctp_socket->so_rcv,
2127 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2129 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2130 /* for ordered, bump what we delivered */
2131 asoc->strmin[sid].last_mid_delivered++;
2133 SCTP_STAT_INCR(sctps_recvexpress);
2134 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2135 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2136 SCTP_STR_LOG_FROM_EXPRS_DEL);
2139 goto finish_express_del;
2141 /* Now will we need a chunk too? */
2142 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2143 sctp_alloc_a_chunk(stcb, chk);
2145 /* No memory so we drop the chunk */
2146 SCTP_STAT_INCR(sctps_nomem);
2147 if (last_chunk == 0) {
2148 /* we copied it, free the copy */
2149 sctp_m_freem(dmbuf);
2153 chk->rec.data.tsn = tsn;
2154 chk->no_fr_allowed = 0;
2155 chk->rec.data.fsn = fsn;
2156 chk->rec.data.mid = mid;
2157 chk->rec.data.sid = sid;
2158 chk->rec.data.ppid = ppid;
2159 chk->rec.data.context = stcb->asoc.context;
2160 chk->rec.data.doing_fast_retransmit = 0;
2161 chk->rec.data.rcv_flags = chk_flags;
2163 chk->send_size = the_len;
2165 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2168 atomic_add_int(&net->ref_count, 1);
2171 /* Set the appropriate TSN mark */
2172 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2173 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2174 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2175 asoc->highest_tsn_inside_nr_map = tsn;
2178 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2179 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2180 asoc->highest_tsn_inside_map = tsn;
2183 /* Now is it complete (i.e. not fragmented)? */
2184 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/*
* Special check for when streams are resetting. We could be
* smarter about this and check the actual stream to see
* if it is not being reset... That way we would not create a
* HOLB when amongst streams being reset and those not being
* reset.
*/
2193 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2194 SCTP_TSN_GT(tsn, liste->tsn)) {
/*
* Yep, it's past where we need to reset... go ahead
* and queue it.
*/
2199 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2201 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2203 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2204 unsigned char inserted = 0;
2206 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2207 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2212 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2217 if (inserted == 0) {
/*
* must be put at the end; use prevP
* (all set up from the loop) to set it up.
*/
2223 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2226 goto finish_express_del;
2228 if (chk_flags & SCTP_DATA_UNORDERED) {
2229 /* queue directly into socket buffer */
2230 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2232 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2233 sctp_add_to_readq(stcb->sctp_ep, stcb,
2235 &stcb->sctp_socket->so_rcv, 1,
2236 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2239 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2241 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2249 goto finish_express_del;
2251 /* If we reach here its a reassembly */
2252 need_reasm_check = 1;
2253 SCTPDBG(SCTP_DEBUG_XXX,
2254 "Queue data to stream for reasm control: %p MID: %u\n",
2256 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
/*
* the assoc is now gone and chk was put onto the reasm
* queue, which has all been freed.
*/
2268 /* Here we tidy up things */
2269 if (tsn == (asoc->cumulative_tsn + 1)) {
2270 /* Update cum-ack */
2271 asoc->cumulative_tsn = tsn;
2277 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2279 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2281 SCTP_STAT_INCR(sctps_recvdata);
2282 /* Set it present please */
2283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2284 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2286 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2287 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2288 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2290 if (need_reasm_check) {
2291 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2292 need_reasm_check = 0;
2294 /* check the special flag for stream resets */
2295 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2296 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
/*
* We have finished working through the backlogged TSNs; now it
* is time to reset streams: 1) call the reset function, 2) free
* the pending_reply space, 3) distribute any chunks in the
* pending_reply_queue.
*/
2303 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2304 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2305 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2306 SCTP_FREE(liste, SCTP_M_STRESET);
2307 /* sa_ignore FREED_MEMORY */
2308 liste = TAILQ_FIRST(&asoc->resetHead);
2309 if (TAILQ_EMPTY(&asoc->resetHead)) {
2310 /* All can be removed */
2311 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2312 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2313 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2317 if (need_reasm_check) {
2318 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2319 need_reasm_check = 0;
2323 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2324 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
/*
* If control->sinfo_tsn is <= liste->tsn we can
* process it, which is the NOT of
* control->sinfo_tsn > liste->tsn.
*/
2332 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2333 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2337 if (need_reasm_check) {
2338 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2339 need_reasm_check = 0;
2347 static const int8_t sctp_map_lookup_tab[256] = {
2348 0, 1, 0, 2, 0, 1, 0, 3,
2349 0, 1, 0, 2, 0, 1, 0, 4,
2350 0, 1, 0, 2, 0, 1, 0, 3,
2351 0, 1, 0, 2, 0, 1, 0, 5,
2352 0, 1, 0, 2, 0, 1, 0, 3,
2353 0, 1, 0, 2, 0, 1, 0, 4,
2354 0, 1, 0, 2, 0, 1, 0, 3,
2355 0, 1, 0, 2, 0, 1, 0, 6,
2356 0, 1, 0, 2, 0, 1, 0, 3,
2357 0, 1, 0, 2, 0, 1, 0, 4,
2358 0, 1, 0, 2, 0, 1, 0, 3,
2359 0, 1, 0, 2, 0, 1, 0, 5,
2360 0, 1, 0, 2, 0, 1, 0, 3,
2361 0, 1, 0, 2, 0, 1, 0, 4,
2362 0, 1, 0, 2, 0, 1, 0, 3,
2363 0, 1, 0, 2, 0, 1, 0, 7,
2364 0, 1, 0, 2, 0, 1, 0, 3,
2365 0, 1, 0, 2, 0, 1, 0, 4,
2366 0, 1, 0, 2, 0, 1, 0, 3,
2367 0, 1, 0, 2, 0, 1, 0, 5,
2368 0, 1, 0, 2, 0, 1, 0, 3,
2369 0, 1, 0, 2, 0, 1, 0, 4,
2370 0, 1, 0, 2, 0, 1, 0, 3,
2371 0, 1, 0, 2, 0, 1, 0, 6,
2372 0, 1, 0, 2, 0, 1, 0, 3,
2373 0, 1, 0, 2, 0, 1, 0, 4,
2374 0, 1, 0, 2, 0, 1, 0, 3,
2375 0, 1, 0, 2, 0, 1, 0, 5,
2376 0, 1, 0, 2, 0, 1, 0, 3,
2377 0, 1, 0, 2, 0, 1, 0, 4,
2378 0, 1, 0, 2, 0, 1, 0, 3,
2379 0, 1, 0, 2, 0, 1, 0, 8
2384 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
/*
* Now we also need to check the mapping array in a couple of ways.
* 1) Did we move the cum-ack point?
*
* When you first glance at this you might think that all entries
* that make up the position of the cum-ack would be in the
* nr-mapping array only... i.e. things up to the cum-ack are always
* deliverable. That's true with one exception: when it is a
* fragmented message, we may not deliver the data until some
* threshold (or all of it) is in place. So we must OR the
* nr_mapping_array and mapping_array to get a true picture of the
* cum-ack.
*/
2398 struct sctp_association *asoc;
2401 int slide_from, slide_end, lgap, distance;
2402 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2406 old_cumack = asoc->cumulative_tsn;
2407 old_base = asoc->mapping_array_base_tsn;
2408 old_highest = asoc->highest_tsn_inside_map;
/*
* We could probably improve this a small bit by calculating the
* offset of the current cum-ack as the starting point.
*/
2414 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2415 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2419 /* there is a 0 bit */
2420 at += sctp_map_lookup_tab[val];
2424 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
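/*
* Note (editor): 'at' counts the leading run of set bits across the
* OR of both maps, so base_tsn + (at - 1) is the last TSN of the
* contiguous block that has arrived, which is by definition the new
* cumulative TSN.
*/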
2426 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2427 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2429 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2430 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2432 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2433 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2434 sctp_print_mapping_array(asoc);
2435 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2436 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2438 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2439 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2442 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2443 highest_tsn = asoc->highest_tsn_inside_nr_map;
2445 highest_tsn = asoc->highest_tsn_inside_map;
2447 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2448 /* The complete array was completed by a single FR */
2449 /* highest becomes the cum-ack */
2455 /* clear the array */
2456 clr = ((at + 7) >> 3);
2457 if (clr > asoc->mapping_array_size) {
2458 clr = asoc->mapping_array_size;
2460 memset(asoc->mapping_array, 0, clr);
2461 memset(asoc->nr_mapping_array, 0, clr);
2463 for (i = 0; i < asoc->mapping_array_size; i++) {
2464 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
SCTP_PRINTF("Error: mapping arrays not clean at clear\n");
2466 sctp_print_mapping_array(asoc);
2470 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2471 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2472 } else if (at >= 8) {
2473 /* we can slide the mapping array down */
2474 /* slide_from holds where we hit the first NON 0xff byte */
/*
* now calculate the ceiling of the move using our highest
* TSN value.
*/
2480 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2481 slide_end = (lgap >> 3);
2482 if (slide_end < slide_from) {
2483 sctp_print_mapping_array(asoc);
2485 panic("impossible slide");
2487 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2488 lgap, slide_end, slide_from, at);
2492 if (slide_end > asoc->mapping_array_size) {
2494 panic("would overrun buffer");
2496 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2497 asoc->mapping_array_size, slide_end);
2498 slide_end = asoc->mapping_array_size;
2501 distance = (slide_end - slide_from) + 1;
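/*
* Note (editor): slide_from is the first byte containing a missing
* TSN and slide_end is the byte holding the highest TSN seen, so
* 'distance' bytes must be preserved when the arrays are shifted
* down; everything below slide_from (8 TSNs per byte) is covered by
* the cum-ack and can be dropped.
*/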
2502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2503 sctp_log_map(old_base, old_cumack, old_highest,
2504 SCTP_MAP_PREPARE_SLIDE);
2505 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2506 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2508 if (distance + slide_from > asoc->mapping_array_size ||
/*
* Here we do NOT slide forward the array so that
* hopefully when more data comes in to fill it up
* we will be able to slide it forward. Really I
* don't think this should happen :-0
*/
2517 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2518 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2519 (uint32_t)asoc->mapping_array_size,
2520 SCTP_MAP_SLIDE_NONE);
2525 for (ii = 0; ii < distance; ii++) {
2526 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2527 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2530 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2531 asoc->mapping_array[ii] = 0;
2532 asoc->nr_mapping_array[ii] = 0;
2534 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2535 asoc->highest_tsn_inside_map += (slide_from << 3);
2537 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2538 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2540 asoc->mapping_array_base_tsn += (slide_from << 3);
2541 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2542 sctp_log_map(asoc->mapping_array_base_tsn,
2543 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2544 SCTP_MAP_SLIDE_RESULT);
2551 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2553 struct sctp_association *asoc;
2554 uint32_t highest_tsn;
2557 sctp_slide_mapping_arrays(stcb);
2559 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2560 highest_tsn = asoc->highest_tsn_inside_nr_map;
2562 highest_tsn = asoc->highest_tsn_inside_map;
2564 /* Is there a gap now? */
2565 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
/*
* Now we need to see if we need to queue a sack or just start the
* timer (if allowed).
*/
2571 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
/*
* Ok, special case in the SHUTDOWN-SENT state: here we make
* sure the SACK timer is off and instead send a SHUTDOWN and a
* SACK.
*/
2577 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2578 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2579 stcb->sctp_ep, stcb, NULL,
2580 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2582 sctp_send_shutdown(stcb,
2583 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2585 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/*
* CMT DAC algorithm: increase the number of packets received
* since the last ack.
*/
2592 stcb->asoc.cmt_dac_pkts_rcvd++;
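/*
* Note (editor): the test below decides between sending a SACK
* immediately and (re)starting the delayed-ack timer. The immediate
* cases match where RFC 4960 wants prompt feedback: an explicit
* request, a gap that appeared or closed, duplicate TSNs, delayed
* ack disabled, or the packet-frequency limit being hit.
*/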
if ((stcb->asoc.send_sack == 1) || /* We need to send a sack */
((was_a_gap) && (is_a_gap == 0)) || /* there was a gap, but no longer is one */
(stcb->asoc.numduptsns) || /* we have dup's */
(is_a_gap) || /* is still a gap */
(stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
(stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
) {
2604 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2605 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2606 (stcb->asoc.send_sack == 0) &&
2607 (stcb->asoc.numduptsns == 0) &&
2608 (stcb->asoc.delayed_ack) &&
2609 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
/*
* CMT DAC algorithm: With CMT, delay acks
* even in the face of reordering.
* Therefore, acks that do not have to be
* sent because of the above reasons will
* be delayed. That is, acks that would
* have been sent due to gap reports will
* be delayed with DAC. Start the delayed
* ack timer.
*/
2622 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2623 stcb->sctp_ep, stcb, NULL);
/*
* Ok, we must build a SACK since the timer
* is pending, we got our first packet, OR
* there are gaps or duplicates.
*/
2630 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2631 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2634 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2635 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2636 stcb->sctp_ep, stcb, NULL);
2643 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2644 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2645 struct sctp_nets *net, uint32_t *high_tsn)
2647 struct sctp_chunkhdr *ch, chunk_buf;
2648 struct sctp_association *asoc;
2649 int num_chunks = 0; /* number of control chunks processed */
2651 int break_flag, last_chunk;
2652 int abort_flag = 0, was_a_gap;
2654 uint32_t highest_tsn;
2655 uint16_t chk_length;
2658 sctp_set_rwnd(stcb, &stcb->asoc);
2661 SCTP_TCB_LOCK_ASSERT(stcb);
2663 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2664 highest_tsn = asoc->highest_tsn_inside_nr_map;
2666 highest_tsn = asoc->highest_tsn_inside_map;
2668 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
/*
* Set up where we got the last DATA packet from for any SACK that
* may need to go out. Don't bump the net. This is done ONLY when a
* chunk is assigned.
*/
2674 asoc->last_data_chunk_from = net;
/*
* Now before we proceed we must figure out if this is a wasted
* cluster... i.e. it is a small packet sent in and yet the driver
* underneath allocated a full cluster for it. If so we must copy it
* to a smaller mbuf and free up the cluster mbuf. This will help
* with cluster starvation. Note for __Panda__ we don't do this
* since it has clusters all the way down to 64 bytes.
*/
2684 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2685 /* we only handle mbufs that are singletons.. not chains */
2686 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
/* ok, let's see if we can copy the data up */
2691 /* get the pointers and copy */
2692 to = mtod(m, caddr_t *);
2693 from = mtod((*mm), caddr_t *);
2694 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2695 /* copy the length and free up the old */
2696 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2698 /* success, back copy */
2701 /* We are in trouble in the mbuf world .. yikes */
2705 /* get pointer to the first chunk header */
2706 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2707 sizeof(struct sctp_chunkhdr),
2708 (uint8_t *)&chunk_buf);
/*
* process all DATA chunks...
*/
2715 *high_tsn = asoc->cumulative_tsn;
2717 asoc->data_pkts_seen++;
2718 while (stop_proc == 0) {
2719 /* validate chunk length */
2720 chk_length = ntohs(ch->chunk_length);
2721 if (length - *offset < chk_length) {
/* all done, mutilated chunk */
2726 if ((asoc->idata_supported == 1) &&
2727 (ch->chunk_type == SCTP_DATA)) {
2728 struct mbuf *op_err;
2729 char msg[SCTP_DIAG_INFO_LEN];
2731 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2732 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2733 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2734 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2737 if ((asoc->idata_supported == 0) &&
2738 (ch->chunk_type == SCTP_IDATA)) {
2739 struct mbuf *op_err;
2740 char msg[SCTP_DIAG_INFO_LEN];
2742 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2743 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2744 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2745 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2748 if ((ch->chunk_type == SCTP_DATA) ||
2749 (ch->chunk_type == SCTP_IDATA)) {
2752 if (ch->chunk_type == SCTP_DATA) {
2753 clen = sizeof(struct sctp_data_chunk);
2755 clen = sizeof(struct sctp_idata_chunk);
2757 if (chk_length < clen) {
/*
* Need to send an abort since we had an
* invalid data chunk.
*/
2762 struct mbuf *op_err;
2763 char msg[SCTP_DIAG_INFO_LEN];
2765 snprintf(msg, sizeof(msg), "%s chunk of length %u",
2766 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2768 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2769 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2770 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2773 #ifdef SCTP_AUDITING_ENABLED
2774 sctp_audit_log(0xB1, 0);
2776 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2781 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2782 chk_length, net, high_tsn, &abort_flag, &break_flag,
2783 last_chunk, ch->chunk_type)) {
/*
* Set because we are out of rwnd space and no
* drop report space is left.
*/
2798 /* not a data chunk in the data region */
2799 switch (ch->chunk_type) {
2800 case SCTP_INITIATION:
2801 case SCTP_INITIATION_ACK:
2802 case SCTP_SELECTIVE_ACK:
2803 case SCTP_NR_SELECTIVE_ACK:
2804 case SCTP_HEARTBEAT_REQUEST:
2805 case SCTP_HEARTBEAT_ACK:
2806 case SCTP_ABORT_ASSOCIATION:
2808 case SCTP_SHUTDOWN_ACK:
2809 case SCTP_OPERATION_ERROR:
2810 case SCTP_COOKIE_ECHO:
2811 case SCTP_COOKIE_ACK:
2814 case SCTP_SHUTDOWN_COMPLETE:
2815 case SCTP_AUTHENTICATION:
2816 case SCTP_ASCONF_ACK:
2817 case SCTP_PACKET_DROPPED:
2818 case SCTP_STREAM_RESET:
2819 case SCTP_FORWARD_CUM_TSN:
/*
* Now, what do we do with KNOWN
* chunks that are NOT in the right
* place?
*
* For now, I do nothing but ignore
* them. We may later want to add
* sysctl stuff to switch out and do
* either an ABORT() or possibly
* process them.
*/
2833 struct mbuf *op_err;
2834 char msg[SCTP_DIAG_INFO_LEN];
2836 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2838 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2839 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
* Unknown chunk type: use bit rules after
* checking the length.
*/
2847 if (chk_length < sizeof(struct sctp_chunkhdr)) {
/*
* Need to send an abort since we
* had an invalid chunk.
*/
2852 struct mbuf *op_err;
2853 char msg[SCTP_DIAG_INFO_LEN];
2855 snprintf(msg, sizeof(msg), "Chunk of length %u",
2857 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2858 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2859 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2862 if (ch->chunk_type & 0x40) {
/* Add an error report to the queue */
2864 struct mbuf *op_err;
2865 struct sctp_gen_error_cause *cause;
2867 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2868 0, M_NOWAIT, 1, MT_DATA);
2869 if (op_err != NULL) {
2870 cause = mtod(op_err, struct sctp_gen_error_cause *);
2871 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2872 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2873 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2874 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2875 if (SCTP_BUF_NEXT(op_err) != NULL) {
2876 sctp_queue_op_err(stcb, op_err);
2878 sctp_m_freem(op_err);
2882 if ((ch->chunk_type & 0x80) == 0) {
2883 /* discard the rest of this packet */
} /* else skip this bad chunk and continue */
2888 } /* switch of chunk type */
2890 *offset += SCTP_SIZE32(chk_length);
2891 if ((*offset >= length) || stop_proc) {
2892 /* no more data left in the mbuf chain */
2896 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2897 sizeof(struct sctp_chunkhdr),
2898 (uint8_t *)&chunk_buf);
/*
* we need to report rwnd overrun drops.
*/
2909 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
/*
* Did we get data? If so, update the time for auto-close and
* give the peer credit for being alive.
*/
2916 SCTP_STAT_INCR(sctps_recvpktwithdata);
2917 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2918 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2919 stcb->asoc.overall_error_count,
2921 SCTP_FROM_SCTP_INDATA,
2924 stcb->asoc.overall_error_count = 0;
2925 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2927 /* now service all of the reassm queue if needed */
2928 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2929 /* Assure that we ack right away */
2930 stcb->asoc.send_sack = 1;
2932 /* Start a sack timer or QUEUE a SACK for sending */
2933 sctp_sack_check(stcb, was_a_gap);
2938 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2939 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2941 uint32_t *biggest_newly_acked_tsn,
2942 uint32_t *this_sack_lowest_newack,
2945 struct sctp_tmit_chunk *tp1;
2946 unsigned int theTSN;
2947 int j, wake_him = 0, circled = 0;
2949 /* Recover the tp1 we last saw */
2952 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2954 for (j = frag_strt; j <= frag_end; j++) {
2955 theTSN = j + last_tsn;
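/*
* Note (editor): gap-ack fragments are offsets from the SACK's
* cumulative TSN, so last_tsn + j walks the absolute TSNs covered
* by this fragment from frag_strt through frag_end.
*/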
2957 if (tp1->rec.data.doing_fast_retransmit)
2961 * CMT: CUCv2 algorithm. For each TSN being
2962 * processed from the sent queue, track the
2963 * next expected pseudo-cumack, or
2964 * rtx_pseudo_cumack, if required. Separate
2965 * cumack trackers for first transmissions,
2966 * and retransmissions.
2968 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2969 (tp1->whoTo->find_pseudo_cumack == 1) &&
2970 (tp1->snd_count == 1)) {
2971 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2972 tp1->whoTo->find_pseudo_cumack = 0;
2974 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2975 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2976 (tp1->snd_count > 1)) {
2977 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2978 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2980 if (tp1->rec.data.tsn == theTSN) {
2981 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
/*
* must be held until the
* cum-ack passes.
*/
2986 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
/*
* If it is less than RESEND, it is
* now no longer in flight.
* Higher values may already be set
* via previous Gap Ack Blocks...
* i.e. ACKED or RESEND.
*/
2994 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2995 *biggest_newly_acked_tsn)) {
2996 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
/*
* CMT: SFR algo (and HTNA) - set
* saw_newack to 1 for dest being
* newly acked. Update
* this_sack_highest_newack if
* appropriate.
*/
3005 if (tp1->rec.data.chunk_was_revoked == 0)
3006 tp1->whoTo->saw_newack = 1;
3008 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3009 tp1->whoTo->this_sack_highest_newack)) {
3010 tp1->whoTo->this_sack_highest_newack =
3014 * CMT DAC algo: also update
3015 * this_sack_lowest_newack
3017 if (*this_sack_lowest_newack == 0) {
3018 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3019 sctp_log_sack(*this_sack_lowest_newack,
3024 SCTP_LOG_TSN_ACKED);
3026 *this_sack_lowest_newack = tp1->rec.data.tsn;
/*
* CMT: CUCv2 algorithm. If the (rtx-)pseudo-cumack for the corresponding
* dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
* new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
* updated. Also trigger a search for the next expected (rtx-)pseudo-cumack.
* Separate pseudo_cumack trackers are kept for first transmissions and
* retransmissions.
*/
3036 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3037 if (tp1->rec.data.chunk_was_revoked == 0) {
3038 tp1->whoTo->new_pseudo_cumack = 1;
3040 tp1->whoTo->find_pseudo_cumack = 1;
3042 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3043 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3045 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3046 if (tp1->rec.data.chunk_was_revoked == 0) {
3047 tp1->whoTo->new_pseudo_cumack = 1;
3049 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3051 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3052 sctp_log_sack(*biggest_newly_acked_tsn,
3057 SCTP_LOG_TSN_ACKED);
3059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3060 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3061 tp1->whoTo->flight_size,
3063 (uint32_t)(uintptr_t)tp1->whoTo,
3066 sctp_flight_size_decrease(tp1);
3067 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3068 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3071 sctp_total_flight_decrease(stcb, tp1);
3073 tp1->whoTo->net_ack += tp1->send_size;
3074 if (tp1->snd_count < 2) {
/*
* True non-retransmitted chunk.
*/
3078 tp1->whoTo->net_ack2 += tp1->send_size;
3086 sctp_calculate_rto(stcb,
3089 &tp1->sent_rcv_time,
3090 SCTP_RTT_FROM_DATA);
3093 if (tp1->whoTo->rto_needed == 0) {
3094 tp1->whoTo->rto_needed = 1;
3100 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3101 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3102 stcb->asoc.this_sack_highest_gap)) {
3103 stcb->asoc.this_sack_highest_gap =
3106 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3107 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3108 #ifdef SCTP_AUDITING_ENABLED
3109 sctp_audit_log(0xB2,
3110 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
/*
* All chunks NOT UNSENT fall through here and are marked
* (leave PR-SCTP ones that are to skip alone, though).
*/
3118 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3119 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3120 tp1->sent = SCTP_DATAGRAM_MARKED;
3122 if (tp1->rec.data.chunk_was_revoked) {
3123 /* deflate the cwnd */
3124 tp1->whoTo->cwnd -= tp1->book_size;
3125 tp1->rec.data.chunk_was_revoked = 0;
3127 /* NR Sack code here */
3129 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3130 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3131 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3134 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3137 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3138 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3139 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3140 stcb->asoc.trigger_reset = 1;
3142 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3148 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3149 sctp_m_freem(tp1->data);
3156 } /* if (tp1->tsn == theTSN) */
3157 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3160 tp1 = TAILQ_NEXT(tp1, sctp_next);
3161 if ((tp1 == NULL) && (circled == 0)) {
3163 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3165 } /* end while (tp1) */
3168 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3170 /* In case the fragments were not in order we must reset */
3171 } /* end for (j = fragStart */
3173 return (wake_him); /* Return value only used for nr-sack */
3178 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3179 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3180 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3181 int num_seg, int num_nr_seg, int *rto_ok)
3183 struct sctp_gap_ack_block *frag, block;
3184 struct sctp_tmit_chunk *tp1;
3189 uint16_t frag_strt, frag_end, prev_frag_end;
3191 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3195 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3198 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3200 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3201 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3202 *offset += sizeof(block);
3204 return (chunk_freed);
3206 frag_strt = ntohs(frag->start);
3207 frag_end = ntohs(frag->end);
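/*
* Note (editor): per RFC 4960 (Section 3.3.4) the start/end values
* in a gap-ack block are 16-bit offsets relative to the cumulative
* TSN ack, which is why they are combined with last_tsn before any
* chunk on the sent queue can be matched.
*/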
3209 if (frag_strt > frag_end) {
3210 /* This gap report is malformed, skip it. */
3213 if (frag_strt <= prev_frag_end) {
3214 /* This gap report is not in order, so restart. */
3215 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3217 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3218 *biggest_tsn_acked = last_tsn + frag_end;
3225 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3226 non_revocable, &num_frs, biggest_newly_acked_tsn,
3227 this_sack_lowest_newack, rto_ok)) {
3230 prev_frag_end = frag_end;
3232 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3234 sctp_log_fr(*biggest_tsn_acked,
3235 *biggest_newly_acked_tsn,
3236 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3238 return (chunk_freed);
3242 sctp_check_for_revoked(struct sctp_tcb *stcb,
3243 struct sctp_association *asoc, uint32_t cumack,
3244 uint32_t biggest_tsn_acked)
3246 struct sctp_tmit_chunk *tp1;
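/*
* Note (editor): a chunk is "revoked" when an earlier SACK gap-acked
* it but the current SACK no longer covers it; the receiver has
* reneged, so the chunk must be treated as in flight again (flight
* size and cwnd adjusted) until it is re-acked or retransmitted.
*/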
3248 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3249 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
/*
* ok, this guy is either ACK or MARKED. If it is
* ACKED it has been previously acked but not this
* time, i.e. revoked. If it is MARKED it was ACK'ed
* again.
*/
3256 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3259 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3260 /* it has been revoked */
3261 tp1->sent = SCTP_DATAGRAM_SENT;
3262 tp1->rec.data.chunk_was_revoked = 1;
/*
* We must add this stuff back in to assure
* timers and such get started.
*/
3267 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3268 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3269 tp1->whoTo->flight_size,
3271 (uint32_t)(uintptr_t)tp1->whoTo,
3274 sctp_flight_size_increase(tp1);
3275 sctp_total_flight_increase(stcb, tp1);
/*
* We inflate the cwnd to compensate for our
* artificial inflation of the flight_size.
*/
3280 tp1->whoTo->cwnd += tp1->book_size;
3281 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3282 sctp_log_sack(asoc->last_acked_seq,
3287 SCTP_LOG_TSN_REVOKED);
3289 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3290 /* it has been re-acked in this SACK */
3291 tp1->sent = SCTP_DATAGRAM_ACKED;
3294 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3301 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3302 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3304 struct sctp_tmit_chunk *tp1;
3305 int strike_flag = 0;
3307 int tot_retrans = 0;
3308 uint32_t sending_seq;
3309 struct sctp_nets *net;
3310 int num_dests_sacked = 0;
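/*
* Note (editor): "striking" a chunk records one more miss indication
* for it; each strike advances tp1->sent toward SCTP_DATAGRAM_RESEND
* in the branches below, and once a chunk collects enough strikes it
* is marked RESEND and becomes a candidate for fast retransmission.
*/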
/*
* Select the sending_seq: this is either the next thing ready to be
* sent but not yet transmitted, OR the next seq we will assign.
*/
3316 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3318 sending_seq = asoc->sending_seq;
3320 sending_seq = tp1->rec.data.tsn;
3323 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3324 if ((asoc->sctp_cmt_on_off > 0) &&
3325 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3326 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3327 if (net->saw_newack)
3331 if (stcb->asoc.prsctp_supported) {
3332 (void)SCTP_GETTIME_TIMEVAL(&now);
3334 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3336 if (tp1->no_fr_allowed) {
3337 /* this one had a timeout or something */
3340 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3341 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3342 sctp_log_fr(biggest_tsn_newly_acked,
3345 SCTP_FR_LOG_CHECK_STRIKE);
3347 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3348 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3352 if (stcb->asoc.prsctp_supported) {
3353 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3354 /* Is it expired? */
3355 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3356 /* Yes so drop it */
3357 if (tp1->data != NULL) {
3358 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3359 SCTP_SO_NOT_LOCKED);
3365 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3366 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3367 /* we are beyond the tsn in the sack */
3370 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3371 /* either a RESEND, ACKED, or MARKED */
3373 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
/* Continue striking FWD-TSN chunks */
3375 tp1->rec.data.fwd_tsn_cnt++;
3380 * CMT : SFR algo (covers part of DAC and HTNA as well)
3382 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
/*
* No new acks were received for data sent to this
* dest. Therefore, according to the SFR algo for
* CMT, no data sent to this dest can be marked for
* FR using this SACK.
*/
3390 } else if (tp1->whoTo &&
3391 SCTP_TSN_GT(tp1->rec.data.tsn,
3392 tp1->whoTo->this_sack_highest_newack) &&
3393 !(accum_moved && asoc->fast_retran_loss_recovery)) {
/*
* CMT: New acks were received for data sent to
* this dest. But no new acks were seen for data
* sent after tp1. Therefore, according to the SFR
* algo for CMT, tp1 cannot be marked for FR using
* this SACK. This step covers part of the DAC algo
* and the HTNA algo as well.
*/
/*
* Here we check to see if we have already done a FR
* and if so we see if the biggest TSN we saw in the sack is
* smaller than the recovery point. If so we don't strike
* the tsn... otherwise we CAN strike the TSN.
*/
/*
* @@@ JRI: Check for CMT: if (accum_moved &&
* asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0))
*/
3415 if (accum_moved && asoc->fast_retran_loss_recovery) {
/*
* Strike the TSN if in fast-recovery and the cum-ack
* moved.
*/
3420 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3421 sctp_log_fr(biggest_tsn_newly_acked,
3424 SCTP_FR_LOG_STRIKE_CHUNK);
3426 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3429 if ((asoc->sctp_cmt_on_off > 0) &&
3430 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
/*
* CMT DAC algorithm: If the SACK flag is set to
* 0, then the lowest_newack test will not pass
* because it would have been set to the
* cumack earlier. If it is not already to be
* rtx'd, if this is not a mixed sack, and if tp1 is
* not between two sacked TSNs, then mark it by
* one more. NOTE that we are marking by one
* additional time since the SACK DAC flag
* indicates that two packets have been
* received after this missing TSN.
*/
3443 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3444 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3445 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3446 sctp_log_fr(16 + num_dests_sacked,
3449 SCTP_FR_LOG_STRIKE_CHUNK);
3454 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3455 (asoc->sctp_cmt_on_off == 0)) {
/*
* For those that have done a FR we must take
* special consideration if we strike. I.e., the
* biggest_newly_acked must be higher than the
* sending_seq at the time we did the FR.
*/
3463 #ifdef SCTP_FR_TO_ALTERNATE
/*
* If FR's go to new networks, then we must only do
* this for singly homed asoc's. However, if the FR's
* go to the same network (Armando's work) then it's
* ok to FR multiple times.
*/
3476 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3477 tp1->rec.data.fast_retran_tsn)) {
/*
* Strike the TSN, since this ack is
* beyond where things were when we
* did a FR.
*/
3483 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3484 sctp_log_fr(biggest_tsn_newly_acked,
3487 SCTP_FR_LOG_STRIKE_CHUNK);
3489 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3493 if ((asoc->sctp_cmt_on_off > 0) &&
3494 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
/*
* CMT DAC algorithm: If
* the SACK flag is set to 0,
* then the lowest_newack test
* will not pass because it
* would have been set to
* the cumack earlier. If
* it is not already to be rtx'd,
* if this is not a mixed sack, and
* if tp1 is not between two
* sacked TSNs, then mark it by
* one more. NOTE that we
* are marking by one
* additional time since the
* SACK DAC flag indicates
* that two packets have
* been received after this
* missing TSN.
*/
3514 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3515 (num_dests_sacked == 1) &&
3516 SCTP_TSN_GT(this_sack_lowest_newack,
3517 tp1->rec.data.tsn)) {
3518 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3519 sctp_log_fr(32 + num_dests_sacked,
3522 SCTP_FR_LOG_STRIKE_CHUNK);
3524 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
/*
* JRI: TODO: remove code for HTNA algo. CMT's SFR
* algo covers HTNA.
*/
3535 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3536 biggest_tsn_newly_acked)) {
/*
* We don't strike these: this is the HTNA
* algorithm, i.e. we don't strike if our TSN is
* larger than the Highest TSN Newly Acked.
*/
3544 /* Strike the TSN */
3545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3546 sctp_log_fr(biggest_tsn_newly_acked,
3549 SCTP_FR_LOG_STRIKE_CHUNK);
3551 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3554 if ((asoc->sctp_cmt_on_off > 0) &&
3555 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3557 * CMT DAC algorithm: If SACK flag is set to
3558 * 0, then lowest_newack test will not pass
3559 * because it would have been set to the
3560 * cumack earlier. If not already to be
3561 * rtx'd, If not a mixed sack and if tp1 is
3562 * not between two sacked TSNs, then mark by
3563 * one more. NOTE that we are marking by one
3564 * additional time since the SACK DAC flag
3565 * indicates that two packets have been
3566 * received after this missing TSN.
3568 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3569 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3570 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3571 sctp_log_fr(48 + num_dests_sacked,
3574 SCTP_FR_LOG_STRIKE_CHUNK);
3580 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3581 struct sctp_nets *alt;
3583 /* fix counts and things */
3584 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3585 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3586 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3588 (uint32_t)(uintptr_t)tp1->whoTo,
3592 tp1->whoTo->net_ack++;
3593 sctp_flight_size_decrease(tp1);
3594 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3595 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3599 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3600 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3601 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3603 /* add back to the rwnd */
3604 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3606 /* remove from the total flight */
3607 sctp_total_flight_decrease(stcb, tp1);
3609 if ((stcb->asoc.prsctp_supported) &&
3610 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
/*
* Has it been retransmitted tv_sec times? -
* we store the retran count there.
*/
3615 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3616 /* Yes, so drop it */
3617 if (tp1->data != NULL) {
3618 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3619 SCTP_SO_NOT_LOCKED);
3621 /* Make sure to flag we had a FR */
3622 if (tp1->whoTo != NULL) {
3623 tp1->whoTo->net_ack++;
/*
* SCTP_PRINTF("OK, we are now ready to FR this
* guy\n");
*/
3632 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3633 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3637 /* This is a subsequent FR */
3638 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3640 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3641 if (asoc->sctp_cmt_on_off > 0) {
/*
* CMT: Using RTX_SSTHRESH policy for CMT.
* If CMT is being used, then pick the dest with
* the largest ssthresh for any retransmission.
*/
3647 tp1->no_fr_allowed = 1;
3649 /* sa_ignore NO_NULL_CHK */
3650 if (asoc->sctp_cmt_pf > 0) {
/*
* JRS 5/18/07 - If CMT PF is on,
* use the PF version of
* find_alternate_net().
*/
3656 alt = sctp_find_alternate_net(stcb, alt, 2);
/*
* JRS 5/18/07 - If only CMT is on,
* use the CMT version of
* find_alternate_net().
*/
3663 /* sa_ignore NO_NULL_CHK */
3664 alt = sctp_find_alternate_net(stcb, alt, 1);
/*
* CUCv2: If a different dest is picked for
* the retransmission, then a new
* (rtx-)pseudo_cumack needs to be tracked
* for the orig dest. Let CUCv2 track the new (rtx-)
* pseudo-cumack always.
*/
3677 tp1->whoTo->find_pseudo_cumack = 1;
3678 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3680 } else { /* CMT is OFF */
3682 #ifdef SCTP_FR_TO_ALTERNATE
3683 /* Can we find an alternate? */
3684 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
/*
* The default behavior is to NOT retransmit
* FR's to an alternate. Armando Caro's
* paper details why.
*/
3695 tp1->rec.data.doing_fast_retransmit = 1;
3697 /* mark the sending seq for possible subsequent FR's */
/*
* SCTP_PRINTF("Marking TSN for FR new value %x\n",
* (uint32_t)tpi->rec.data.tsn);
*/
3702 if (TAILQ_EMPTY(&asoc->send_queue)) {
/*
* If the queue of send is empty then it is
* the next sequence number that will be
* assigned, so we subtract one from this to
* get the one we last sent.
*/
3709 tp1->rec.data.fast_retran_tsn = sending_seq;
/*
* If there are chunks on the send queue
* (unsent data that has made it from the
* stream queues but not out the door), we
* take the first one (which will have the
* lowest TSN) and subtract one to get the
* one we last sent.
*/
3719 struct sctp_tmit_chunk *ttt;
3721 ttt = TAILQ_FIRST(&asoc->send_queue);
3722 tp1->rec.data.fast_retran_tsn =
/*
* this guy had an RTO calculation pending on
* it.
*/
3731 if ((tp1->whoTo != NULL) &&
3732 (tp1->whoTo->rto_needed == 0)) {
3733 tp1->whoTo->rto_needed = 1;
3737 if (alt != tp1->whoTo) {
3738 /* yes, there is an alternate. */
3739 sctp_free_remote_addr(tp1->whoTo);
3740 /* sa_ignore FREED_MEMORY */
3742 atomic_add_int(&alt->ref_count, 1);
3748 struct sctp_tmit_chunk *
3749 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3750 struct sctp_association *asoc)
3752 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
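/*
* Note (editor): this implements the PR-SCTP "advanced peer ack
* point" (RFC 3758): abandoned chunks at the head of the sent queue
* let the sender pretend they were cum-acked, and the resulting
* value is later advertised to the peer in a FORWARD-TSN chunk. The
* walk must stop at the first chunk that is still reliable or still
* in flight.
*/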
3756 if (asoc->prsctp_supported == 0) {
3759 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3760 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3761 tp1->sent != SCTP_DATAGRAM_RESEND &&
3762 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3763 /* no chance to advance, out of here */
3766 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3767 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3768 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3769 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3770 asoc->advanced_peer_ack_point,
3771 tp1->rec.data.tsn, 0, 0);
3774 if (!PR_SCTP_ENABLED(tp1->flags)) {
3776 * We can't fwd-tsn past any that are reliable aka
3777 * retransmitted until the asoc fails.
3782 (void)SCTP_GETTIME_TIMEVAL(&now);
/*
* Now we got a chunk which is marked for another
* retransmission to a PR-stream but has run out of its chances
* already, maybe, OR has been marked to skip now. Can we skip
* it if it's a resend?
*/
3791 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3792 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
/*
* Now, is this one marked for resend and its time
* now past?
*/
3797 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3798 /* Yes so drop it */
3800 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3801 1, SCTP_SO_NOT_LOCKED);
/*
* No, we are done when we hit one marked for resend
* whose time has not expired.
*/
/*
* Ok, now if this chunk is marked to drop it, we can clean up
* the chunk, advance our peer ack point, and we can check
* the next chunk.
*/
3816 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3817 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
/* advance the PeerAckPoint forward */
3819 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3820 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3822 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3823 /* No update but we do save the chk */
/*
* If it is still in RESEND we can advance no
* further.
*/
3838 sctp_fs_audit(struct sctp_association *asoc)
3840 struct sctp_tmit_chunk *chk;
3841 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3844 int entry_flight, entry_cnt;
3849 entry_flight = asoc->total_flight;
3850 entry_cnt = asoc->total_flight_count;
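/*
* Note (editor): this audit recounts the sent queue by state and
* compares the result against the cached flight counters; anything
* still counted as in flight (or in between states) indicates the
* incremental bookkeeping has drifted.
*/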
3852 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3855 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3856 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3857 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3862 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3864 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3866 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3873 if ((inflight > 0) || (inbetween > 0)) {
3875 panic("Flight size-express incorrect? \n");
3877 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3878 entry_flight, entry_cnt);
3880 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3881 inflight, inbetween, resend, above, acked);
3890 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3891 struct sctp_association *asoc,
3892 struct sctp_tmit_chunk *tp1)
3894 tp1->window_probe = 0;
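/*
* Note (editor): a window probe is a single chunk sent despite a
* zero peer rwnd. Once the window reopens, the probe (unless it was
* already acked or abandoned) is pulled out of flight and marked
* RESEND so it goes out again under normal accounting.
*/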
3895 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
/* TSNs skipped; we do NOT move back. */
3897 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3898 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3900 (uint32_t)(uintptr_t)tp1->whoTo,
3904 /* First setup this by shrinking flight */
3905 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3906 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3909 sctp_flight_size_decrease(tp1);
3910 sctp_total_flight_decrease(stcb, tp1);
3911 /* Now mark for resend */
3912 tp1->sent = SCTP_DATAGRAM_RESEND;
3913 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3915 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3916 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3917 tp1->whoTo->flight_size,
3919 (uint32_t)(uintptr_t)tp1->whoTo,
3925 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3926 uint32_t rwnd, int *abort_now, int ecne_seen)
3928 struct sctp_nets *net;
3929 struct sctp_association *asoc;
3930 struct sctp_tmit_chunk *tp1, *tp2;
3932 int win_probe_recovery = 0;
3933 int win_probe_recovered = 0;
3934 int j, done_once = 0;
3938 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3939 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3940 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3942 SCTP_TCB_LOCK_ASSERT(stcb);
3943 #ifdef SCTP_ASOCLOG_OF_TSNS
3944 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3945 stcb->asoc.cumack_log_at++;
3946 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3947 stcb->asoc.cumack_log_at = 0;
3951 old_rwnd = asoc->peers_rwnd;
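/*
* Note (editor): this is the "express" SACK path, used when the
* SACK carries only a cumulative ack (no gap-ack blocks), so the
* sent queue can be drained from the front without any of the
* gap/strike processing done in the full handler.
*/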
3952 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3955 } else if (asoc->last_acked_seq == cumack) {
3956 /* Window update sack */
3957 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3958 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3959 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3960 /* SWS sender side engages */
3961 asoc->peers_rwnd = 0;
3963 if (asoc->peers_rwnd > old_rwnd) {
3968 /* First setup for CC stuff */
3969 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3970 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3971 /* Drag along the window_tsn for cwr's */
3972 net->cwr_window_tsn = cumack;
3974 net->prev_cwnd = net->cwnd;
/*
* CMT: Reset CUC and Fast recovery algo variables before
* SACK processing.
*/
3982 net->new_pseudo_cumack = 0;
3983 net->will_exit_fast_recovery = 0;
3984 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3985 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3988 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3989 tp1 = TAILQ_LAST(&asoc->sent_queue,
3990 sctpchunk_listhead);
3991 send_s = tp1->rec.data.tsn + 1;
3993 send_s = asoc->sending_seq;
3995 if (SCTP_TSN_GE(cumack, send_s)) {
3996 struct mbuf *op_err;
3997 char msg[SCTP_DIAG_INFO_LEN];
snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4003 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4004 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4005 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4008 asoc->this_sack_highest_gap = cumack;
4009 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4010 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4011 stcb->asoc.overall_error_count,
4013 SCTP_FROM_SCTP_INDATA,
4016 stcb->asoc.overall_error_count = 0;
4017 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4018 /* process the new consecutive TSN first */
4019 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4020 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4021 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4022 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4024 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
/*
* If it is less than ACKED, it is
* now no longer in flight. Higher
* values may occur during marking.
*/
4030 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4031 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4032 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4033 tp1->whoTo->flight_size,
4035 (uint32_t)(uintptr_t)tp1->whoTo,
4038 sctp_flight_size_decrease(tp1);
4039 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4040 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4043 /* sa_ignore NO_NULL_CHK */
4044 sctp_total_flight_decrease(stcb, tp1);
4046 tp1->whoTo->net_ack += tp1->send_size;
4047 if (tp1->snd_count < 2) {
/*
* True non-retransmitted chunk.
*/
tp1->whoTo->net_ack2 += tp1->send_size;
4055 /* update RTO too? */
4063 sctp_calculate_rto(stcb,
4065 &tp1->sent_rcv_time,
4066 SCTP_RTT_FROM_DATA);
4069 if (tp1->whoTo->rto_needed == 0) {
4070 tp1->whoTo->rto_needed = 1;
/*
* CMT: CUCv2 algorithm. From the
* cumack'd TSNs, for each TSN being
* acked for the first time, set the
* following variables for the
* corresponding destination.
* new_pseudo_cumack will trigger a
* cwnd update.
* find_(rtx_)pseudo_cumack will
* trigger a search for the next
* expected (rtx-)pseudo-cumack.
*/
4087 tp1->whoTo->new_pseudo_cumack = 1;
4088 tp1->whoTo->find_pseudo_cumack = 1;
4089 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4092 /* sa_ignore NO_NULL_CHK */
4093 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4096 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4097 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4099 if (tp1->rec.data.chunk_was_revoked) {
4100 /* deflate the cwnd */
4101 tp1->whoTo->cwnd -= tp1->book_size;
4102 tp1->rec.data.chunk_was_revoked = 0;
4104 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4105 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4106 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4109 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4113 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4114 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4115 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4116 asoc->trigger_reset = 1;
4118 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4120 /* sa_ignore NO_NULL_CHK */
4121 sctp_free_bufspace(stcb, asoc, tp1, 1);
4122 sctp_m_freem(tp1->data);
4125 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4126 sctp_log_sack(asoc->last_acked_seq,
4131 SCTP_LOG_FREE_SENT);
4133 asoc->sent_queue_cnt--;
4134 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4141 /* sa_ignore NO_NULL_CHK */
4142 if (stcb->sctp_socket) {
4143 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4147 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4148 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4149 /* sa_ignore NO_NULL_CHK */
4150 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4152 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4153 so = SCTP_INP_SO(stcb->sctp_ep);
4154 atomic_add_int(&stcb->asoc.refcnt, 1);
4155 SCTP_TCB_UNLOCK(stcb);
4156 SCTP_SOCKET_LOCK(so, 1);
4157 SCTP_TCB_LOCK(stcb);
4158 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4159 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4160 /* assoc was freed while we were unlocked */
4161 SCTP_SOCKET_UNLOCK(so, 1);
4162 return;
4163 }
4164 #endif
4165 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4166 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4167 SCTP_SOCKET_UNLOCK(so, 1);
4168 #endif
4169 } else {
4170 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4171 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4175 /* JRS - Use the congestion control given in the CC module */
4176 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4177 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4178 if (net->net_ack2 > 0) {
4179 /*
4180 * Karn's rule applies to clearing error
4181 * count, this is optional.
4182 */
4183 net->error_count = 0;
4184 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4185 /* addr came good */
4186 net->dest_state |= SCTP_ADDR_REACHABLE;
4187 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4188 0, (void *)net, SCTP_SO_NOT_LOCKED);
4190 if (net == stcb->asoc.primary_destination) {
4191 if (stcb->asoc.alternate) {
4192 /*
4193 * release the alternate,
4194 * primary destination is good.
4195 */
4196 sctp_free_remote_addr(stcb->asoc.alternate);
4197 stcb->asoc.alternate = NULL;
4200 if (net->dest_state & SCTP_ADDR_PF) {
4201 net->dest_state &= ~SCTP_ADDR_PF;
4202 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4203 stcb->sctp_ep, stcb, net,
4204 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4205 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4206 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4207 /* Done with this net */
4210 /* restore any doubled timers */
4211 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4212 if (net->RTO < stcb->asoc.minrto) {
4213 net->RTO = stcb->asoc.minrto;
4215 if (net->RTO > stcb->asoc.maxrto) {
4216 net->RTO = stcb->asoc.maxrto;
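/*
 * Illustrative worked example (editorial, not from this file):
 * assuming SCTP_RTT_SHIFT == 3, lastsa holds the smoothed RTT
 * scaled by 8, so for a smoothed RTT of 100 ms and a variance
 * term of 50 ms:
 *
 *	lastsa = 800; lastsv = 50;
 *	RTO    = (800 >> 3) + 50 = 150 ms
 *
 * which is then clamped into [minrto, maxrto] exactly as above.
 */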
4220 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4222 asoc->last_acked_seq = cumack;
4224 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4225 /* nothing left in-flight */
4226 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4227 net->flight_size = 0;
4228 net->partial_bytes_acked = 0;
4230 asoc->total_flight = 0;
4231 asoc->total_flight_count = 0;
4234 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4235 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4236 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4237 /* SWS sender side engages */
4238 asoc->peers_rwnd = 0;
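/*
 * A minimal user-space sketch of the sender-side SWS check above
 * (names are illustrative; sctp_sbspace_sub() is assumed to be a
 * saturating subtraction):
 *
 *	uint32_t
 *	example_peer_rwnd(uint32_t rwnd, uint32_t flight,
 *	    uint32_t chunk_oh, uint32_t n_chunks, uint32_t sws)
 *	{
 *		uint32_t used = flight + n_chunks * chunk_oh;
 *		uint32_t r = (rwnd > used) ? rwnd - used : 0;
 *
 *		return ((r < sws) ? 0 : r);
 *	}
 */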
4240 if (asoc->peers_rwnd > old_rwnd) {
4241 win_probe_recovery = 1;
4243 /* Now assure a timer is running where data is queued */
4246 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4247 if (win_probe_recovery && (net->window_probe)) {
4248 win_probe_recovered = 1;
4249 /*
4250 * Find first chunk that was used with window probe
4251 * and clear its sent state
4252 */
4253 /* sa_ignore FREED_MEMORY */
4254 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4255 if (tp1->window_probe) {
4256 /* move back to data send queue */
4257 sctp_window_probe_recovery(stcb, asoc, tp1);
4262 if (net->flight_size) {
4264 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4265 if (net->window_probe) {
4266 net->window_probe = 0;
4269 if (net->window_probe) {
4270 /*
4271 * In window probes we must assure a timer
4272 * is still running there
4273 */
4274 net->window_probe = 0;
4275 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4276 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4278 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4279 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4281 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4286 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4287 (asoc->sent_queue_retran_cnt == 0) &&
4288 (win_probe_recovered == 0) &&
4290 /*
4291 * huh, this should not happen unless all packets are
4292 * PR-SCTP and marked to skip, of course.
4293 */
4294 if (sctp_fs_audit(asoc)) {
4295 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4296 net->flight_size = 0;
4298 asoc->total_flight = 0;
4299 asoc->total_flight_count = 0;
4300 asoc->sent_queue_retran_cnt = 0;
4301 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4302 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4303 sctp_flight_size_increase(tp1);
4304 sctp_total_flight_increase(stcb, tp1);
4305 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4306 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
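/*
 * The rebuild above amounts to recomputing flight from scratch
 * (editorial sketch, assuming only chunks with sent below
 * SCTP_DATAGRAM_RESEND count as in flight and that book_size is
 * the accounted size):
 *
 *	asoc->total_flight = 0;
 *	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next)
 *		if (tp1->sent < SCTP_DATAGRAM_RESEND)
 *			asoc->total_flight += tp1->book_size;
 */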
4313 /**********************************/
4314 /* Now what about shutdown issues */
4315 /**********************************/
4316 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4317 /* nothing left on sendqueue.. consider done */
4319 if ((asoc->stream_queue_cnt == 1) &&
4320 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4321 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4322 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4323 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4325 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4326 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4327 (asoc->stream_queue_cnt == 1) &&
4328 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4329 struct mbuf *op_err;
4333 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4334 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4335 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4338 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4339 (asoc->stream_queue_cnt == 0)) {
4340 struct sctp_nets *netp;
4342 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4343 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4344 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4346 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4347 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4348 sctp_stop_timers_for_shutdown(stcb);
4349 if (asoc->alternate) {
4350 netp = asoc->alternate;
4352 netp = asoc->primary_destination;
4354 sctp_send_shutdown(stcb, netp);
4355 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4356 stcb->sctp_ep, stcb, netp);
4357 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4358 stcb->sctp_ep, stcb, netp);
4359 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4360 (asoc->stream_queue_cnt == 0)) {
4361 struct sctp_nets *netp;
4363 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4364 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4365 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4366 sctp_stop_timers_for_shutdown(stcb);
4367 if (asoc->alternate) {
4368 netp = asoc->alternate;
4370 netp = asoc->primary_destination;
4372 sctp_send_shutdown_ack(stcb, netp);
4373 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4374 stcb->sctp_ep, stcb, netp);
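/*
 * Editorial summary of the two branches above; no states beyond
 * those already used are assumed:
 *
 *	queues empty + SHUTDOWN_PENDING  -> send SHUTDOWN,
 *	    start the shutdown and shutdown-guard timers
 *	queues empty + SHUTDOWN_RECEIVED -> send SHUTDOWN-ACK,
 *	    start the shutdown-ack timer
 */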
4377 /*********************************************/
4378 /* Here we perform PR-SCTP procedures */
4380 /*********************************************/
4381 /* C1. update advancedPeerAckPoint */
4382 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4383 asoc->advanced_peer_ack_point = cumack;
4385 /* PR-SCTP issues need to be addressed too */
4386 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4387 struct sctp_tmit_chunk *lchk;
4388 uint32_t old_adv_peer_ack_point;
4390 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4391 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4392 /* C3. See if we need to send a Fwd-TSN */
4393 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4394 /*
4395 * ISSUE with ECN, see FWD-TSN processing.
4396 */
4397 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4398 send_forward_tsn(stcb, asoc);
4400 /* try to FR fwd-tsn's that get lost too */
4401 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4402 send_forward_tsn(stcb, asoc);
4407 /* Assure a timer is up */
4408 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4409 stcb->sctp_ep, stcb, lchk->whoTo);
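/*
 * Condensed sketch of rules C1-C3 above, in the style of RFC 3758
 * (helper name is illustrative, not a real function):
 *
 *	if (SCTP_TSN_GT(cumack, adv_point))
 *		adv_point = cumack;			C1
 *	lchk = try_advance_past_abandoned_tsns();	C2
 *	if (SCTP_TSN_GT(adv_point, cumack))
 *		queue FORWARD-TSN(adv_point);		C3
 */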
4412 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4413 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4415 stcb->asoc.peers_rwnd,
4416 stcb->asoc.total_flight,
4417 stcb->asoc.total_output_queue_size);
4421 void
4422 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4423 struct sctp_tcb *stcb,
4424 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4425 int *abort_now, uint8_t flags,
4426 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4428 struct sctp_association *asoc;
4429 struct sctp_tmit_chunk *tp1, *tp2;
4430 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4431 uint16_t wake_him = 0;
4432 uint32_t send_s = 0;
4434 int accum_moved = 0;
4435 int will_exit_fast_recovery = 0;
4436 uint32_t a_rwnd, old_rwnd;
4437 int win_probe_recovery = 0;
4438 int win_probe_recovered = 0;
4439 struct sctp_nets *net = NULL;
4442 uint8_t reneged_all = 0;
4443 uint8_t cmt_dac_flag;
4445 /*
4446 * we take any chance we can to service our queues since we cannot
4447 * get awoken when the socket is read from :<
4448 */
4449 /*
4450 * Now perform the actual SACK handling: 1) Verify that it is not an
4451 * old sack, if so discard. 2) If there is nothing left in the send
4452 * queue (cum-ack is equal to last acked) then you have a duplicate
4453 * too, update any rwnd change and verify no timers are running.
4454 * Then return. 3) Process any new consecutive data, i.e. cum-ack
4455 * moved; process these first and note that it moved. 4) Process any
4456 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4457 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4458 * sync up flightsizes and things, stop all timers and also check
4459 * for shutdown_pending state. If so then go ahead and send off the
4460 * shutdown. If in shutdown recv, send off the shutdown-ack and
4461 * start that timer. Return. 9) Strike any non-acked things and do FR
4462 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4463 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4464 * if in shutdown_recv state.
4465 */
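/*
 * The TSN comparisons used throughout rely on 32-bit serial-number
 * arithmetic (RFC 1982 style). A user-space sketch of what a macro
 * like SCTP_TSN_GT() boils down to (illustrative; see the SCTP
 * headers for the real definition):
 *
 *	static int
 *	tsn_gt(uint32_t a, uint32_t b)
 *	{
 *		return (((a < b) && ((b - a) > (1U << 31))) ||
 *		    ((a > b) && ((a - b) < (1U << 31))));
 *	}
 *
 * so, e.g., tsn_gt(0x00000001, 0xffffffff) is true across the wrap.
 */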
4466 SCTP_TCB_LOCK_ASSERT(stcb);
4468 this_sack_lowest_newack = 0;
4469 SCTP_STAT_INCR(sctps_slowpath_sack);
4471 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4472 #ifdef SCTP_ASOCLOG_OF_TSNS
4473 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4474 stcb->asoc.cumack_log_at++;
4475 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4476 stcb->asoc.cumack_log_at = 0;
4481 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4482 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4483 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4485 old_rwnd = stcb->asoc.peers_rwnd;
4486 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4487 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4488 stcb->asoc.overall_error_count,
4490 SCTP_FROM_SCTP_INDATA,
4493 stcb->asoc.overall_error_count = 0;
4495 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4496 sctp_log_sack(asoc->last_acked_seq,
4503 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4505 uint32_t *dupdata, dblock;
4507 for (i = 0; i < num_dup; i++) {
4508 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4509 sizeof(uint32_t), (uint8_t *)&dblock);
4510 if (dupdata == NULL) {
4513 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4517 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4518 tp1 = TAILQ_LAST(&asoc->sent_queue,
4519 sctpchunk_listhead);
4520 send_s = tp1->rec.data.tsn + 1;
4521 } else {
4522     tp1 = NULL;
4523 send_s = asoc->sending_seq;
4525 if (SCTP_TSN_GE(cum_ack, send_s)) {
4526 struct mbuf *op_err;
4527 char msg[SCTP_DIAG_INFO_LEN];
4529 /*
4530 * no way, we have not even sent this TSN out yet. Peer is
4531 * hopelessly messed up with us.
4532 */
4533 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4536 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4537 tp1->rec.data.tsn, (void *)tp1);
4542 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater than or equal to TSN %8.8x",
4543     cum_ack, send_s);
4544 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4545 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4546 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4549 /**********************/
4550 /* 1) check the range */
4551 /**********************/
4552 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4553 /* acking something behind */
4556 /* update the Rwnd of the peer */
4557 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4558 TAILQ_EMPTY(&asoc->send_queue) &&
4559 (asoc->stream_queue_cnt == 0)) {
4560 /* nothing left on send/sent and strmq */
4561 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4562 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4563 asoc->peers_rwnd, 0, 0, a_rwnd);
4565 asoc->peers_rwnd = a_rwnd;
4566 if (asoc->sent_queue_retran_cnt) {
4567 asoc->sent_queue_retran_cnt = 0;
4569 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4570 /* SWS sender side engages */
4571 asoc->peers_rwnd = 0;
4573 /* stop any timers */
4574 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4575 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4576 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4577 net->partial_bytes_acked = 0;
4578 net->flight_size = 0;
4580 asoc->total_flight = 0;
4581 asoc->total_flight_count = 0;
4584 /*
4585 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4586 * things. The total byte count acked is tracked in netAckSz AND
4587 * netAck2 is used to track the total bytes acked that are
4588 * unambiguous and were never retransmitted. We track these on a per
4589 * destination address basis.
4590 */
4591 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4592 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4593 /* Drag along the window_tsn for cwr's */
4594 net->cwr_window_tsn = cum_ack;
4596 net->prev_cwnd = net->cwnd;
4600 /*
4601 * CMT: Reset CUC and Fast recovery algo variables before
4602 * SACK processing
4603 */
4604 net->new_pseudo_cumack = 0;
4605 net->will_exit_fast_recovery = 0;
4606 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4607 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4609 /*
4610 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4611 * to be greater than the cumack. Also reset saw_newack to 0
4612 * for all dests.
4613 */
4614 net->saw_newack = 0;
4615 net->this_sack_highest_newack = last_tsn;
4617 /* process the new consecutive TSN first */
4618 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4619 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4620 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4622 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4623 /*
4624 * If it is less than ACKED, it is
4625 * now no longer in flight. Higher
4626 * values may occur during marking.
4627 */
4628 if ((tp1->whoTo->dest_state &
4629 SCTP_ADDR_UNCONFIRMED) &&
4630 (tp1->snd_count < 2)) {
4631 /*
4632 * If there was no retran
4633 * and the address is
4634 * un-confirmed and we sent
4635 * there and are now
4636 * sacked... it's confirmed,
4637 * mark it so.
4638 */
4639 tp1->whoTo->dest_state &=
4640 ~SCTP_ADDR_UNCONFIRMED;
4642 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4643 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4644 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4645 tp1->whoTo->flight_size,
4646 tp1->book_size,
4647 (uint32_t)(uintptr_t)tp1->whoTo,
4648 tp1->rec.data.tsn);
4650 sctp_flight_size_decrease(tp1);
4651 sctp_total_flight_decrease(stcb, tp1);
4652 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4653 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4657 tp1->whoTo->net_ack += tp1->send_size;
4659 /* CMT SFR and DAC algos */
4660 this_sack_lowest_newack = tp1->rec.data.tsn;
4661 tp1->whoTo->saw_newack = 1;
4663 if (tp1->snd_count < 2) {
4664 /*
4665 * True non-retransmitted
4666 * chunk
4667 */
4668 tp1->whoTo->net_ack2 +=
4669     tp1->send_size;
4671 /* update RTO too? */
4675 sctp_calculate_rto(stcb,
4676     asoc, tp1->whoTo,
4677     &tp1->sent_rcv_time,
4678     SCTP_RTT_FROM_DATA);
4681 if (tp1->whoTo->rto_needed == 0) {
4682 tp1->whoTo->rto_needed = 1;
4687 /*
4688 * CMT: CUCv2 algorithm. From the
4689 * cumack'd TSNs, for each TSN being
4690 * acked for the first time, set the
4691 * following variables for the
4692 * corresponding destination.
4693 * new_pseudo_cumack will trigger a
4694 * cwnd update.
4695 * find_(rtx_)pseudo_cumack will
4696 * trigger search for the next
4697 * expected (rtx-)pseudo-cumack.
4698 */
4699 tp1->whoTo->new_pseudo_cumack = 1;
4700 tp1->whoTo->find_pseudo_cumack = 1;
4701 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4704 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4705 sctp_log_sack(asoc->last_acked_seq,
4710 SCTP_LOG_TSN_ACKED);
4712 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4713 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4716 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4717 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4718 #ifdef SCTP_AUDITING_ENABLED
4719 sctp_audit_log(0xB3,
4720 (asoc->sent_queue_retran_cnt & 0x000000ff));
4723 if (tp1->rec.data.chunk_was_revoked) {
4724 /* deflate the cwnd */
4725 tp1->whoTo->cwnd -= tp1->book_size;
4726 tp1->rec.data.chunk_was_revoked = 0;
4728 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4729 tp1->sent = SCTP_DATAGRAM_ACKED;
4736 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4737 /* always set this up to cum-ack */
4738 asoc->this_sack_highest_gap = last_tsn;
4740 if ((num_seg > 0) || (num_nr_seg > 0)) {
4742 /*
4743 * thisSackHighestGap will increase while handling NEW
4744 * segments. this_sack_highest_newack will increase while
4745 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4746 * used for CMT DAC algo. saw_newack will also change.
4747 */
4748 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4749 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4750 num_seg, num_nr_seg, &rto_ok)) {
4753 /*
4754 * validate the biggest_tsn_acked in the gap acks if strict
4755 * adherence is wanted.
4756 */
4757 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4758 /*
4759 * peer is either confused or we are under attack.
4760 * We must abort.
4761 */
4762 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4763 biggest_tsn_acked, send_s);
4767 /*******************************************/
4768 /* cancel ALL T3-send timer if accum moved */
4769 /*******************************************/
4770 if (asoc->sctp_cmt_on_off > 0) {
4771 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4772 if (net->new_pseudo_cumack)
4773 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4775 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4780 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4781 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4782 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4786 /********************************************/
4787 /* drop the acked chunks from the sentqueue */
4788 /********************************************/
4789 asoc->last_acked_seq = cum_ack;
4791 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4792 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4795 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4796 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4797 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4798 #ifdef INVARIANTS
4799 } else {
4800 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4801 #endif
4804 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4805 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4806 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4807 asoc->trigger_reset = 1;
4809 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4810 if (PR_SCTP_ENABLED(tp1->flags)) {
4811 if (asoc->pr_sctp_cnt != 0)
4812 asoc->pr_sctp_cnt--;
4814 asoc->sent_queue_cnt--;
4816 /* sa_ignore NO_NULL_CHK */
4817 sctp_free_bufspace(stcb, asoc, tp1, 1);
4818 sctp_m_freem(tp1->data);
4820 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4821 asoc->sent_queue_cnt_removeable--;
4824 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4825 sctp_log_sack(asoc->last_acked_seq,
4830 SCTP_LOG_FREE_SENT);
4832 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4835 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4836 #ifdef INVARIANTS
4837 panic("Warning flight size is positive and should be 0");
4838 #else
4839 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4840 asoc->total_flight);
4841 #endif
4842 asoc->total_flight = 0;
4844 /* sa_ignore NO_NULL_CHK */
4845 if ((wake_him) && (stcb->sctp_socket)) {
4846 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4847 struct socket *so;
4849 #endif
4850 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4852 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4854 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4855 so = SCTP_INP_SO(stcb->sctp_ep);
4856 atomic_add_int(&stcb->asoc.refcnt, 1);
4857 SCTP_TCB_UNLOCK(stcb);
4858 SCTP_SOCKET_LOCK(so, 1);
4859 SCTP_TCB_LOCK(stcb);
4860 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4861 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4862 /* assoc was freed while we were unlocked */
4863 SCTP_SOCKET_UNLOCK(so, 1);
4864 return;
4865 }
4866 #endif
4867 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4868 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4869 SCTP_SOCKET_UNLOCK(so, 1);
4870 #endif
4871 } else {
4872 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4873 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4877 if (asoc->fast_retran_loss_recovery && accum_moved) {
4878 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4879 /* Setup so we will exit RFC2582 fast recovery */
4880 will_exit_fast_recovery = 1;
4883 /*
4884 * Check for revoked fragments:
4885 *
4886 * if Previous sack - Had no frags then we can't have any revoked. If
4887 * Previous sack - Had frags then - If we now have frags aka
4888 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4889 * some of them. else - The peer revoked all ACKED fragments, since
4890 * we had some before and now we have NONE.
4891 */
4894 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4895 asoc->saw_sack_with_frags = 1;
4896 } else if (asoc->saw_sack_with_frags) {
4897 int cnt_revoked = 0;
4899 /* Peer revoked all dg's marked or acked */
4900 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4901 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4902 tp1->sent = SCTP_DATAGRAM_SENT;
4903 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4904 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4905 tp1->whoTo->flight_size,
4906 tp1->book_size,
4907 (uint32_t)(uintptr_t)tp1->whoTo,
4908 tp1->rec.data.tsn);
4910 sctp_flight_size_increase(tp1);
4911 sctp_total_flight_increase(stcb, tp1);
4912 tp1->rec.data.chunk_was_revoked = 1;
4913 /*
4914 * To ensure that this increase in
4915 * flightsize, which is artificial, does not
4916 * throttle the sender, we also increase the
4917 * cwnd artificially.
4918 */
4919 tp1->whoTo->cwnd += tp1->book_size;
4926 asoc->saw_sack_with_frags = 0;
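/*
 * Net effect of the revoke/re-ack pair (editorial sketch): the
 * artificial inflation applied above is undone when the chunk is
 * finally cum-acked:
 *
 *	revoke:	flight += book_size; cwnd += book_size;
 *		chunk_was_revoked = 1;
 *	ack:	flight -= book_size;
 *		if (chunk_was_revoked)
 *			cwnd -= book_size;
 */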
4929 asoc->saw_sack_with_nr_frags = 1;
4931 asoc->saw_sack_with_nr_frags = 0;
4933 /* JRS - Use the congestion control given in the CC module */
4934 if (ecne_seen == 0) {
4935 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4936 if (net->net_ack2 > 0) {
4937 /*
4938 * Karn's rule applies to clearing error
4939 * count, this is optional.
4940 */
4941 net->error_count = 0;
4942 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4943 /* addr came good */
4944 net->dest_state |= SCTP_ADDR_REACHABLE;
4945 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4946 0, (void *)net, SCTP_SO_NOT_LOCKED);
4948 if (net == stcb->asoc.primary_destination) {
4949 if (stcb->asoc.alternate) {
4950 /*
4951 * release the alternate,
4952 * primary destination is good.
4953 */
4954 sctp_free_remote_addr(stcb->asoc.alternate);
4955 stcb->asoc.alternate = NULL;
4958 if (net->dest_state & SCTP_ADDR_PF) {
4959 net->dest_state &= ~SCTP_ADDR_PF;
4960 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4961 stcb->sctp_ep, stcb, net,
4962 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4963 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4964 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4965 /* Done with this net */
4968 /* restore any doubled timers */
4969 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4970 if (net->RTO < stcb->asoc.minrto) {
4971 net->RTO = stcb->asoc.minrto;
4973 if (net->RTO > stcb->asoc.maxrto) {
4974 net->RTO = stcb->asoc.maxrto;
4978 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4980 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4981 /* nothing left in-flight */
4982 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4983 /* stop all timers */
4984 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4986 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4987 net->flight_size = 0;
4988 net->partial_bytes_acked = 0;
4990 asoc->total_flight = 0;
4991 asoc->total_flight_count = 0;
4993 /**********************************/
4994 /* Now what about shutdown issues */
4995 /**********************************/
4996 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4997 /* nothing left on sendqueue.. consider done */
4998 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4999 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5000 asoc->peers_rwnd, 0, 0, a_rwnd);
5002 asoc->peers_rwnd = a_rwnd;
5003 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5004 /* SWS sender side engages */
5005 asoc->peers_rwnd = 0;
5008 if ((asoc->stream_queue_cnt == 1) &&
5009 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5010 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5011 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5012 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5014 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5015 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5016 (asoc->stream_queue_cnt == 1) &&
5017 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5018 struct mbuf *op_err;
5022 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5023 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5024 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5027 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5028 (asoc->stream_queue_cnt == 0)) {
5029 struct sctp_nets *netp;
5031 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5032 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5033 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5035 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5036 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5037 sctp_stop_timers_for_shutdown(stcb);
5038 if (asoc->alternate) {
5039 netp = asoc->alternate;
5041 netp = asoc->primary_destination;
5043 sctp_send_shutdown(stcb, netp);
5044 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5045 stcb->sctp_ep, stcb, netp);
5046 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5047 stcb->sctp_ep, stcb, netp);
5049 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5050 (asoc->stream_queue_cnt == 0)) {
5051 struct sctp_nets *netp;
5053 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5054 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5055 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5056 sctp_stop_timers_for_shutdown(stcb);
5057 if (asoc->alternate) {
5058 netp = asoc->alternate;
5060 netp = asoc->primary_destination;
5062 sctp_send_shutdown_ack(stcb, netp);
5063 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5064 stcb->sctp_ep, stcb, netp);
5068 /*
5069 * Now here we are going to recycle net_ack for a different use...
5070 */
5072 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5073     net->net_ack = 0;
5074 }
5076 /*
5077 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5078 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5079 * automatically ensure that.
5080 */
5081 if ((asoc->sctp_cmt_on_off > 0) &&
5082 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5083 (cmt_dac_flag == 0)) {
5084 this_sack_lowest_newack = cum_ack;
5086 if ((num_seg > 0) || (num_nr_seg > 0)) {
5087 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5088 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5090 /* JRS - Use the congestion control given in the CC module */
5091 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5093 /* Now are we exiting loss recovery ? */
5094 if (will_exit_fast_recovery) {
5095 /* Ok, we must exit fast recovery */
5096 asoc->fast_retran_loss_recovery = 0;
5098 if ((asoc->sat_t3_loss_recovery) &&
5099 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5100 /* end satellite t3 loss recovery */
5101 asoc->sat_t3_loss_recovery = 0;
5106 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5107 if (net->will_exit_fast_recovery) {
5108 /* Ok, we must exit fast recovery */
5109 net->fast_retran_loss_recovery = 0;
5113 /* Adjust and set the new rwnd value */
5114 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5115 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5116 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5118 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5119 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5120 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5121 /* SWS sender side engages */
5122 asoc->peers_rwnd = 0;
5124 if (asoc->peers_rwnd > old_rwnd) {
5125 win_probe_recovery = 1;
5127 /*
5128 * Now we must setup so we have a timer up for anyone with
5129 * outstanding data.
5130 */
5134 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5135 if (win_probe_recovery && (net->window_probe)) {
5136 win_probe_recovered = 1;
5137 /*
5138 * Find first chunk that was used with
5139 * window probe and clear the event. Put
5140 * it back into the send queue as if it has
5141 * not been sent.
5142 */
5143 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5144 if (tp1->window_probe) {
5145 sctp_window_probe_recovery(stcb, asoc, tp1);
5150 if (net->flight_size) {
5152 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5153 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5154 stcb->sctp_ep, stcb, net);
5156 if (net->window_probe) {
5157 net->window_probe = 0;
5160 if (net->window_probe) {
5161 /*
5162 * In window probes we must assure a timer
5163 * is still running there
5164 */
5165 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5166 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5167 stcb->sctp_ep, stcb, net);
5170 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5171 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5173 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5178 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5179 (asoc->sent_queue_retran_cnt == 0) &&
5180 (win_probe_recovered == 0) &&
5182 /*
5183 * huh, this should not happen unless all packets are
5184 * PR-SCTP and marked to skip, of course.
5185 */
5186 if (sctp_fs_audit(asoc)) {
5187 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5188 net->flight_size = 0;
5190 asoc->total_flight = 0;
5191 asoc->total_flight_count = 0;
5192 asoc->sent_queue_retran_cnt = 0;
5193 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5194 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5195 sctp_flight_size_increase(tp1);
5196 sctp_total_flight_increase(stcb, tp1);
5197 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5198 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5205 /*********************************************/
5206 /* Here we perform PR-SCTP procedures */
5208 /*********************************************/
5209 /* C1. update advancedPeerAckPoint */
5210 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5211 asoc->advanced_peer_ack_point = cum_ack;
5213 /* C2. try to further move advancedPeerAckPoint ahead */
5214 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5215 struct sctp_tmit_chunk *lchk;
5216 uint32_t old_adv_peer_ack_point;
5218 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5219 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5220 /* C3. See if we need to send a Fwd-TSN */
5221 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5222 /*
5223 * ISSUE with ECN, see FWD-TSN processing.
5224 */
5225 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5226 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5227 0xee, cum_ack, asoc->advanced_peer_ack_point,
5228 old_adv_peer_ack_point);
5230 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5231 send_forward_tsn(stcb, asoc);
5233 /* try to FR fwd-tsn's that get lost too */
5234 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5235 send_forward_tsn(stcb, asoc);
5240 /* Assure a timer is up */
5241 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5242 stcb->sctp_ep, stcb, lchk->whoTo);
5245 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5246 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5248 stcb->asoc.peers_rwnd,
5249 stcb->asoc.total_flight,
5250 stcb->asoc.total_output_queue_size);
5254 void
5255 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5258 uint32_t cum_ack, a_rwnd;
5260 cum_ack = ntohl(cp->cumulative_tsn_ack);
5261 /* Arrange so a_rwnd does NOT change */
5262 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5264 /* Now call the express sack handling */
5265 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
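/*
 * Worked example of the arithmetic above (editorial): the express
 * handler recomputes peers_rwnd as a_rwnd minus the bytes in flight,
 * so, ignoring the optional per-chunk overhead term, choosing
 *
 *	a_rwnd = peers_rwnd + total_flight
 *
 * e.g. 40000 + 12000 = 52000 yields 52000 - 12000 = 40000 again:
 * the peer's window is left unchanged by this SHUTDOWN-driven ack.
 */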
5268 static void
5269 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5270 struct sctp_stream_in *strmin)
5272 struct sctp_queued_to_read *control, *ncontrol;
5273 struct sctp_association *asoc;
5275 int need_reasm_check = 0;
5278 mid = strmin->last_mid_delivered;
5279 /*
5280 * First deliver anything prior to and including the stream no that
5281 * came in.
5282 */
5283 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5284 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5285 /* this is deliverable now */
5286 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5287 if (control->on_strm_q) {
5288 if (control->on_strm_q == SCTP_ON_ORDERED) {
5289 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5290 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5291 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5292 #ifdef INVARIANTS
5293 } else {
5294 panic("strmin: %p ctl: %p unknown %d",
5295     strmin, control, control->on_strm_q);
5296 #endif
5297 }
5298 control->on_strm_q = 0;
5300 /* subtract pending on streams */
5301 if (asoc->size_on_all_streams >= control->length) {
5302 asoc->size_on_all_streams -= control->length;
5303 } else {
5304 #ifdef INVARIANTS
5305 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5306 #else
5307 asoc->size_on_all_streams = 0;
5308 #endif
5310 sctp_ucount_decr(asoc->cnt_on_all_streams);
5311 /* deliver it to at least the delivery-q */
5312 if (stcb->sctp_socket) {
5313 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5314 sctp_add_to_readq(stcb->sctp_ep, stcb,
5316 &stcb->sctp_socket->so_rcv,
5317 1, SCTP_READ_LOCK_HELD,
5318 SCTP_SO_NOT_LOCKED);
5321 /* It's a fragmented message */
5322 if (control->first_frag_seen) {
5323 /*
5324 * Make it so this is next to
5325 * deliver, we restore later
5326 */
5327 strmin->last_mid_delivered = control->mid - 1;
5328 need_reasm_check = 1;
5333 /* no more delivery now. */
5337 if (need_reasm_check) {
5340 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5341 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5342 /* Restore the next to deliver unless we are ahead */
5343 strmin->last_mid_delivered = mid;
5346 /* Left the front Partial one on */
5349 need_reasm_check = 0;
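/*
 * Message-ID comparisons in this function (editorial sketch): with
 * I-DATA support the MID is a full 32-bit serial number; without it
 * only the 16-bit SSN space is compared. Roughly:
 *
 *	#define EX_MID_GE(idata, a, b) \
 *		((idata) ? serial_ge32((a), (b)) : \
 *		    serial_ge16((uint16_t)(a), (uint16_t)(b)))
 *
 * where serial_geNN() is RFC 1982 comparison at the given width
 * (names illustrative; the real SCTP_MID_* macros live in the SCTP
 * headers).
 */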
5351 /*
5352 * now we must deliver things in queue the normal way if any are
5353 * now ready.
5354 */
5355 mid = strmin->last_mid_delivered + 1;
5356 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5357 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5358 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5359 /* this is deliverable now */
5360 if (control->on_strm_q) {
5361 if (control->on_strm_q == SCTP_ON_ORDERED) {
5362 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5363 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5364 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5365 #ifdef INVARIANTS
5366 } else {
5367 panic("strmin: %p ctl: %p unknown %d",
5368     strmin, control, control->on_strm_q);
5369 #endif
5370 }
5371 control->on_strm_q = 0;
5373 /* subtract pending on streams */
5374 if (asoc->size_on_all_streams >= control->length) {
5375 asoc->size_on_all_streams -= control->length;
5376 } else {
5377 #ifdef INVARIANTS
5378 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5379 #else
5380 asoc->size_on_all_streams = 0;
5381 #endif
5383 sctp_ucount_decr(asoc->cnt_on_all_streams);
5384 /* deliver it to at least the delivery-q */
5385 strmin->last_mid_delivered = control->mid;
5386 if (stcb->sctp_socket) {
5387 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5388 sctp_add_to_readq(stcb->sctp_ep, stcb,
5390 &stcb->sctp_socket->so_rcv, 1,
5391 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5394 mid = strmin->last_mid_delivered + 1;
5396 /* It's a fragmented message */
5397 if (control->first_frag_seen) {
5398 /*
5399 * Make it so this is next to
5400 * deliver
5401 */
5402 strmin->last_mid_delivered = control->mid - 1;
5403 need_reasm_check = 1;
5411 if (need_reasm_check) {
5412 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5418 static void
5419 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5420 struct sctp_association *asoc,
5421 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5423 struct sctp_queued_to_read *control;
5424 struct sctp_stream_in *strm;
5425 struct sctp_tmit_chunk *chk, *nchk;
5426 int cnt_removed = 0;
5428 /*
5429 * For now large messages held on the stream reasm that are complete
5430 * will be tossed too. We could in theory do more work to spin
5431 * through and stop after dumping one msg aka seeing the start of a
5432 * new msg at the head, and call the delivery function... to see if
5433 * it can be delivered... But for now we just dump everything on the
5434 * queue.
5435 */
5436 strm = &asoc->strmin[stream];
5437 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
5438 if (control == NULL) {
5442 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5443     return;
5444 }
5445 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5446 /* Purge hanging chunks */
5447 if (!asoc->idata_supported && (ordered == 0)) {
5448 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5453 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5454 if (asoc->size_on_reasm_queue >= chk->send_size) {
5455 asoc->size_on_reasm_queue -= chk->send_size;
5456 } else {
5457 #ifdef INVARIANTS
5458 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5459 #else
5460 asoc->size_on_reasm_queue = 0;
5461 #endif
5463 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5465 sctp_m_freem(chk->data);
5468 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5470 if (!TAILQ_EMPTY(&control->reasm)) {
5471 /* This has to be old data, unordered */
5472 if (control->data) {
5473 sctp_m_freem(control->data);
5474 control->data = NULL;
5476 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5477 chk = TAILQ_FIRST(&control->reasm);
5478 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5479 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5480 sctp_add_chk_to_control(control, strm, stcb, asoc,
5481 chk, SCTP_READ_LOCK_HELD);
5483 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5486 if (control->on_strm_q == SCTP_ON_ORDERED) {
5487 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5488 if (asoc->size_on_all_streams >= control->length) {
5489 asoc->size_on_all_streams -= control->length;
5490 } else {
5491 #ifdef INVARIANTS
5492 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5493 #else
5494 asoc->size_on_all_streams = 0;
5495 #endif
5497 sctp_ucount_decr(asoc->cnt_on_all_streams);
5498 control->on_strm_q = 0;
5499 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5500 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5501 control->on_strm_q = 0;
5503 } else if (control->on_strm_q) {
5504 panic("strm: %p ctl: %p unknown %d",
5505 strm, control, control->on_strm_q);
5508 control->on_strm_q = 0;
5509 if (control->on_read_q == 0) {
5510 sctp_free_remote_addr(control->whoFrom);
5511 if (control->data) {
5512 sctp_m_freem(control->data);
5513 control->data = NULL;
5515 sctp_free_a_readq(stcb, control);
5519 void
5520 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5521 struct sctp_forward_tsn_chunk *fwd,
5522 int *abort_flag, struct mbuf *m, int offset)
5524 /* The pr-sctp fwd tsn */
5525 /*
5526 * here we will perform all the data receiver side steps for
5527 * processing FwdTSN, as required by the pr-sctp draft:
5528 *
5529 * Assume we get FwdTSN(x):
5530 *
5531 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5532 * + others we have 3) examine and update re-ordering queue on
5533 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5534 * report where we are.
5535 */
5536 struct sctp_association *asoc;
5537 uint32_t new_cum_tsn, gap;
5538 unsigned int i, fwd_sz, m_size;
5540 struct sctp_stream_in *strm;
5541 struct sctp_queued_to_read *control, *sv;
5544 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5545 SCTPDBG(SCTP_DEBUG_INDATA1,
5546 "Bad size too small/big fwd-tsn\n");
5549 m_size = (stcb->asoc.mapping_array_size << 3);
5550 /*************************************************************/
5551 /* 1. Here we update local cumTSN and shift the bitmap array */
5552 /*************************************************************/
5553 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5555 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5556 /* Already got there ... */
5562 /* now we know the new TSN is more advanced, let's find the actual gap */
5563 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5564 asoc->cumulative_tsn = new_cum_tsn;
5565 if (gap >= m_size) {
5566 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5567 struct mbuf *op_err;
5568 char msg[SCTP_DIAG_INFO_LEN];
5570 /*
5571 * out of range (of single byte chunks in the rwnd I
5572 * give out). This must be an attacker.
5573 */
5575 snprintf(msg, sizeof(msg),
5576 "New cum ack %8.8x too high, highest TSN %8.8x",
5577 new_cum_tsn, asoc->highest_tsn_inside_map);
5578 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5579 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5580 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5583 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5585 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5586 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5587 asoc->highest_tsn_inside_map = new_cum_tsn;
5589 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5590 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5592 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5593 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5596 SCTP_TCB_LOCK_ASSERT(stcb);
5597 for (i = 0; i <= gap; i++) {
5598 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5599 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5600 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5601 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5602 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
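/*
 * Sketch of the gap computation above (editorial): in serial space
 * the unsigned subtraction
 *
 *	gap = new_cum_tsn - mapping_array_base_tsn;	(mod 2^32)
 *
 * does the right thing across a wrap, e.g. base 0xfffffff0 and new
 * cum-ack 0x00000010 give gap 0x20, and the loop then marks TSNs
 * base .. base+gap present in the nr mapping array.
 */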
5607 /*************************************************************/
5608 /* 2. Clear up re-assembly queue */
5609 /*************************************************************/
5611 /* This is now done as part of clearing up the stream/seq */
5612 if (asoc->idata_supported == 0) {
5615 /* Flush all the un-ordered data based on cum-tsn */
5616 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5617 for (sid = 0; sid < asoc->streamincnt; sid++) {
5618 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5620 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5622 /*******************************************************/
5623 /* 3. Update the PR-stream re-ordering queues and fix */
5624 /* delivery issues as needed. */
5625 /*******************************************************/
5626 fwd_sz -= sizeof(*fwd);
5629 unsigned int num_str;
5630 uint32_t mid, cur_mid;
5632 uint16_t ordered, flags;
5633 struct sctp_strseq *stseq, strseqbuf;
5634 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5636 offset += sizeof(*fwd);
5638 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5639 if (asoc->idata_supported) {
5640 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5642 num_str = fwd_sz / sizeof(struct sctp_strseq);
5644 for (i = 0; i < num_str; i++) {
5645 if (asoc->idata_supported) {
5646 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5647 sizeof(struct sctp_strseq_mid),
5648 (uint8_t *)&strseqbuf_m);
5649 offset += sizeof(struct sctp_strseq_mid);
5650 if (stseq_m == NULL) {
5653 sid = ntohs(stseq_m->sid);
5654 mid = ntohl(stseq_m->mid);
5655 flags = ntohs(stseq_m->flags);
5656 if (flags & PR_SCTP_UNORDERED_FLAG) {
5662 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5663 sizeof(struct sctp_strseq),
5664 (uint8_t *)&strseqbuf);
5665 offset += sizeof(struct sctp_strseq);
5666 if (stseq == NULL) {
5669 sid = ntohs(stseq->sid);
5670 mid = (uint32_t)ntohs(stseq->ssn);
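/*
 * Wire-layout sketch of the two entry formats parsed above (assumed
 * to mirror the definitions in sctp_header.h; all fields are in
 * network byte order):
 *
 *	struct sctp_strseq {		FORWARD-TSN entry
 *		uint16_t sid;		stream id
 *		uint16_t ssn;		16-bit stream sequence number
 *	};
 *	struct sctp_strseq_mid {	I-FORWARD-TSN entry
 *		uint16_t sid;		stream id
 *		uint16_t flags;		e.g. PR_SCTP_UNORDERED_FLAG
 *		uint32_t mid;		32-bit message id
 *	};
 */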
5677 /*
5678 * Ok we now look for the stream/seq on the read
5679 * queue where it's not all delivered. If we find it
5680 * we transmute the read entry into a PDI_ABORTED.
5681 */
5682 if (sid >= asoc->streamincnt) {
5683 /* screwed up streams, stop! */
5686 if ((asoc->str_of_pdapi == sid) &&
5687 (asoc->ssn_of_pdapi == mid)) {
5688 /*
5689 * If this is the one we were partially
5690 * delivering now then we no longer are.
5691 * Note this will change with the reassembly
5692 * re-write.
5693 */
5694 asoc->fragmented_delivery_inprogress = 0;
5696 strm = &asoc->strmin[sid];
5697 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5698 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5700 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5701 if ((control->sinfo_stream == sid) &&
5702 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5703 str_seq = (sid << 16) | (0x0000ffff & mid);
5704 control->pdapi_aborted = 1;
5705 sv = stcb->asoc.control_pdapi;
5706 control->end_added = 1;
5707 if (control->on_strm_q == SCTP_ON_ORDERED) {
5708 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5709 if (asoc->size_on_all_streams >= control->length) {
5710 asoc->size_on_all_streams -= control->length;
5711 } else {
5712 #ifdef INVARIANTS
5713 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5714 #else
5715 asoc->size_on_all_streams = 0;
5716 #endif
5718 sctp_ucount_decr(asoc->cnt_on_all_streams);
5719 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5720 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5722 } else if (control->on_strm_q) {
5723 panic("strm: %p ctl: %p unknown %d",
5724 strm, control, control->on_strm_q);
5727 control->on_strm_q = 0;
5728 stcb->asoc.control_pdapi = control;
5729 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5731 SCTP_PARTIAL_DELIVERY_ABORTED,
5733 SCTP_SO_NOT_LOCKED);
5734 stcb->asoc.control_pdapi = sv;
5736 } else if ((control->sinfo_stream == sid) &&
5737 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5738 /* We are past our victim SSN */
5742 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5743 /* Update the sequence number */
5744 strm->last_mid_delivered = mid;
5746 /* now kick the stream the new way */
5747 /* sa_ignore NO_NULL_CHK */
5748 sctp_kick_prsctp_reorder_queue(stcb, strm);
5750 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5752 /*
5753 * Now slide things forward.
5754 */
5755 sctp_slide_mapping_arrays(stcb);