2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
40 #include <netinet/sctp_var.h>
41 #include <netinet/sctp_sysctl.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctp_pcb.h>
44 #include <netinet/sctputil.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_auth.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_indata.h>
51 #include <netinet/sctp_bsd_addr.h>
52 #include <netinet/sctp_input.h>
53 #include <netinet/sctp_crc32.h>
54 #include <netinet/sctp_lock_bsd.h>
56 * NOTES: On the outbound side of things I need to check the sack timer to
57 * see if I should generate a sack into the chunk queue (if I have data to
58 * send, that is) and will be sending it ... for bundling.
60 * The callback in sctp_usrreq.c will get called when the socket is read from.
61 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Forward declaration: merges a reassembly chunk's data into an existing
 * read-queue control entry (definition appears later in this file).
 * lock_held indicates whether the caller already holds the read lock.
 */
65 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
66 struct sctp_stream_in *strm,
67 struct sctp_tcb *stcb,
68 struct sctp_association *asoc,
69 struct sctp_tmit_chunk *chk, int lock_held);
/* Recompute and cache the association's advertised receive window. */
73 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
75 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
78 /* Calculate what the rwnd would be */
80 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
85 * This is really set wrong with respect to a 1-2-m socket. Since
86 * the sb_cc is the count that everyone has put up. When we re-write
87 * sctp_soreceive then we will fix this so that ONLY this
88 * associations data is taken into account.
/* No socket: nothing to advertise from (early-out branch). */
90 if (stcb->sctp_socket == NULL) {
/* Sanity: a zero chunk count must imply a zero byte count on each queue. */
93 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
94 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
95 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
96 ("size_on_all_streams is %u", asoc->size_on_all_streams));
/* Nothing buffered anywhere: grant the full window. */
97 if (stcb->asoc.sb_cc == 0 &&
98 asoc->cnt_on_reasm_queue == 0 &&
99 asoc->cnt_on_all_streams == 0) {
100 /* Full rwnd granted */
101 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
104 /* get actual space */
105 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
107 * take out what has NOT been put on socket queue and we yet hold
/* Charge each held chunk its data size plus MSIZE of mbuf overhead. */
110 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
111 asoc->cnt_on_reasm_queue * MSIZE));
112 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
113 asoc->cnt_on_all_streams * MSIZE));
118 /* what is the overhead of all these rwnd's */
119 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
121 * If the window gets too small due to ctrl-stuff, reduce it to 1,
122 * even it is 0. SWS engaged
124 if (calc < stcb->asoc.my_rwnd_control_len) {
133 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and populate a read-queue entry for an incoming DATA chunk.
 * Takes a reference on `net` (released when the entry is freed).
 * Returns NULL when allocation fails.
 */
135 struct sctp_queued_to_read *
136 sctp_build_readq_entry(struct sctp_tcb *stcb,
137 struct sctp_nets *net,
138 uint32_t tsn, uint32_t ppid,
139 uint32_t context, uint16_t sid,
140 uint32_t mid, uint8_t flags,
143 struct sctp_queued_to_read *read_queue_e = NULL;
145 sctp_alloc_a_readq(stcb, read_queue_e);
146 if (read_queue_e == NULL) {
149 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
150 read_queue_e->sinfo_stream = sid;
/* Chunk flags live in the upper byte of sinfo_flags. */
151 read_queue_e->sinfo_flags = (flags << 8);
152 read_queue_e->sinfo_ppid = ppid;
153 read_queue_e->sinfo_context = context;
154 read_queue_e->sinfo_tsn = tsn;
155 read_queue_e->sinfo_cumtsn = tsn;
156 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
157 read_queue_e->mid = mid;
/* 0xffffffff marks "no fragment seen yet" for the FSN trackers. */
158 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
159 TAILQ_INIT(&read_queue_e->reasm);
160 read_queue_e->whoFrom = net;
161 atomic_add_int(&net->ref_count, 1);
162 read_queue_e->data = dm;
163 read_queue_e->stcb = stcb;
164 read_queue_e->port_from = stcb->rport;
166 return (read_queue_e);
/*
 * Build an mbuf of ancillary (cmsg) data for a received message, based on
 * which notification features the endpoint has enabled:
 * SCTP_RCVINFO, SCTP_NXTINFO, and SCTP_SNDRCV / SCTP_EXTRCV.
 * Returns NULL-equivalent early when no ancillary data is wanted
 * (early-return branch is in lines missing from this extraction).
 */
170 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
172 struct sctp_extrcvinfo *seinfo;
173 struct sctp_sndrcvinfo *outinfo;
174 struct sctp_rcvinfo *rcvinfo;
175 struct sctp_nxtinfo *nxtinfo;
182 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
183 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
184 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
185 /* user does not want any ancillary data */
/* First pass: size the buffer for every cmsg we will emit. */
189 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
190 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
192 seinfo = (struct sctp_extrcvinfo *)sinfo;
193 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
194 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
196 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
200 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
201 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
203 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
206 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
212 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
217 SCTP_BUF_LEN(ret) = 0;
219 /* We need a CMSG header followed by the struct */
220 cmh = mtod(ret, struct cmsghdr *);
222 * Make sure that there is no un-initialized padding between the
223 * cmsg header and cmsg data and after the cmsg data.
/* Second pass: fill each enabled cmsg and advance cmh past it. */
226 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
227 cmh->cmsg_level = IPPROTO_SCTP;
228 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
229 cmh->cmsg_type = SCTP_RCVINFO;
230 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
231 rcvinfo->rcv_sid = sinfo->sinfo_stream;
232 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
233 rcvinfo->rcv_flags = sinfo->sinfo_flags;
234 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
235 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
236 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
237 rcvinfo->rcv_context = sinfo->sinfo_context;
238 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
239 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
240 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO: describes the next message waiting, from serinfo_* fields. */
243 cmh->cmsg_level = IPPROTO_SCTP;
244 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
245 cmh->cmsg_type = SCTP_NXTINFO;
246 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
247 nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
248 nxtinfo->nxt_flags = 0;
249 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
250 nxtinfo->nxt_flags |= SCTP_UNORDERED;
252 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
253 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
255 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
256 nxtinfo->nxt_flags |= SCTP_COMPLETE;
258 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
259 nxtinfo->nxt_length = seinfo->serinfo_next_length;
260 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
261 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
262 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy SCTP_SNDRCV / extended SCTP_EXTRCV cmsg. */
264 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
265 cmh->cmsg_level = IPPROTO_SCTP;
266 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
268 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
269 cmh->cmsg_type = SCTP_EXTRCV;
270 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
271 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
273 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
274 cmh->cmsg_type = SCTP_SNDRCV;
276 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move `tsn` from the (revokable) mapping_array to the non-revokable
 * nr_mapping_array, updating the highest-TSN trackers for both maps.
 * No-op when the drain sysctl is off or the TSN is at/behind the cum-ack.
 */
284 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
286 uint32_t gap, i, cumackp1;
288 int in_r = 0, in_nr = 0;
290 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
293 cumackp1 = asoc->cumulative_tsn + 1;
294 if (SCTP_TSN_GT(cumackp1, tsn)) {
296 * this tsn is behind the cum ack and thus we don't need to
297 * worry about it being moved from one to the other.
301 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
302 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
303 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
/* TSN is in neither map: internal inconsistency (panic in debug builds). */
304 if ((in_r == 0) && (in_nr == 0)) {
306 panic("Things are really messed up now");
308 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
309 sctp_print_mapping_array(asoc);
313 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
315 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
316 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
317 asoc->highest_tsn_inside_nr_map = tsn;
319 if (tsn == asoc->highest_tsn_inside_map) {
320 /* We must back down to see what the new highest is */
321 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
322 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
323 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
324 asoc->highest_tsn_inside_map = i;
/* Nothing left in the revokable map: reset to one below the base TSN. */
330 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * Insert `control` into the proper in-stream queue (unordered vs ordered),
 * kept sorted by MID. Returns non-zero (abort indication) on a duplicate
 * MID — see the duplicate branch below; exact return values are in lines
 * missing from this extraction.
 */
336 sctp_place_control_in_stream(struct sctp_stream_in *strm,
337 struct sctp_association *asoc,
338 struct sctp_queued_to_read *control)
340 struct sctp_queued_to_read *at;
341 struct sctp_readhead *q;
342 uint8_t flags, unordered;
/* Chunk flags are stored in the upper byte of sinfo_flags. */
344 flags = (control->sinfo_flags >> 8);
345 unordered = flags & SCTP_DATA_UNORDERED;
347 q = &strm->uno_inqueue;
/* Old (non I-DATA) unordered: at most one control sits on this queue. */
348 if (asoc->idata_supported == 0) {
349 if (!TAILQ_EMPTY(q)) {
351 * Only one stream can be here in old style
356 TAILQ_INSERT_TAIL(q, control, next_instrm);
357 control->on_strm_q = SCTP_ON_UNORDERED;
/* An unfragmented message is complete on arrival. */
363 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
364 control->end_added = 1;
365 control->first_frag_seen = 1;
366 control->last_frag_seen = 1;
368 if (TAILQ_EMPTY(q)) {
370 TAILQ_INSERT_HEAD(q, control, next_instrm);
372 control->on_strm_q = SCTP_ON_UNORDERED;
374 control->on_strm_q = SCTP_ON_ORDERED;
/* Walk the queue to keep it sorted by MID. */
378 TAILQ_FOREACH(at, q, next_instrm) {
379 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
381 * one in queue is bigger than the new one,
382 * insert before this one
384 TAILQ_INSERT_BEFORE(at, control, next_instrm);
386 control->on_strm_q = SCTP_ON_UNORDERED;
388 control->on_strm_q = SCTP_ON_ORDERED;
391 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
393 * Gak, He sent me a duplicate msg id
394 * number?? return -1 to abort.
398 if (TAILQ_NEXT(at, next_instrm) == NULL) {
400 * We are at the end, insert it
403 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
404 sctp_log_strm_del(control, at,
405 SCTP_STR_LOG_FROM_INSERT_TL);
407 TAILQ_INSERT_AFTER(q, at, control, next_instrm);
409 control->on_strm_q = SCTP_ON_UNORDERED;
411 control->on_strm_q = SCTP_ON_ORDERED;
/*
 * Abort the association due to a reassembly protocol violation:
 * format a diagnostic cause (I-DATA vs old-DATA field layout), free the
 * offending chunk, and tear down the association with that cause.
 * `opspot` identifies the call site (SCTP_FROM_* + SCTP_LOC_* code).
 */
422 sctp_abort_in_reasm(struct sctp_tcb *stcb,
423 struct sctp_queued_to_read *control,
424 struct sctp_tmit_chunk *chk,
425 int *abort_flag, int opspot)
427 char msg[SCTP_DIAG_INFO_LEN];
430 if (stcb->asoc.idata_supported) {
431 snprintf(msg, sizeof(msg),
432 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
434 control->fsn_included,
437 chk->rec.data.fsn, chk->rec.data.mid);
/* Old DATA format: MID doubles as the 16-bit SSN. */
439 snprintf(msg, sizeof(msg),
440 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
442 control->fsn_included,
446 (uint16_t)chk->rec.data.mid);
448 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
449 sctp_m_freem(chk->data);
451 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
452 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
453 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/*
 * Dispose of a control entry that could not be queued: free every chunk
 * still on its reassembly list, then release the entry itself.
 */
458 sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
461 * The control could not be placed and must be cleaned.
463 struct sctp_tmit_chunk *chk, *nchk;
465 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
466 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
468 sctp_m_freem(chk->data);
470 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
472 sctp_free_a_readq(stcb, control);
476 * Queue the chunk either right into the socket buffer if it is the next one
477 * to go OR put it in the correct place in the delivery queue. If we do
478 * append to the so_buf, keep doing so until we are out of order as
479 long as the controls entered are non-fragmented.
/*
 * Queue an ordered message on its stream: deliver immediately to the
 * socket buffer when its MID is the next expected (then drain any
 * now-in-order successors), otherwise insert it sorted via
 * sctp_place_control_in_stream(). Aborts the association on a
 * behind-the-cumulative or duplicate MID.
 */
482 sctp_queue_data_to_stream(struct sctp_tcb *stcb,
483 struct sctp_association *asoc,
484 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
487 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
488 * all the data in one stream this could happen quite rapidly. One
489 * could use the TSN to keep track of things, but this scheme breaks
490 * down in the other type of stream usage that could occur. Send a
491 * single msg to stream 0, send 4Billion messages to stream 1, now
492 * send a message to stream 0. You have a situation where the TSN
493 * has wrapped but not in the stream. Is this worth worrying about
494 * or should we just change our queue sort at the bottom to be by
497 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
498 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
499 * assignment this could happen... and I don't see how this would be
500 * a violation. So for now I am undecided and will leave the sort by
501 * SSN alone. Maybe a hybrid approach is the answer
504 struct sctp_queued_to_read *at;
508 struct sctp_stream_in *strm;
509 char msg[SCTP_DIAG_INFO_LEN];
511 strm = &asoc->strmin[control->sinfo_stream];
512 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
513 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
/* MID behind last delivered => duplicate from the peer: abort. */
515 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
516 /* The incoming sseq is behind where we last delivered? */
517 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
518 strm->last_mid_delivered, control->mid);
520 * throw it in the stream so it gets cleaned up in
521 * association destruction
523 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
524 if (asoc->idata_supported) {
525 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
526 strm->last_mid_delivered, control->sinfo_tsn,
527 control->sinfo_stream, control->mid);
529 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
530 (uint16_t)strm->last_mid_delivered,
532 control->sinfo_stream,
533 (uint16_t)control->mid);
535 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
536 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
537 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Account the message on the stream queues before trying delivery. */
543 asoc->size_on_all_streams += control->length;
544 sctp_ucount_incr(asoc->cnt_on_all_streams);
545 nxt_todel = strm->last_mid_delivered + 1;
546 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
547 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple/lock-testing: take the socket lock, dropping the TCB lock around it. */
550 so = SCTP_INP_SO(stcb->sctp_ep);
551 atomic_add_int(&stcb->asoc.refcnt, 1);
552 SCTP_TCB_UNLOCK(stcb);
553 SCTP_SOCKET_LOCK(so, 1);
555 atomic_subtract_int(&stcb->asoc.refcnt, 1);
556 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
557 SCTP_SOCKET_UNLOCK(so, 1);
561 /* can be delivered right away? */
562 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
563 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
565 /* EY it wont be queued if it could be delivered directly */
/* Undo the accounting done above; the message goes straight to the readq. */
567 if (asoc->size_on_all_streams >= control->length) {
568 asoc->size_on_all_streams -= control->length;
571 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
573 asoc->size_on_all_streams = 0;
576 sctp_ucount_decr(asoc->cnt_on_all_streams);
577 strm->last_mid_delivered++;
578 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
579 sctp_add_to_readq(stcb->sctp_ep, stcb,
581 &stcb->sctp_socket->so_rcv, 1,
582 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Delivering one message may unblock queued successors; drain them. */
583 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
585 nxt_todel = strm->last_mid_delivered + 1;
586 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
587 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
588 if (control->on_strm_q == SCTP_ON_ORDERED) {
589 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
590 if (asoc->size_on_all_streams >= control->length) {
591 asoc->size_on_all_streams -= control->length;
594 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
596 asoc->size_on_all_streams = 0;
599 sctp_ucount_decr(asoc->cnt_on_all_streams);
602 panic("Huh control: %p is on_strm_q: %d",
603 control, control->on_strm_q);
606 control->on_strm_q = 0;
607 strm->last_mid_delivered++;
609 * We ignore the return of deliver_data here
610 * since we always can hold the chunk on the
611 * d-queue. And we have a finite number that
612 * can be delivered from the strq.
614 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
615 sctp_log_strm_del(control, NULL,
616 SCTP_STR_LOG_FROM_IMMED_DEL);
618 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
619 sctp_add_to_readq(stcb->sctp_ep, stcb,
621 &stcb->sctp_socket->so_rcv, 1,
622 SCTP_READ_LOCK_NOT_HELD,
625 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
630 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
631 SCTP_SOCKET_UNLOCK(so, 1);
636 * Ok, we did not deliver this guy, find the correct place
637 * to put it on the queue.
639 if (sctp_place_control_in_stream(strm, asoc, control)) {
/* Duplicate MID detected while inserting: clean up and abort. */
640 snprintf(msg, sizeof(msg),
641 "Queue to str MID: %u duplicate",
643 sctp_clean_up_control(stcb, control);
644 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
645 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
646 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Walk control->data, pruning zero-length mbufs, recomputing
 * control->length, and leaving control->tail_mbuf pointing at the last
 * mbuf. When the control is already on the read queue, each surviving
 * mbuf is also charged to the socket receive buffer (caller holds any
 * needed SB locks).
 */
654 sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
656 struct mbuf *m, *prev = NULL;
657 struct sctp_tcb *stcb;
659 stcb = control->stcb;
660 control->held_length = 0;
664 if (SCTP_BUF_LEN(m) == 0) {
665 /* Skip mbufs with NO length */
/* Head of chain: replace control->data with the freed mbuf's successor. */
668 control->data = sctp_m_free(m);
671 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
672 m = SCTP_BUF_NEXT(prev);
675 control->tail_mbuf = prev;
680 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
681 if (control->on_read_q) {
683 * On read queue so we must increment the SB stuff,
684 * we assume caller has done any locks of SB.
686 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
688 m = SCTP_BUF_NEXT(m);
691 control->tail_mbuf = prev;
/*
 * Append mbuf chain `m` to control's data, skipping zero-length mbufs,
 * updating control->length/tail_mbuf and returning (via *added) the byte
 * count appended. Mirrors sctp_setup_tail_pointer()'s SB accounting when
 * the control is already on the read queue.
 */
696 sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
698 struct mbuf *prev = NULL;
699 struct sctp_tcb *stcb;
701 stcb = control->stcb;
/* Sanity check branch (debug builds panic on a broken control). */
704 panic("Control broken");
709 if (control->tail_mbuf == NULL) {
/* No tail cached yet: rebuild it by scanning the whole chain. */
712 sctp_setup_tail_pointer(control);
715 control->tail_mbuf->m_next = m;
717 if (SCTP_BUF_LEN(m) == 0) {
718 /* Skip mbufs with NO length */
721 control->tail_mbuf->m_next = sctp_m_free(m);
722 m = control->tail_mbuf->m_next;
724 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
725 m = SCTP_BUF_NEXT(prev);
728 control->tail_mbuf = prev;
733 if (control->on_read_q) {
735 * On read queue so we must increment the SB stuff,
736 * we assume caller has done any locks of SB.
738 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
740 *added += SCTP_BUF_LEN(m);
741 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
742 m = SCTP_BUF_NEXT(m);
745 control->tail_mbuf = prev;
/*
 * Initialize a fresh read-queue entry `nc` by copying the delivery
 * metadata of `control` (stream, MID, flags, PPID, TSNs, peer info),
 * taking a new reference on the shared whoFrom net.
 * fsn_included is reset to 0xffffffff ("no fragment merged yet").
 *
 * NOTE(review): `nc->mid = control->mid;` appears twice below (fused
 * lines 754 and 757); the second assignment is redundant but harmless.
 */
750 sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
752 memset(nc, 0, sizeof(struct sctp_queued_to_read));
753 nc->sinfo_stream = control->sinfo_stream;
754 nc->mid = control->mid;
755 TAILQ_INIT(&nc->reasm);
756 nc->top_fsn = control->top_fsn;
757 nc->mid = control->mid;
758 nc->sinfo_flags = control->sinfo_flags;
759 nc->sinfo_ppid = control->sinfo_ppid;
760 nc->sinfo_context = control->sinfo_context;
761 nc->fsn_included = 0xffffffff;
762 nc->sinfo_tsn = control->sinfo_tsn;
763 nc->sinfo_cumtsn = control->sinfo_cumtsn;
764 nc->sinfo_assoc_id = control->sinfo_assoc_id;
765 nc->whoFrom = control->whoFrom;
766 atomic_add_int(&nc->whoFrom->ref_count, 1);
767 nc->stcb = control->stcb;
768 nc->port_from = control->port_from;
/*
 * Reset a control entry's included-FSN to `tsn` and, if it already sits
 * on the endpoint's read queue, remove it from there.
 */
772 sctp_reset_a_control(struct sctp_queued_to_read *control,
773 struct sctp_inpcb *inp, uint32_t tsn)
775 control->fsn_included = tsn;
776 if (control->on_read_q) {
778 * We have to purge it from there, hopefully this will work
781 TAILQ_REMOVE(&inp->read_queue, control, next);
782 control->on_read_q = 0;
/*
 * Old-style (non I-DATA) unordered reassembly: all fragments map to MID 0,
 * so this collapses in-sequence fragments from control->reasm into the
 * control, delivers completed messages, and — because multiple messages
 * can interleave on one control — may spill leftover fragments onto a new
 * control entry (`nc`). pd_point gates partial delivery.
 */
787 sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
788 struct sctp_association *asoc,
789 struct sctp_stream_in *strm,
790 struct sctp_queued_to_read *control,
792 int inp_read_lock_held)
795 * Special handling for the old un-ordered data chunk. All the
796 * chunks/TSN's go to mid 0. So we have to do the old style watching
797 * to see if we have it all. If you return one, no other control
798 * entries on the un-ordered queue will be looked at. In theory
799 * there should be no others entries in reality, unless the guy is
800 * sending both unordered NDATA and unordered DATA...
802 struct sctp_tmit_chunk *chk, *lchk, *tchk;
804 struct sctp_queued_to_read *nc;
807 if (control->first_frag_seen == 0) {
808 /* Nothing we can do, we have not seen the first piece yet */
811 /* Collapse any we can */
814 fsn = control->fsn_included + 1;
815 /* Now what can we add? */
816 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
817 if (chk->rec.data.fsn == fsn) {
/* Next expected fragment: merge it into the control. */
819 sctp_alloc_a_readq(stcb, nc);
823 memset(nc, 0, sizeof(struct sctp_queued_to_read));
824 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
825 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
829 if (control->end_added) {
831 if (!TAILQ_EMPTY(&control->reasm)) {
833 * Ok we have to move anything left
834 * on the control queue to a new
837 sctp_build_readq_entry_from_ctl(nc, control);
838 tchk = TAILQ_FIRST(&control->reasm);
/* Leftover starts with a FIRST_FRAG: seed `nc` with its data. */
839 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
840 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
841 if (asoc->size_on_reasm_queue >= tchk->send_size) {
842 asoc->size_on_reasm_queue -= tchk->send_size;
845 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
847 asoc->size_on_reasm_queue = 0;
850 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
851 nc->first_frag_seen = 1;
852 nc->fsn_included = tchk->rec.data.fsn;
853 nc->data = tchk->data;
854 nc->sinfo_ppid = tchk->rec.data.ppid;
855 nc->sinfo_tsn = tchk->rec.data.tsn;
856 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
858 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
859 sctp_setup_tail_pointer(nc);
860 tchk = TAILQ_FIRST(&control->reasm);
862 /* Spin the rest onto the queue */
864 TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
865 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
866 tchk = TAILQ_FIRST(&control->reasm);
869 * Now lets add it to the queue
870 * after removing control
872 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
873 nc->on_strm_q = SCTP_ON_UNORDERED;
874 if (control->on_strm_q) {
875 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
876 control->on_strm_q = 0;
/* Completed delivery ends any partial-delivery API in progress. */
879 if (control->pdapi_started) {
880 strm->pd_api_started = 0;
881 control->pdapi_started = 0;
883 if (control->on_strm_q) {
884 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
885 control->on_strm_q = 0;
886 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
888 if (control->on_read_q == 0) {
889 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
890 &stcb->sctp_socket->so_rcv, control->end_added,
891 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
893 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/* If `nc` picked up a new message start, continue processing with it. */
894 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
896 * Switch to the new guy and
/* Otherwise release the unused spare control. */
902 if (nc->on_strm_q == 0) {
903 sctp_free_a_readq(stcb, nc);
908 sctp_free_a_readq(stcb, nc);
/* Message not complete but large enough: start partial delivery. */
915 if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
916 strm->pd_api_started = 1;
917 control->pdapi_started = 1;
918 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
919 &stcb->sctp_socket->so_rcv, control->end_added,
920 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
921 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * Insert an old-style unordered fragment into `control`:
 * a FIRST_FRAG may seed the control's data directly (or swap with an
 * already-seeded first when the new one has a smaller FSN); any other
 * fragment is placed FSN-sorted on control->reasm. Duplicate or
 * conflicting FSNs abort the association via sctp_abort_in_reasm().
 */
929 sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
930 struct sctp_association *asoc,
931 struct sctp_queued_to_read *control,
932 struct sctp_tmit_chunk *chk,
935 struct sctp_tmit_chunk *at;
939 * Here we need to place the chunk into the control structure sorted
940 * in the correct order.
942 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
943 /* Its the very first one. */
944 SCTPDBG(SCTP_DEBUG_XXX,
945 "chunk is a first fsn: %u becomes fsn_included\n",
947 if (control->first_frag_seen) {
949 * In old un-ordered we can reassembly on one
950 * control multiple messages. As long as the next
951 * FIRST is greater then the old first (TSN i.e. FSN
957 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
959 * Easy way the start of a new guy beyond
/* Equal FSN, or PD-API already running on the other first: unrecoverable. */
964 if ((chk->rec.data.fsn == control->fsn_included) ||
965 (control->pdapi_started)) {
967 * Ok this should not happen, if it does we
968 * started the pd-api on the higher TSN
969 * (since the equals part is a TSN failure
972 * We are completly hosed in that case since
973 * I have no way to recover. This really
974 * will only happen if we can get more TSN's
975 * higher before the pd-api-point.
977 sctp_abort_in_reasm(stcb, control, chk,
979 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
984 * Ok we have two firsts and the one we just got is
985 * smaller than the one we previously placed.. yuck!
986 * We must swap them out.
/* Swap data, FSN, TSN, and PPID between the control and the chunk. */
989 tdata = control->data;
990 control->data = chk->data;
992 /* Save the lengths */
993 chk->send_size = control->length;
994 /* Recompute length of control and tail pointer */
995 sctp_setup_tail_pointer(control);
996 /* Fix the FSN included */
997 tmp = control->fsn_included;
998 control->fsn_included = chk->rec.data.fsn;
999 chk->rec.data.fsn = tmp;
1000 /* Fix the TSN included */
1001 tmp = control->sinfo_tsn;
1002 control->sinfo_tsn = chk->rec.data.tsn;
1003 chk->rec.data.tsn = tmp;
1004 /* Fix the PPID included */
1005 tmp = control->sinfo_ppid;
1006 control->sinfo_ppid = chk->rec.data.ppid;
1007 chk->rec.data.ppid = tmp;
1008 /* Fix tail pointer */
/* First FIRST_FRAG seen: seed the control directly from this chunk. */
1011 control->first_frag_seen = 1;
1012 control->fsn_included = chk->rec.data.fsn;
1013 control->top_fsn = chk->rec.data.fsn;
1014 control->sinfo_tsn = chk->rec.data.tsn;
1015 control->sinfo_ppid = chk->rec.data.ppid;
1016 control->data = chk->data;
1017 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1019 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1020 sctp_setup_tail_pointer(control);
/* Non-first fragment: keep the reasm list sorted by FSN. */
1025 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1026 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1028 * This one in queue is bigger than the new one,
1029 * insert the new one before at.
1031 asoc->size_on_reasm_queue += chk->send_size;
1032 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1034 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1036 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1038 * They sent a duplicate fsn number. This really
1039 * should not happen since the FSN is a TSN and it
1040 * should have been dropped earlier.
1042 sctp_abort_in_reasm(stcb, control, chk,
1044 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
1048 if (inserted == 0) {
1049 /* Its at the end */
1050 asoc->size_on_reasm_queue += chk->send_size;
1051 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1052 control->top_fsn = chk->rec.data.fsn;
1053 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
1058 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
1059 struct sctp_stream_in *strm, int inp_read_lock_held)
1062 * Given a stream, strm, see if any of the SSN's on it that are
1063 * fragmented are ready to deliver. If so go ahead and place them on
1064 * the read queue. In so placing if we have hit the end, then we
1065 * need to remove them from the stream's queue.
1067 struct sctp_queued_to_read *control, *nctl = NULL;
1068 uint32_t next_to_del;
1072 if (stcb->sctp_socket) {
1073 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
1074 stcb->sctp_ep->partial_delivery_point);
1076 pd_point = stcb->sctp_ep->partial_delivery_point;
1078 control = TAILQ_FIRST(&strm->uno_inqueue);
1080 if ((control != NULL) &&
1081 (asoc->idata_supported == 0)) {
1082 /* Special handling needed for "old" data format */
1083 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
1087 if (strm->pd_api_started) {
1088 /* Can't add more */
1092 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
1093 control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
1094 nctl = TAILQ_NEXT(control, next_instrm);
1095 if (control->end_added) {
1096 /* We just put the last bit on */
1097 if (control->on_strm_q) {
1099 if (control->on_strm_q != SCTP_ON_UNORDERED) {
1100 panic("Huh control: %p on_q: %d -- not unordered?",
1101 control, control->on_strm_q);
1104 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1105 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1106 control->on_strm_q = 0;
1108 if (control->on_read_q == 0) {
1109 sctp_add_to_readq(stcb->sctp_ep, stcb,
1111 &stcb->sctp_socket->so_rcv, control->end_added,
1112 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1115 /* Can we do a PD-API for this un-ordered guy? */
1116 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
1117 strm->pd_api_started = 1;
1118 control->pdapi_started = 1;
1119 sctp_add_to_readq(stcb->sctp_ep, stcb,
1121 &stcb->sctp_socket->so_rcv, control->end_added,
1122 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1130 control = TAILQ_FIRST(&strm->inqueue);
1131 if (strm->pd_api_started) {
1132 /* Can't add more */
1135 if (control == NULL) {
1138 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
1140 * Ok the guy at the top was being partially delivered
1141 * completed, so we remove it. Note the pd_api flag was
1142 * taken off when the chunk was merged on in
1143 * sctp_queue_data_for_reasm below.
1145 nctl = TAILQ_NEXT(control, next_instrm);
1146 SCTPDBG(SCTP_DEBUG_XXX,
1147 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
1148 control, control->end_added, control->mid,
1149 control->top_fsn, control->fsn_included,
1150 strm->last_mid_delivered);
1151 if (control->end_added) {
1152 if (control->on_strm_q) {
1154 if (control->on_strm_q != SCTP_ON_ORDERED) {
1155 panic("Huh control: %p on_q: %d -- not ordered?",
1156 control, control->on_strm_q);
1159 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1160 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1161 if (asoc->size_on_all_streams >= control->length) {
1162 asoc->size_on_all_streams -= control->length;
1165 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1167 asoc->size_on_all_streams = 0;
1170 sctp_ucount_decr(asoc->cnt_on_all_streams);
1171 control->on_strm_q = 0;
1173 if (strm->pd_api_started && control->pdapi_started) {
1174 control->pdapi_started = 0;
1175 strm->pd_api_started = 0;
1177 if (control->on_read_q == 0) {
1178 sctp_add_to_readq(stcb->sctp_ep, stcb,
1180 &stcb->sctp_socket->so_rcv, control->end_added,
1181 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1186 if (strm->pd_api_started) {
1188 * Can't add more must have gotten an un-ordered above being
1189 * partially delivered.
1194 next_to_del = strm->last_mid_delivered + 1;
1196 SCTPDBG(SCTP_DEBUG_XXX,
1197 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
1198 control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
1200 nctl = TAILQ_NEXT(control, next_instrm);
1201 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
1202 (control->first_frag_seen)) {
1205 /* Ok we can deliver it onto the stream. */
1206 if (control->end_added) {
1207 /* We are done with it afterwards */
1208 if (control->on_strm_q) {
1210 if (control->on_strm_q != SCTP_ON_ORDERED) {
1211 panic("Huh control: %p on_q: %d -- not ordered?",
1212 control, control->on_strm_q);
1215 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
1216 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1217 if (asoc->size_on_all_streams >= control->length) {
1218 asoc->size_on_all_streams -= control->length;
1221 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1223 asoc->size_on_all_streams = 0;
1226 sctp_ucount_decr(asoc->cnt_on_all_streams);
1227 control->on_strm_q = 0;
1231 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
1233 * A singleton now slipping through - mark
1234 * it non-revokable too
1236 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1237 } else if (control->end_added == 0) {
1239 * Check if we can defer adding until its
1242 if ((control->length < pd_point) || (strm->pd_api_started)) {
1244 * Don't need it or cannot add more
1245 * (one being delivered that way)
1250 done = (control->end_added) && (control->last_frag_seen);
1251 if (control->on_read_q == 0) {
1253 if (asoc->size_on_all_streams >= control->length) {
1254 asoc->size_on_all_streams -= control->length;
1257 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
1259 asoc->size_on_all_streams = 0;
1262 strm->pd_api_started = 1;
1263 control->pdapi_started = 1;
1265 sctp_add_to_readq(stcb->sctp_ep, stcb,
1267 &stcb->sctp_socket->so_rcv, control->end_added,
1268 inp_read_lock_held, SCTP_SO_NOT_LOCKED);
1270 strm->last_mid_delivered = next_to_del;
/*
 * sctp_add_chk_to_control():
 * Merge the data mbufs carried by chunk 'chk' onto the tail of the
 * partially reassembled message 'control', update the reassembly-queue
 * accounting, and free the chunk's resources.
 *
 * NOTE(review): this excerpt has gaps (missing original source lines), so
 * the return type, local declarations (e.g. 'added'), several else-branches,
 * closing braces, and the final return are not visible here — confirm
 * against the full file before relying on control flow details.
 */
1283 sctp_add_chk_to_control(struct sctp_queued_to_read *control,
1284 struct sctp_stream_in *strm,
1285 struct sctp_tcb *stcb, struct sctp_association *asoc,
1286 struct sctp_tmit_chunk *chk, int hold_rlock)
1289 * Given a control and a chunk, merge the data from the chk onto the
1290 * control and free up the chunk resources.
/*
 * If the message is already on the socket read queue (PD-API in progress)
 * and the caller does not already hold the INP read lock, take it before
 * touching control->data.
 */
1295 if (control->on_read_q && (hold_rlock == 0)) {
1297 * Its being pd-api'd so we must do some locks.
1299 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* First data for this control: the chunk's mbufs become the message head. */
1302 if (control->data == NULL) {
1303 control->data = chk->data;
1304 sctp_setup_tail_pointer(control);
/* Otherwise append the chunk's mbufs at the cached tail pointer. */
1306 sctp_add_to_tail_pointer(control, chk->data, &added);
/* The chunk leaves the reassembly queue: adjust size/count accounting. */
1308 control->fsn_included = chk->rec.data.fsn;
1309 asoc->size_on_reasm_queue -= chk->send_size;
1310 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1311 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
/* A FIRST fragment supplies the message-level TSN and PPID. */
1313 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1314 control->first_frag_seen = 1;
1315 control->sinfo_tsn = chk->rec.data.tsn;
1316 control->sinfo_ppid = chk->rec.data.ppid;
/*
 * A LAST fragment completes the message: clear any partial-delivery
 * state and unlink the control from its stream reassembly queue.
 */
1318 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1320 if ((control->on_strm_q) && (control->on_read_q)) {
1321 if (control->pdapi_started) {
1322 control->pdapi_started = 0;
1323 strm->pd_api_started = 0;
1325 if (control->on_strm_q == SCTP_ON_UNORDERED) {
1327 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
1328 control->on_strm_q = 0;
1329 } else if (control->on_strm_q == SCTP_ON_ORDERED) {
1331 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
1333 * Don't need to decrement
1334 * size_on_all_streams, since control is on
1337 sctp_ucount_decr(asoc->cnt_on_all_streams);
1338 control->on_strm_q = 0;
/* Any other on_strm_q value means corrupted queue state: panic. */
1340 } else if (control->on_strm_q) {
1341 panic("Unknown state on ctrl: %p on_strm_q: %d", control,
1342 control->on_strm_q);
1346 control->end_added = 1;
1347 control->last_frag_seen = 1;
/* Drop the read lock only if we acquired it at the top of the function. */
1350 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1352 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1357 * Dump onto the re-assembly queue, in its proper place. After dumping on the
1358 * queue, see if anything can be delivered. If so pull it off (or as much as
1359 * we can. If we run out of space then we must dump what we can and set the
1360 * appropriate flag to say we queued what we could.
/*
 * NOTE(review): this excerpt is missing original source lines (return type,
 * some locals such as 'inserted'/'next_fsn'/'lenadded', else-branches and
 * closing braces), so control flow below is only partially visible —
 * confirm against the full file before acting on it.
 */
1363 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1364 struct sctp_queued_to_read *control,
1365 struct sctp_tmit_chunk *chk,
1366 int created_control,
1367 int *abort_flag, uint32_t tsn)
1370 struct sctp_tmit_chunk *at, *nat;
1371 struct sctp_stream_in *strm;
1372 int do_wakeup, unordered;
1375 strm = &asoc->strmin[control->sinfo_stream];
1377 * For old un-ordered data chunks.
1379 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
/*
 * A freshly created control must first be placed on the proper
 * stream-in queue; a duplicate SSN there aborts the association.
 */
1384 /* Must be added to the stream-in queue */
1385 if (created_control) {
1386 if (unordered == 0) {
1387 sctp_ucount_incr(asoc->cnt_on_all_streams);
1389 if (sctp_place_control_in_stream(strm, asoc, control)) {
1390 /* Duplicate SSN? */
1391 sctp_abort_in_reasm(stcb, control, chk,
1393 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1394 sctp_clean_up_control(stcb, control);
/*
 * For old DATA at cum-ack+1: the chunk must carry the B (first) bit,
 * otherwise the sequence is invalid and we abort.
 */
1397 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
1399 * Ok we created this control and now lets validate
1400 * that its legal i.e. there is a B bit set, if not
1401 * and we have up to the cum-ack then its invalid.
1403 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1404 sctp_abort_in_reasm(stcb, control, chk,
1406 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
/* Old (non-I-DATA) unordered fragments take a special injection path. */
1411 if ((asoc->idata_supported == 0) && (unordered == 1)) {
1412 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
1416 * Ok we must queue the chunk into the reasembly portion: o if its
1417 * the first it goes to the control mbuf. o if its not first but the
1418 * next in sequence it goes to the control, and each succeeding one
1419 * in order also goes. o if its not in order we place it on the list
/*
 * FIRST fragment: its data seeds the control directly. Seeing a second
 * FIRST for the same message is a sender error and aborts.
 */
1422 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1423 /* Its the very first one. */
1424 SCTPDBG(SCTP_DEBUG_XXX,
1425 "chunk is a first fsn: %u becomes fsn_included\n",
1427 if (control->first_frag_seen) {
1429 * Error on senders part, they either sent us two
1430 * data chunks with FIRST, or they sent two
1431 * un-ordered chunks that were fragmented at the
1432 * same time in the same stream.
1434 sctp_abort_in_reasm(stcb, control, chk,
1436 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1439 control->first_frag_seen = 1;
1440 control->sinfo_ppid = chk->rec.data.ppid;
1441 control->sinfo_tsn = chk->rec.data.tsn;
1442 control->fsn_included = chk->rec.data.fsn;
1443 control->data = chk->data;
1444 sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
1446 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1447 sctp_setup_tail_pointer(control);
1448 asoc->size_on_all_streams += control->length;
/*
 * MIDDLE/LAST fragment: validate the FSN against what has already been
 * delivered (fsn_included) and against top_fsn once LAST was seen.
 */
1450 /* Place the chunk in our list */
1453 if (control->last_frag_seen == 0) {
1454 /* Still willing to raise highest FSN seen */
1455 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1456 SCTPDBG(SCTP_DEBUG_XXX,
1457 "We have a new top_fsn: %u\n",
1459 control->top_fsn = chk->rec.data.fsn;
1461 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1462 SCTPDBG(SCTP_DEBUG_XXX,
1463 "The last fsn is now in place fsn: %u\n",
1465 control->last_frag_seen = 1;
1467 if (asoc->idata_supported || control->first_frag_seen) {
1469 * For IDATA we always check since we know
1470 * that the first fragment is 0. For old
1471 * DATA we have to receive the first before
1472 * we know the first FSN (which is the TSN).
1474 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1476 * We have already delivered up to
1479 sctp_abort_in_reasm(stcb, control, chk,
1481 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
/* A second LAST fragment for the same message is a protocol violation. */
1486 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1487 /* Second last? huh? */
1488 SCTPDBG(SCTP_DEBUG_XXX,
1489 "Duplicate last fsn: %u (top: %u) -- abort\n",
1490 chk->rec.data.fsn, control->top_fsn);
1491 sctp_abort_in_reasm(stcb, control,
1493 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1496 if (asoc->idata_supported || control->first_frag_seen) {
1498 * For IDATA we always check since we know
1499 * that the first fragment is 0. For old
1500 * DATA we have to receive the first before
1501 * we know the first FSN (which is the TSN).
1504 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
1506 * We have already delivered up to
1509 SCTPDBG(SCTP_DEBUG_XXX,
1510 "New fsn: %u is already seen in included_fsn: %u -- abort\n",
1511 chk->rec.data.fsn, control->fsn_included);
1512 sctp_abort_in_reasm(stcb, control, chk,
1514 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1519 * validate not beyond top FSN if we have seen last
1522 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
1523 SCTPDBG(SCTP_DEBUG_XXX,
1524 "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
1527 sctp_abort_in_reasm(stcb, control, chk,
1529 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
/*
 * Insert the chunk into the control's reassembly list, kept sorted
 * by FSN; an FSN already present is treated as a duplicate and aborts.
 */
1534 * If we reach here, we need to place the new chunk in the
1535 * reassembly for this control.
1537 SCTPDBG(SCTP_DEBUG_XXX,
1538 "chunk is a not first fsn: %u needs to be inserted\n",
1540 TAILQ_FOREACH(at, &control->reasm, sctp_next) {
1541 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
1543 * This one in queue is bigger than the new
1544 * one, insert the new one before at.
1546 SCTPDBG(SCTP_DEBUG_XXX,
1547 "Insert it before fsn: %u\n",
1549 asoc->size_on_reasm_queue += chk->send_size;
1550 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1551 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1554 } else if (at->rec.data.fsn == chk->rec.data.fsn) {
1556 * Gak, He sent me a duplicate str seq
1560 * foo bar, I guess I will just free this
1561 * new guy, should we abort too? FIX ME
1562 * MAYBE? Or it COULD be that the SSN's have
1563 * wrapped. Maybe I should compare to TSN
1564 * somehow... sigh for now just blow away
1567 SCTPDBG(SCTP_DEBUG_XXX,
1568 "Duplicate to fsn: %u -- abort\n",
1570 sctp_abort_in_reasm(stcb, control,
1572 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1576 if (inserted == 0) {
1577 /* Goes on the end */
1578 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
1580 asoc->size_on_reasm_queue += chk->send_size;
1581 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1582 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
/*
 * Drain pass: pull consecutive FSNs (fsn_included+1, +2, ...) off the
 * reassembly list onto the control, updating accounting and possibly
 * pushing the message onto the socket read queue when it completes.
 */
1586 * Ok lets see if we can suck any up into the control structure that
1587 * are in seq if it makes sense.
1591 * If the first fragment has not been seen there is no sense in
1594 if (control->first_frag_seen) {
1595 next_fsn = control->fsn_included + 1;
1596 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1597 if (at->rec.data.fsn == next_fsn) {
1598 /* We can add this one now to the control */
1599 SCTPDBG(SCTP_DEBUG_XXX,
1600 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1603 next_fsn, control->fsn_included);
1604 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1605 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1606 if (control->on_read_q) {
1610 * We only add to the
1611 * size-on-all-streams if its not on
1612 * the read q. The read q flag will
1613 * cause a sballoc so its accounted
1616 asoc->size_on_all_streams += lenadded;
1619 if (control->end_added && control->pdapi_started) {
1620 if (strm->pd_api_started) {
1621 strm->pd_api_started = 0;
1622 control->pdapi_started = 0;
1624 if (control->on_read_q == 0) {
1625 sctp_add_to_readq(stcb->sctp_ep, stcb,
1627 &stcb->sctp_socket->so_rcv, control->end_added,
1628 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1638 /* Need to wakeup the reader */
1639 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
/*
 * sctp_find_reasm_entry():
 * Locate the reassembly control for message id 'mid' on stream 'strm'.
 * The ordered inqueue is searched by MID; for I-DATA the unordered queue
 * is also searched by MID, while for old DATA the head of the unordered
 * queue is taken instead (old unordered messages have no usable MID).
 * Returns the matching control, or NULL if none — presumably; the
 * NOTE(review) below applies.
 *
 * NOTE(review): this excerpt is missing original lines (the opening and
 * closing braces, break/return statements, and any 'ordered' gating of
 * the first loop) — confirm against the full file.
 */
1643 static struct sctp_queued_to_read *
1644 sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
1646 struct sctp_queued_to_read *control;
1649 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
1650 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1655 if (idata_supported) {
1656 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
1657 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
1662 control = TAILQ_FIRST(&strm->uno_inqueue);
/*
 * sctp_process_a_data_chunk():
 * Process one incoming DATA or I-DATA chunk: parse its header, run the
 * duplicate/validity/rwnd checks, mark the TSN in the mapping arrays, and
 * either express-deliver the message to the socket, queue it on the stream
 * in-order queue, or hand it to the reassembly machinery.
 *
 * NOTE(review): this excerpt is missing many original source lines (return
 * type, several locals, else-branches, returns and closing braces), so the
 * control flow annotated below is only partially visible — confirm against
 * the full file before relying on it.
 */
1669 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1670 struct mbuf **m, int offset, int chk_length,
1671 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
1672 int *break_flag, int last_chunk, uint8_t chk_type)
1674 /* Process a data chunk */
1675 /* struct sctp_tmit_chunk *chk; */
1676 struct sctp_tmit_chunk *chk;
1677 uint32_t tsn, fsn, gap, mid;
1680 int need_reasm_check = 0;
1682 struct mbuf *op_err;
1683 char msg[SCTP_DIAG_INFO_LEN];
1684 struct sctp_queued_to_read *control, *ncontrol;
1687 struct sctp_stream_reset_list *liste;
1690 int created_control = 0;
/*
 * Parse the chunk header. I-DATA carries a 32-bit MID and a PPID/FSN
 * union (PPID on the first fragment, FSN on later ones); old DATA carries
 * a 16-bit SSN which is widened into 'mid'.
 */
1692 if (chk_type == SCTP_IDATA) {
1693 struct sctp_idata_chunk *chunk, chunk_buf;
1695 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
1696 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
1697 chk_flags = chunk->ch.chunk_flags;
1698 clen = sizeof(struct sctp_idata_chunk);
1699 tsn = ntohl(chunk->dp.tsn);
1700 sid = ntohs(chunk->dp.sid);
1701 mid = ntohl(chunk->dp.mid);
1702 if (chk_flags & SCTP_DATA_FIRST_FRAG) {
1704 ppid = chunk->dp.ppid_fsn.ppid;
1706 fsn = ntohl(chunk->dp.ppid_fsn.fsn);
1707 ppid = 0xffffffff; /* Use as an invalid value. */
1710 struct sctp_data_chunk *chunk, chunk_buf;
1712 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
1713 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
1714 chk_flags = chunk->ch.chunk_flags;
1715 clen = sizeof(struct sctp_data_chunk);
1716 tsn = ntohl(chunk->dp.tsn);
1717 sid = ntohs(chunk->dp.sid);
1718 mid = (uint32_t)(ntohs(chunk->dp.ssn));
1720 ppid = chunk->dp.ppid;
/* A chunk carrying zero bytes of user data is a protocol violation. */
1722 if ((size_t)chk_length == clen) {
1724 * Need to send an abort since we had a empty data chunk.
1726 op_err = sctp_generate_no_user_data_cause(tsn);
1727 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1728 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1732 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1733 asoc->send_sack = 1;
1735 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
1736 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1737 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1742 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
/* TSN at or below the cumulative ack: duplicate; record it for the SACK. */
1743 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1744 /* It is a duplicate */
1745 SCTP_STAT_INCR(sctps_recvdupdata);
1746 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1747 /* Record a dup for the next outbound sack */
1748 asoc->dup_tsns[asoc->numduptsns] = tsn;
1751 asoc->send_sack = 1;
/*
 * Map the TSN to a bit offset in the mapping arrays; grow the array if
 * needed, or drop the chunk when the gap exceeds the maximum array size.
 */
1754 /* Calculate the number of TSN's between the base and this TSN */
1755 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1756 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1757 /* Can't hold the bit in the mapping at max array, toss it */
1760 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1761 SCTP_TCB_LOCK_ASSERT(stcb);
1762 if (sctp_expand_mapping_array(asoc, gap)) {
1763 /* Can't expand, drop it */
1767 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1770 /* See if we have received this one already */
1771 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1772 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1773 SCTP_STAT_INCR(sctps_recvdupdata);
1774 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1775 /* Record a dup for the next outbound sack */
1776 asoc->dup_tsns[asoc->numduptsns] = tsn;
1779 asoc->send_sack = 1;
1783 * Check to see about the GONE flag, duplicates would cause a sack
1784 * to be sent up above
/* Socket is gone or closed: no receiver exists, abort the association. */
1786 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1787 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1788 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1790 * wait a minute, this guy is gone, there is no longer a
1791 * receiver. Send peer an ABORT!
1793 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1794 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1799 * Now before going further we see if there is room. If NOT then we
1800 * MAY let one through only IF this TSN is the one we are waiting
1801 * for on a partial delivery API.
/*
 * Invalid stream id: queue an ERROR cause for the peer, but still mark
 * the TSN as received (non-renegable) and advance cum-ack if possible.
 */
1804 /* Is the stream valid? */
1805 if (sid >= asoc->streamincnt) {
1806 struct sctp_error_invalid_stream *cause;
1808 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
1809 0, M_NOWAIT, 1, MT_DATA);
1810 if (op_err != NULL) {
1811 /* add some space up front so prepend will work well */
1812 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
1813 cause = mtod(op_err, struct sctp_error_invalid_stream *);
1815 * Error causes are just param's and this one has
1816 * two back to back phdr, one with the error type
1817 * and size, the other with the streamid and a rsvd
1819 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
1820 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
1821 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
1822 cause->stream_id = htons(sid);
1823 cause->reserved = htons(0);
1824 sctp_queue_op_err(stcb, op_err);
1826 SCTP_STAT_INCR(sctps_badsid);
1827 SCTP_TCB_LOCK_ASSERT(stcb);
1828 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1829 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1830 asoc->highest_tsn_inside_nr_map = tsn;
1832 if (tsn == (asoc->cumulative_tsn + 1)) {
1833 /* Update cum-ack */
1834 asoc->cumulative_tsn = tsn;
1839 * If its a fragmented message, lets see if we can find the control
1840 * on the reassembly queues.
/* For I-DATA, only the FIRST fragment may carry FSN 0. */
1842 if ((chk_type == SCTP_IDATA) &&
1843 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
1846 * The first *must* be fsn 0, and other (middle/end) pieces
1847 * can *not* be fsn 0. XXX: This can happen in case of a
1848 * wrap around. Ignore is for now.
1850 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
1854 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
1855 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
1856 chk_flags, control);
/*
 * Fragmented chunk: validate that any found control matches this
 * fragment (same MID, same ordered/unordered flavor); mismatches abort.
 */
1857 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1858 /* See if we can find the re-assembly entity */
1859 if (control != NULL) {
1860 /* We found something, does it belong? */
1861 if (ordered && (mid != control->mid)) {
1862 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
1864 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1865 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1866 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1870 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
1872 * We can't have a switched order with an
1875 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
1879 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
1881 * We can't have a switched unordered with a
1884 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
/*
 * Complete (unfragmented) chunk: an existing reassembly control for the
 * same stream/MID indicates a duplicate or an unterminated message.
 */
1891 * Its a complete segment. Lets validate we don't have a
1892 * re-assembly going on with the same Stream/Seq (for
1893 * ordered) or in the same Stream for unordered.
1895 if (control != NULL) {
1896 if (ordered || asoc->idata_supported) {
1897 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
1899 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
1902 if ((tsn == control->fsn_included + 1) &&
1903 (control->end_added == 0)) {
1904 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
/*
 * Resource checks: too many queued chunks or a closed receive window.
 * If data is already readable, wake the application before deciding.
 */
1912 /* now do the tests */
1913 if (((asoc->cnt_on_all_streams +
1914 asoc->cnt_on_reasm_queue +
1915 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1916 (((int)asoc->my_rwnd) <= 0)) {
1918 * When we have NO room in the rwnd we check to make sure
1919 * the reader is doing its job...
1921 if (stcb->sctp_socket->so_rcv.sb_cc) {
1922 /* some to read, wake-up */
1923 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple/lock-testing builds must take the socket lock around the wakeup. */
1926 so = SCTP_INP_SO(stcb->sctp_ep);
1927 atomic_add_int(&stcb->asoc.refcnt, 1);
1928 SCTP_TCB_UNLOCK(stcb);
1929 SCTP_SOCKET_LOCK(so, 1);
1930 SCTP_TCB_LOCK(stcb);
1931 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1932 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1933 /* assoc was freed while we were unlocked */
1934 SCTP_SOCKET_UNLOCK(so, 1);
1938 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1939 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1940 SCTP_SOCKET_UNLOCK(so, 1);
1943 /* now is it in the mapping array of what we have accepted? */
1944 if (chk_type == SCTP_DATA) {
1945 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1946 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1947 /* Nope not in the valid range dump it */
1949 sctp_set_rwnd(stcb, asoc);
1950 if ((asoc->cnt_on_all_streams +
1951 asoc->cnt_on_reasm_queue +
1952 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1953 SCTP_STAT_INCR(sctps_datadropchklmt);
1955 SCTP_STAT_INCR(sctps_datadroprwnd);
1961 if (control == NULL) {
1964 if (SCTP_TSN_GT(fsn, control->top_fsn)) {
/* Optional per-association TSN trace ring (debug builds only). */
1969 #ifdef SCTP_ASOCLOG_OF_TSNS
1970 SCTP_TCB_LOCK_ASSERT(stcb);
1971 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1972 asoc->tsn_in_at = 0;
1973 asoc->tsn_in_wrapped = 1;
1975 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1976 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
1977 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
1978 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1979 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1980 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1981 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1982 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1986 * Before we continue lets validate that we are not being fooled by
1987 * an evil attacker. We can only have Nk chunks based on our TSN
1988 * spread allowed by the mapping array N * 8 bits, so there is no
1989 * way our stream sequence numbers could have wrapped. We of course
1990 * only validate the FIRST fragment so the bit must be set.
1992 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1993 (TAILQ_EMPTY(&asoc->resetHead)) &&
1994 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
1995 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
1996 /* The incoming sseq is behind where we last delivered? */
1997 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
1998 mid, asoc->strmin[sid].last_mid_delivered);
2000 if (asoc->idata_supported) {
2001 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2002 asoc->strmin[sid].last_mid_delivered,
2007 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2008 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2013 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2014 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
2015 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/*
 * Detach the user data: copy it out of the packet unless this is the
 * last chunk, in which case the packet's mbuf chain can be stolen and
 * trimmed in place.
 */
2019 if (chk_type == SCTP_IDATA) {
2020 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2022 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2024 if (last_chunk == 0) {
2025 if (chk_type == SCTP_IDATA) {
2026 dmbuf = SCTP_M_COPYM(*m,
2027 (offset + sizeof(struct sctp_idata_chunk)),
2030 dmbuf = SCTP_M_COPYM(*m,
2031 (offset + sizeof(struct sctp_data_chunk)),
2034 #ifdef SCTP_MBUF_LOGGING
2035 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2036 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2040 /* We can steal the last chunk */
2044 /* lop off the top part */
2045 if (chk_type == SCTP_IDATA) {
2046 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2048 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2050 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2051 l_len = SCTP_BUF_LEN(dmbuf);
2054 * need to count up the size hopefully does not hit
2060 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2061 l_len += SCTP_BUF_LEN(lat);
2064 if (l_len > the_len) {
2065 /* Trim the end round bytes off too */
2066 m_adj(dmbuf, -(l_len - the_len));
2069 if (dmbuf == NULL) {
2070 SCTP_STAT_INCR(sctps_nomem);
2074 * Now no matter what, we need a control, get one if we don't have
2075 * one (we may have gotten it above when we found the message was
/* Allocate a readq entry when none was found on the reassembly queues. */
2078 if (control == NULL) {
2079 sctp_alloc_a_readq(stcb, control);
2080 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2085 if (control == NULL) {
2086 SCTP_STAT_INCR(sctps_nomem);
/* Unfragmented message: the control is complete as soon as it is built. */
2089 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2092 control->data = dmbuf;
2093 for (mm = control->data; mm; mm = mm->m_next) {
2094 control->length += SCTP_BUF_LEN(mm);
2096 control->tail_mbuf = NULL;
2097 control->end_added = 1;
2098 control->last_frag_seen = 1;
2099 control->first_frag_seen = 1;
2100 control->fsn_included = fsn;
2101 control->top_fsn = fsn;
2103 created_control = 1;
2105 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2106 chk_flags, ordered, mid, control);
/*
 * Express delivery fast path: unfragmented, no pending stream reset,
 * and (for ordered) exactly the next MID with an empty stream queue —
 * push straight onto the socket receive buffer.
 */
2107 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2108 TAILQ_EMPTY(&asoc->resetHead) &&
2110 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2111 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2112 /* Candidate for express delivery */
2114 * Its not fragmented, No PD-API is up, Nothing in the
2115 * delivery queue, Its un-ordered OR ordered and the next to
2116 * deliver AND nothing else is stuck on the stream queue,
2117 * And there is room for it in the socket buffer. Lets just
2118 * stuff it up the buffer....
2120 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2121 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2122 asoc->highest_tsn_inside_nr_map = tsn;
2124 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2127 sctp_add_to_readq(stcb->sctp_ep, stcb,
2128 control, &stcb->sctp_socket->so_rcv,
2129 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2131 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2132 /* for ordered, bump what we delivered */
2133 asoc->strmin[sid].last_mid_delivered++;
2135 SCTP_STAT_INCR(sctps_recvexpress);
2136 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2137 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2138 SCTP_STR_LOG_FROM_EXPRS_DEL);
2141 goto finish_express_del;
/* Fragmented path: allocate a tmit_chunk to carry this fragment. */
2143 /* Now will we need a chunk too? */
2144 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2145 sctp_alloc_a_chunk(stcb, chk);
2147 /* No memory so we drop the chunk */
2148 SCTP_STAT_INCR(sctps_nomem);
2149 if (last_chunk == 0) {
2150 /* we copied it, free the copy */
2151 sctp_m_freem(dmbuf);
2155 chk->rec.data.tsn = tsn;
2156 chk->no_fr_allowed = 0;
2157 chk->rec.data.fsn = fsn;
2158 chk->rec.data.mid = mid;
2159 chk->rec.data.sid = sid;
2160 chk->rec.data.ppid = ppid;
2161 chk->rec.data.context = stcb->asoc.context;
2162 chk->rec.data.doing_fast_retransmit = 0;
2163 chk->rec.data.rcv_flags = chk_flags;
2165 chk->send_size = the_len;
2167 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
2170 atomic_add_int(&net->ref_count, 1);
/*
 * Record the TSN: renegable TSNs go in mapping_array, non-renegable in
 * nr_mapping_array (always used when draining/renege is disabled).
 */
2173 /* Set the appropriate TSN mark */
2174 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2175 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2176 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2177 asoc->highest_tsn_inside_nr_map = tsn;
2180 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2181 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2182 asoc->highest_tsn_inside_map = tsn;
/*
 * Complete message, non-express path: either park it on the pending
 * stream-reset reply queue (sorted by TSN) or deliver it to the stream.
 */
2185 /* Now is it complete (i.e. not fragmented)? */
2186 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2188 * Special check for when streams are resetting. We could be
2189 * more smart about this and check the actual stream to see
2190 * if it is not being reset.. that way we would not create a
2191 * HOLB when amongst streams being reset and those not being
2195 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2196 SCTP_TSN_GT(tsn, liste->tsn)) {
2198 * yep its past where we need to reset... go ahead
2201 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2203 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2205 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2206 unsigned char inserted = 0;
2208 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2209 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2214 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2219 if (inserted == 0) {
2221 * must be put at end, use prevP
2222 * (all setup from loop) to setup
2225 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2228 goto finish_express_del;
2230 if (chk_flags & SCTP_DATA_UNORDERED) {
2231 /* queue directly into socket buffer */
2232 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2234 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2235 sctp_add_to_readq(stcb->sctp_ep, stcb,
2237 &stcb->sctp_socket->so_rcv, 1,
2238 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2241 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2243 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2251 goto finish_express_del;
/* Fragment: hand it to the reassembly machinery. */
2253 /* If we reach here its a reassembly */
2254 need_reasm_check = 1;
2255 SCTPDBG(SCTP_DEBUG_XXX,
2256 "Queue data to stream for reasm control: %p MID: %u\n",
2258 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2261 * the assoc is now gone and chk was put onto the reasm
2262 * queue, which has all been freed.
/*
 * Common tail: advance cum-ack, bump statistics, run the deferred
 * reassembly-delivery check, and process any pending stream resets.
 */
2270 /* Here we tidy up things */
2271 if (tsn == (asoc->cumulative_tsn + 1)) {
2272 /* Update cum-ack */
2273 asoc->cumulative_tsn = tsn;
2279 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2281 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2283 SCTP_STAT_INCR(sctps_recvdata);
2284 /* Set it present please */
2285 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2286 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2288 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2289 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2290 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2292 if (need_reasm_check) {
2293 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2294 need_reasm_check = 0;
2296 /* check the special flag for stream resets */
2297 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2298 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2300 * we have finished working through the backlogged TSN's now
2301 * time to reset streams. 1: call reset function. 2: free
2302 * pending_reply space 3: distribute any chunks in
2303 * pending_reply_queue.
2305 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2306 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2307 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2308 SCTP_FREE(liste, SCTP_M_STRESET);
2309 /* sa_ignore FREED_MEMORY */
2310 liste = TAILQ_FIRST(&asoc->resetHead);
2311 if (TAILQ_EMPTY(&asoc->resetHead)) {
2312 /* All can be removed */
2313 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2314 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2315 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2319 if (need_reasm_check) {
2320 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2321 need_reasm_check = 0;
/* More resets remain: only release entries at or below the next reset TSN. */
2325 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2326 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2330 * if control->sinfo_tsn is <= liste->tsn we
2331 * can process it which is the NOT of
2332 * control->sinfo_tsn > liste->tsn
2334 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2335 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2339 if (need_reasm_check) {
2340 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
2341 need_reasm_check = 0;
/*
 * Lookup table indexed by a mapping-array byte value: entry i is the
 * number of consecutive 1-bits starting at the least-significant bit of
 * i (range 0..8; e.g. tab[0x01] = 1, tab[0x03] = 2, tab[0xff] = 8).
 * sctp_slide_mapping_arrays() adds this to "at" to count the contiguous
 * run of received TSNs and locate the first 0 bit (the cum-ack frontier).
 */
2349 static const int8_t sctp_map_lookup_tab[256] = {
2350 0, 1, 0, 2, 0, 1, 0, 3,
2351 0, 1, 0, 2, 0, 1, 0, 4,
2352 0, 1, 0, 2, 0, 1, 0, 3,
2353 0, 1, 0, 2, 0, 1, 0, 5,
2354 0, 1, 0, 2, 0, 1, 0, 3,
2355 0, 1, 0, 2, 0, 1, 0, 4,
2356 0, 1, 0, 2, 0, 1, 0, 3,
2357 0, 1, 0, 2, 0, 1, 0, 6,
2358 0, 1, 0, 2, 0, 1, 0, 3,
2359 0, 1, 0, 2, 0, 1, 0, 4,
2360 0, 1, 0, 2, 0, 1, 0, 3,
2361 0, 1, 0, 2, 0, 1, 0, 5,
2362 0, 1, 0, 2, 0, 1, 0, 3,
2363 0, 1, 0, 2, 0, 1, 0, 4,
2364 0, 1, 0, 2, 0, 1, 0, 3,
2365 0, 1, 0, 2, 0, 1, 0, 7,
2366 0, 1, 0, 2, 0, 1, 0, 3,
2367 0, 1, 0, 2, 0, 1, 0, 4,
2368 0, 1, 0, 2, 0, 1, 0, 3,
2369 0, 1, 0, 2, 0, 1, 0, 5,
2370 0, 1, 0, 2, 0, 1, 0, 3,
2371 0, 1, 0, 2, 0, 1, 0, 4,
2372 0, 1, 0, 2, 0, 1, 0, 3,
2373 0, 1, 0, 2, 0, 1, 0, 6,
2374 0, 1, 0, 2, 0, 1, 0, 3,
2375 0, 1, 0, 2, 0, 1, 0, 4,
2376 0, 1, 0, 2, 0, 1, 0, 3,
2377 0, 1, 0, 2, 0, 1, 0, 5,
2378 0, 1, 0, 2, 0, 1, 0, 3,
2379 0, 1, 0, 2, 0, 1, 0, 4,
2380 0, 1, 0, 2, 0, 1, 0, 3,
2381 0, 1, 0, 2, 0, 1, 0, 8
/*
 * sctp_slide_mapping_arrays() - advance the association's cumulative-TSN
 * point from the OR of the two receive bit-maps, and when at least one
 * whole leading byte is fully acked, either wipe the maps (everything was
 * in order) or slide both maps down and rebase mapping_array_base_tsn.
 * NOTE(review): this extract is line-sampled; braces and #ifdef guard
 * lines between visible statements are not shown, so block boundaries
 * below are implied rather than visible.
 */
2386 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2389 * Now we also need to check the mapping array in a couple of ways.
2390 * 1) Did we move the cum-ack point?
2392 * When you first glance at this you might think that all entries
2393 * that make up the position of the cum-ack would be in the
2394 * nr-mapping array only.. i.e. things up to the cum-ack are always
2395 * deliverable. Thats true with one exception, when its a fragmented
2396 * message we may not deliver the data until some threshold (or all
2397 * of it) is in place. So we must OR the nr_mapping_array and
2398 * mapping_array to get a true picture of the cum-ack.
2400 struct sctp_association *asoc;
2403 int slide_from, slide_end, lgap, distance;
2404 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for the SCTP_MAP_LOGGING records below. */
2408 old_cumack = asoc->cumulative_tsn;
2409 old_base = asoc->mapping_array_base_tsn;
2410 old_highest = asoc->highest_tsn_inside_map;
2412 * We could probably improve this a small bit by calculating the
2413 * offset of the current cum-ack as the starting point.
/*
 * Walk the OR of both maps byte by byte; sctp_map_lookup_tab[] gives the
 * count of consecutive 1-bits from the LSB, so "at" accumulates the
 * number of contiguous received TSNs from mapping_array_base_tsn.
 */
2416 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2417 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2421 /* there is a 0 bit */
2422 at += sctp_map_lookup_tab[val];
/* New cum-ack = last TSN of the contiguous run of "at" set bits. */
2426 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/*
 * Sanity check: the cum-ack must not pass both highest-TSN trackers.
 * Panics under a debug build (the #ifdef guard is not visible in this
 * extract); otherwise logs, dumps the maps, and clamps the trackers.
 */
2428 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2429 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2431 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2432 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2434 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2435 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2436 sctp_print_mapping_array(asoc);
2437 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2438 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2440 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2441 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
/* highest_tsn = max of the two per-map highest-TSN trackers. */
2444 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2445 highest_tsn = asoc->highest_tsn_inside_nr_map;
2447 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Case 1: everything received in order (cum-ack == highest) and at
 * least one whole map byte in use -> just clear the used bytes and
 * rebase; no copy needed.
 */
2449 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2450 /* The complete array was completed by a single FR */
2451 /* highest becomes the cum-ack */
2457 /* clear the array */
2458 clr = ((at + 7) >> 3);
2459 if (clr > asoc->mapping_array_size) {
2460 clr = asoc->mapping_array_size;
2462 memset(asoc->mapping_array, 0, clr);
2463 memset(asoc->nr_mapping_array, 0, clr);
/* Debug sweep: after the clear both arrays must be all-zero. */
2465 for (i = 0; i < asoc->mapping_array_size; i++) {
2466 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2467 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2468 sctp_print_mapping_array(asoc);
/* Rebase: bit 0 of the maps now represents cumulative_tsn + 1. */
2472 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2473 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
/* Case 2: a gap exists but >= 1 leading byte is fully acked -> slide. */
2474 } else if (at >= 8) {
2475 /* we can slide the mapping array down */
2476 /* slide_from holds where we hit the first NON 0xff byte */
2479 * now calculate the ceiling of the move using our highest
/* lgap = bit offset of highest_tsn from the base; slide_end = its byte. */
2482 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2483 slide_end = (lgap >> 3);
2484 if (slide_end < slide_from) {
2485 sctp_print_mapping_array(asoc);
2487 panic("impossible slide");
2489 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2490 lgap, slide_end, slide_from, at);
2494 if (slide_end > asoc->mapping_array_size) {
2496 panic("would overrun buffer");
2498 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2499 asoc->mapping_array_size, slide_end);
2500 slide_end = asoc->mapping_array_size;
2503 distance = (slide_end - slide_from) + 1;
2504 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2505 sctp_log_map(old_base, old_cumack, old_highest,
2506 SCTP_MAP_PREPARE_SLIDE);
2507 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2508 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
/* If the live span would not fit after moving, skip the slide for now. */
2510 if (distance + slide_from > asoc->mapping_array_size ||
2513 * Here we do NOT slide forward the array so that
2514 * hopefully when more data comes in to fill it up
2515 * we will be able to slide it forward. Really I
2516 * don't think this should happen :-0
2519 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2520 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2521 (uint32_t)asoc->mapping_array_size,
2522 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes to the front, then zero the tails of both maps. */
2527 for (ii = 0; ii < distance; ii++) {
2528 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2529 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2532 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2533 asoc->mapping_array[ii] = 0;
2534 asoc->nr_mapping_array[ii] = 0;
/* Preserve the "empty map" invariant (highest == base - 1) across rebase. */
2536 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2537 asoc->highest_tsn_inside_map += (slide_from << 3);
2539 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2540 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2542 asoc->mapping_array_base_tsn += (slide_from << 3);
2543 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2544 sctp_log_map(asoc->mapping_array_base_tsn,
2545 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2546 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() - decide how to acknowledge the packet just
 * processed: slide the mapping arrays, then either send a SACK
 * immediately, start/continue the delayed-ack timer, or (in
 * SHUTDOWN-SENT state) send a SHUTDOWN plus an immediate SACK.
 * "was_a_gap" tells us whether a gap existed before this packet.
 * NOTE(review): extract is line-sampled; interleaved braces are hidden.
 */
2553 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2555 struct sctp_association *asoc;
2556 uint32_t highest_tsn;
2559 sctp_slide_mapping_arrays(stcb);
/* highest_tsn = max of the renegable and non-renegable map trackers. */
2561 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2562 highest_tsn = asoc->highest_tsn_inside_nr_map;
2564 highest_tsn = asoc->highest_tsn_inside_map;
2566 /* Is there a gap now? */
2567 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2570 * Now we need to see if we need to queue a sack or just start the
2571 * timer (if allowed).
2573 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2575 * Ok special case, in SHUTDOWN-SENT case. here we make
2576 * sure SACK timer is off and instead send a SHUTDOWN and a
/* Stop any pending delayed-ack timer before the immediate send. */
2579 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2580 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2581 stcb->sctp_ep, stcb, NULL,
2582 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
/* Re-send SHUTDOWN toward the alternate path if one is set. */
2584 sctp_send_shutdown(stcb,
2585 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2587 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2591 * CMT DAC algorithm: increase number of packets received
2594 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any of these conditions forces an immediate SACK (or DAC delay). */
2596 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2598 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2600 (stcb->asoc.numduptsns) || /* we have dup's */
2601 (is_a_gap) || /* is still a gap */
2602 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2603 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2606 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2607 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2608 (stcb->asoc.send_sack == 0) &&
2609 (stcb->asoc.numduptsns == 0) &&
2610 (stcb->asoc.delayed_ack) &&
2611 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2614 * CMT DAC algorithm: With CMT, delay acks
2615 * even in the face of
2617 * reordering. Therefore, if acks that do
2618 * not have to be sent because of the above
2619 * reasons, will be delayed. That is, acks
2620 * that would have been sent due to gap
2621 * reports will be delayed with DAC. Start
2622 * the delayed ack timer.
2624 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2625 stcb->sctp_ep, stcb, NULL);
2628 * Ok we must build a SACK since the timer
2629 * is pending, we got our first packet OR
2630 * there are gaps or duplicates.
2632 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2633 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Otherwise just make sure the delayed-ack timer is running. */
2636 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2637 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2638 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_process_data() - walk every chunk in the DATA region of a received
 * packet, dispatch DATA/I-DATA chunks to sctp_process_a_data_chunk(), abort
 * the association on protocol violations, and finish by kicking the SACK
 * machinery (sctp_sack_check()).
 *
 * FIX(review): the two protocol-violation messages below were swapped
 * relative to their conditions - when I-DATA was negotiated and a plain
 * DATA chunk arrives the text claimed the opposite, and vice versa. The
 * strings are now matched to their guards; no other token changed.
 * NOTE(review): extract is line-sampled; interleaved braces are hidden.
 */
2645 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2646 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2647 struct sctp_nets *net, uint32_t *high_tsn)
2649 struct sctp_chunkhdr *ch, chunk_buf;
2650 struct sctp_association *asoc;
2651 int num_chunks = 0; /* number of control chunks processed */
2653 int break_flag, last_chunk;
2654 int abort_flag = 0, was_a_gap;
2656 uint32_t highest_tsn;
2657 uint16_t chk_length;
/* Recompute the receive window before admitting data. */
2660 sctp_set_rwnd(stcb, &stcb->asoc);
2663 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap already existed (input to sctp_sack_check()). */
2665 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2666 highest_tsn = asoc->highest_tsn_inside_nr_map;
2668 highest_tsn = asoc->highest_tsn_inside_map;
2670 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2672 * setup where we got the last DATA packet from for any SACK that
2673 * may need to go out. Don't bump the net. This is done ONLY when a
2674 * chunk is assigned.
2676 asoc->last_data_chunk_from = net;
2679 * Now before we proceed we must figure out if this is a wasted
2680 * cluster... i.e. it is a small packet sent in and yet the driver
2681 * underneath allocated a full cluster for it. If so we must copy it
2682 * to a smaller mbuf and free up the cluster mbuf. This will help
2683 * with cluster starvation. Note for __Panda__ we don't do this
2684 * since it has clusters all the way down to 64 bytes.
2686 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2687 /* we only handle mbufs that are singletons.. not chains */
2688 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2690 /* ok lets see if we can copy the data up */
2693 /* get the pointers and copy */
2694 to = mtod(m, caddr_t *);
2695 from = mtod((*mm), caddr_t *);
2696 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2697 /* copy the length and free up the old */
2698 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2700 /* success, back copy */
2703 /* We are in trouble in the mbuf world .. yikes */
2707 /* get pointer to the first chunk header */
2708 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2709 sizeof(struct sctp_chunkhdr),
2710 (uint8_t *)&chunk_buf);
2715 * process all DATA chunks...
2717 *high_tsn = asoc->cumulative_tsn;
2719 asoc->data_pkts_seen++;
2720 while (stop_proc == 0) {
2721 /* validate chunk length */
2722 chk_length = ntohs(ch->chunk_length);
2723 if (length - *offset < chk_length) {
2724 /* all done, mutilated chunk */
/* Mixing DATA into an I-DATA association is a protocol violation. */
2728 if ((asoc->idata_supported == 1) &&
2729 (ch->chunk_type == SCTP_DATA)) {
2730 struct mbuf *op_err;
2731 char msg[SCTP_DIAG_INFO_LEN];
2733 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2734 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2735 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2736 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* ... and I-DATA without negotiation is equally fatal. */
2739 if ((asoc->idata_supported == 0) &&
2740 (ch->chunk_type == SCTP_IDATA)) {
2741 struct mbuf *op_err;
2742 char msg[SCTP_DIAG_INFO_LEN];
2744 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2745 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2746 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2747 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2750 if ((ch->chunk_type == SCTP_DATA) ||
2751 (ch->chunk_type == SCTP_IDATA)) {
/* Minimum header length differs between DATA and I-DATA chunks. */
2754 if (ch->chunk_type == SCTP_DATA) {
2755 clen = sizeof(struct sctp_data_chunk);
2757 clen = sizeof(struct sctp_idata_chunk);
2759 if (chk_length < clen) {
2761 * Need to send an abort since we had a
2762 * invalid data chunk.
2764 struct mbuf *op_err;
2765 char msg[SCTP_DIAG_INFO_LEN];
2767 snprintf(msg, sizeof(msg), "%s chunk of length %u",
2768 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2770 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2771 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2772 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2775 #ifdef SCTP_AUDITING_ENABLED
2776 sctp_audit_log(0xB1, 0);
/* last_chunk: padded chunk length exactly fills the remaining packet. */
2778 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2783 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2784 chk_length, net, high_tsn, &abort_flag, &break_flag,
2785 last_chunk, ch->chunk_type)) {
2793 * Set because of out of rwnd space and no
2794 * drop rep space left.
2800 /* not a data chunk in the data region */
2801 switch (ch->chunk_type) {
2802 case SCTP_INITIATION:
2803 case SCTP_INITIATION_ACK:
2804 case SCTP_SELECTIVE_ACK:
2805 case SCTP_NR_SELECTIVE_ACK:
2806 case SCTP_HEARTBEAT_REQUEST:
2807 case SCTP_HEARTBEAT_ACK:
2808 case SCTP_ABORT_ASSOCIATION:
2810 case SCTP_SHUTDOWN_ACK:
2811 case SCTP_OPERATION_ERROR:
2812 case SCTP_COOKIE_ECHO:
2813 case SCTP_COOKIE_ACK:
2816 case SCTP_SHUTDOWN_COMPLETE:
2817 case SCTP_AUTHENTICATION:
2818 case SCTP_ASCONF_ACK:
2819 case SCTP_PACKET_DROPPED:
2820 case SCTP_STREAM_RESET:
2821 case SCTP_FORWARD_CUM_TSN:
2825 * Now, what do we do with KNOWN
2826 * chunks that are NOT in the right
2829 * For now, I do nothing but ignore
2830 * them. We may later want to add
2831 * sysctl stuff to switch out and do
2832 * either an ABORT() or possibly
2835 struct mbuf *op_err;
2836 char msg[SCTP_DIAG_INFO_LEN];
2838 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2840 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2841 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
2846 * Unknown chunk type: use bit rules after
2849 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2851 * Need to send an abort since we
2852 * had a invalid chunk.
2854 struct mbuf *op_err;
2855 char msg[SCTP_DIAG_INFO_LEN];
2857 snprintf(msg, sizeof(msg), "Chunk of length %u",
2859 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2860 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
2861 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* High-order bits of the type encode RFC 4960 unknown-chunk handling. */
2864 if (ch->chunk_type & 0x40) {
2865 /* Add a error report to the queue */
2866 struct mbuf *op_err;
2867 struct sctp_gen_error_cause *cause;
2869 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2870 0, M_NOWAIT, 1, MT_DATA);
2871 if (op_err != NULL) {
2872 cause = mtod(op_err, struct sctp_gen_error_cause *);
2873 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2874 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2875 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2876 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2877 if (SCTP_BUF_NEXT(op_err) != NULL) {
2878 sctp_queue_op_err(stcb, op_err);
2880 sctp_m_freem(op_err);
2884 if ((ch->chunk_type & 0x80) == 0) {
2885 /* discard the rest of this packet */
2887 } /* else skip this bad chunk and
2890 } /* switch of chunk type */
/* Advance past this chunk (lengths are padded to 4-byte multiples). */
2892 *offset += SCTP_SIZE32(chk_length);
2893 if ((*offset >= length) || stop_proc) {
2894 /* no more data left in the mbuf chain */
2898 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2899 sizeof(struct sctp_chunkhdr),
2900 (uint8_t *)&chunk_buf);
2909 * we need to report rwnd overrun drops.
2911 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2915 * Did we get data, if so update the time for auto-close and
2916 * give peer credit for being alive.
2918 SCTP_STAT_INCR(sctps_recvpktwithdata);
2919 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2920 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2921 stcb->asoc.overall_error_count,
2923 SCTP_FROM_SCTP_INDATA,
2926 stcb->asoc.overall_error_count = 0;
2927 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2929 /* now service all of the reassm queue if needed */
2930 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2931 /* Assure that we ack right away */
2932 stcb->asoc.send_sack = 1;
2934 /* Start a sack timer or QUEUE a SACK for sending */
2935 sctp_sack_check(stcb, was_a_gap);
/*
 * sctp_process_segment_range() - mark every sent-queue chunk covered by
 * one SACK gap-ack block [last_tsn+frag_strt .. last_tsn+frag_end] as
 * acked, updating flight size, RTO, CMT pseudo-cumack state, and
 * (for nr-sacks) freeing the data. *p_tp1 carries the scan position
 * between calls; returns nonzero ("wake_him") only for nr-sack use.
 * NOTE(review): extract is line-sampled; interleaved braces and some
 * argument lines are hidden, so nesting below is implied.
 */
2940 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2941 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2943 uint32_t *biggest_newly_acked_tsn,
2944 uint32_t *this_sack_lowest_newack,
2947 struct sctp_tmit_chunk *tp1;
2948 unsigned int theTSN;
2949 int j, wake_him = 0, circled = 0;
2951 /* Recover the tp1 we last saw */
2954 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Each j is one TSN inside the gap block. */
2956 for (j = frag_strt; j <= frag_end; j++) {
2957 theTSN = j + last_tsn;
2959 if (tp1->rec.data.doing_fast_retransmit)
2963 * CMT: CUCv2 algorithm. For each TSN being
2964 * processed from the sent queue, track the
2965 * next expected pseudo-cumack, or
2966 * rtx_pseudo_cumack, if required. Separate
2967 * cumack trackers for first transmissions,
2968 * and retransmissions.
2970 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2971 (tp1->whoTo->find_pseudo_cumack == 1) &&
2972 (tp1->snd_count == 1)) {
2973 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2974 tp1->whoTo->find_pseudo_cumack = 0;
2976 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2977 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2978 (tp1->snd_count > 1)) {
2979 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2980 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk for this TSN; mark it according to its state. */
2982 if (tp1->rec.data.tsn == theTSN) {
2983 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2985 * must be held until
2988 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2990 * If it is less than RESEND, it is
2991 * now no-longer in flight.
2992 * Higher values may already be set
2993 * via previous Gap Ack Blocks...
2994 * i.e. ACKED or RESEND.
2996 if (SCTP_TSN_GT(tp1->rec.data.tsn,
2997 *biggest_newly_acked_tsn)) {
2998 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3001 * CMT: SFR algo (and HTNA) - set
3002 * saw_newack to 1 for dest being
3003 * newly acked. update
3004 * this_sack_highest_newack if
3007 if (tp1->rec.data.chunk_was_revoked == 0)
3008 tp1->whoTo->saw_newack = 1;
3010 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3011 tp1->whoTo->this_sack_highest_newack)) {
3012 tp1->whoTo->this_sack_highest_newack =
3016 * CMT DAC algo: also update
3017 * this_sack_lowest_newack
3019 if (*this_sack_lowest_newack == 0) {
3020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3021 sctp_log_sack(*this_sack_lowest_newack,
3026 SCTP_LOG_TSN_ACKED);
3028 *this_sack_lowest_newack = tp1->rec.data.tsn;
3031 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3032 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3033 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3034 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3035 * Separate pseudo_cumack trackers for first transmissions and
3038 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3039 if (tp1->rec.data.chunk_was_revoked == 0) {
3040 tp1->whoTo->new_pseudo_cumack = 1;
3042 tp1->whoTo->find_pseudo_cumack = 1;
3044 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3045 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3047 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3048 if (tp1->rec.data.chunk_was_revoked == 0) {
3049 tp1->whoTo->new_pseudo_cumack = 1;
3051 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3053 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3054 sctp_log_sack(*biggest_newly_acked_tsn,
3059 SCTP_LOG_TSN_ACKED);
3061 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3062 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3063 tp1->whoTo->flight_size,
3065 (uint32_t)(uintptr_t)tp1->whoTo,
/* Newly acked: remove from flight and credit the path (net_ack). */
3068 sctp_flight_size_decrease(tp1);
3069 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3070 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3073 sctp_total_flight_decrease(stcb, tp1);
3075 tp1->whoTo->net_ack += tp1->send_size;
3076 if (tp1->snd_count < 2) {
3078 * True non-retransmited chunk
3080 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTT sample only from chunks sent exactly once (Karn's rule). */
3088 sctp_calculate_rto(stcb,
3091 &tp1->sent_rcv_time,
3092 SCTP_RTT_FROM_DATA);
3095 if (tp1->whoTo->rto_needed == 0) {
3096 tp1->whoTo->rto_needed = 1;
3102 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3103 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3104 stcb->asoc.this_sack_highest_gap)) {
3105 stcb->asoc.this_sack_highest_gap =
3108 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3109 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3110 #ifdef SCTP_AUDITING_ENABLED
3111 sctp_audit_log(0xB2,
3112 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3117 * All chunks NOT UNSENT fall through here and are marked
3118 * (leave PR-SCTP ones that are to skip alone though)
3120 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3121 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3122 tp1->sent = SCTP_DATAGRAM_MARKED;
3124 if (tp1->rec.data.chunk_was_revoked) {
3125 /* deflate the cwnd */
3126 tp1->whoTo->cwnd -= tp1->book_size;
3127 tp1->rec.data.chunk_was_revoked = 0;
3129 /* NR Sack code here */
3131 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
/* nr-sacked chunks can never be revoked; release their data now. */
3132 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3133 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3136 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3139 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3140 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3141 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3142 stcb->asoc.trigger_reset = 1;
3144 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3150 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3151 sctp_m_freem(tp1->data);
3158 } /* if (tp1->tsn == theTSN) */
/* Queue is TSN-ordered: passed theTSN means it is not on the queue. */
3159 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3162 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* Wrap to the head once in case gap blocks arrived out of order. */
3163 if ((tp1 == NULL) && (circled == 0)) {
3165 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3167 } /* end while (tp1) */
3170 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3172 /* In case the fragments were not in order we must reset */
3173 } /* end for (j = fragStart */
3175 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() - iterate over the num_seg renegable and
 * num_nr_seg non-renegable gap-ack blocks of a SACK/NR-SACK chunk,
 * validating each block and handing it to sctp_process_segment_range().
 * Updates *biggest_tsn_acked / *biggest_newly_acked_tsn /
 * *this_sack_lowest_newack; returns nonzero if any chunk was freed.
 * NOTE(review): extract is line-sampled; interleaved braces are hidden.
 */
3180 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3181 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3182 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3183 int num_seg, int num_nr_seg, int *rto_ok)
3185 struct sctp_gap_ack_block *frag, block;
3186 struct sctp_tmit_chunk *tp1;
3191 uint16_t frag_strt, frag_end, prev_frag_end;
3193 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3197 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3200 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next gap block out of the mbuf chain (copy into "block"). */
3202 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3203 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3204 *offset += sizeof(block);
3206 return (chunk_freed);
3208 frag_strt = ntohs(frag->start);
3209 frag_end = ntohs(frag->end);
3211 if (frag_strt > frag_end) {
3212 /* This gap report is malformed, skip it. */
3215 if (frag_strt <= prev_frag_end) {
3216 /* This gap report is not in order, so restart. */
3217 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Offsets are relative to the SACK's cumulative TSN (last_tsn). */
3219 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3220 *biggest_tsn_acked = last_tsn + frag_end;
3227 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3228 non_revocable, &num_frs, biggest_newly_acked_tsn,
3229 this_sack_lowest_newack, rto_ok)) {
3232 prev_frag_end = frag_end;
3234 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3236 sctp_log_fr(*biggest_tsn_acked,
3237 *biggest_newly_acked_tsn,
3238 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3240 return (chunk_freed);
/*
 * sctp_check_for_revoked() - after processing a SACK, walk the sent
 * queue above the cum-ack: any chunk still flagged ACKED but not covered
 * by this SACK's gap reports was revoked by the peer, so put it back in
 * flight (SENT + chunk_was_revoked, flight/cwnd re-inflated); chunks the
 * SACK re-covered (MARKED) are promoted back to ACKED.
 * NOTE(review): extract is line-sampled; interleaved braces are hidden.
 */
3244 sctp_check_for_revoked(struct sctp_tcb *stcb,
3245 struct sctp_association *asoc, uint32_t cumack,
3246 uint32_t biggest_tsn_acked)
3248 struct sctp_tmit_chunk *tp1;
3250 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3251 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3253 * ok this guy is either ACK or MARKED. If it is
3254 * ACKED it has been previously acked but not this
3255 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Beyond the SACK's highest gap report: nothing more to judge. */
3258 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3261 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3262 /* it has been revoked */
3263 tp1->sent = SCTP_DATAGRAM_SENT;
3264 tp1->rec.data.chunk_was_revoked = 1;
3266 * We must add this stuff back in to assure
3267 * timers and such get started.
3269 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3270 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3271 tp1->whoTo->flight_size,
3273 (uint32_t)(uintptr_t)tp1->whoTo,
3276 sctp_flight_size_increase(tp1);
3277 sctp_total_flight_increase(stcb, tp1);
3279 * We inflate the cwnd to compensate for our
3280 * artificial inflation of the flight_size.
3282 tp1->whoTo->cwnd += tp1->book_size;
3283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3284 sctp_log_sack(asoc->last_acked_seq,
3289 SCTP_LOG_TSN_REVOKED);
3291 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3292 /* it has been re-acked in this SACK */
3293 tp1->sent = SCTP_DATAGRAM_ACKED;
/* Queue is TSN-ordered; UNSENT means we are past all sent data. */
3296 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3303 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3304 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3306 struct sctp_tmit_chunk *tp1;
3307 int strike_flag = 0;
3309 int tot_retrans = 0;
3310 uint32_t sending_seq;
3311 struct sctp_nets *net;
3312 int num_dests_sacked = 0;
3315 * select the sending_seq, this is either the next thing ready to be
3316 * sent but not transmitted, OR, the next seq we assign.
3318 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3320 sending_seq = asoc->sending_seq;
3322 sending_seq = tp1->rec.data.tsn;
3325 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3326 if ((asoc->sctp_cmt_on_off > 0) &&
3327 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3328 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3329 if (net->saw_newack)
3333 if (stcb->asoc.prsctp_supported) {
3334 (void)SCTP_GETTIME_TIMEVAL(&now);
3336 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3338 if (tp1->no_fr_allowed) {
3339 /* this one had a timeout or something */
3342 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3343 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3344 sctp_log_fr(biggest_tsn_newly_acked,
3347 SCTP_FR_LOG_CHECK_STRIKE);
3349 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3350 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3354 if (stcb->asoc.prsctp_supported) {
3355 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3356 /* Is it expired? */
3357 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3358 /* Yes so drop it */
3359 if (tp1->data != NULL) {
3360 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3361 SCTP_SO_NOT_LOCKED);
3367 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3368 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3369 /* we are beyond the tsn in the sack */
3372 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3373 /* either a RESEND, ACKED, or MARKED */
3375 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3376 /* Continue strikin FWD-TSN chunks */
3377 tp1->rec.data.fwd_tsn_cnt++;
3382 * CMT : SFR algo (covers part of DAC and HTNA as well)
3384 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3386 * No new acks were receieved for data sent to this
3387 * dest. Therefore, according to the SFR algo for
3388 * CMT, no data sent to this dest can be marked for
3389 * FR using this SACK.
3392 } else if (tp1->whoTo &&
3393 SCTP_TSN_GT(tp1->rec.data.tsn,
3394 tp1->whoTo->this_sack_highest_newack) &&
3395 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3397 * CMT: New acks were receieved for data sent to
3398 * this dest. But no new acks were seen for data
3399 * sent after tp1. Therefore, according to the SFR
3400 * algo for CMT, tp1 cannot be marked for FR using
3401 * this SACK. This step covers part of the DAC algo
3402 * and the HTNA algo as well.
3407 * Here we check to see if we were have already done a FR
3408 * and if so we see if the biggest TSN we saw in the sack is
3409 * smaller than the recovery point. If so we don't strike
3410 * the tsn... otherwise we CAN strike the TSN.
3413 * @@@ JRI: Check for CMT if (accum_moved &&
3414 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3417 if (accum_moved && asoc->fast_retran_loss_recovery) {
3419 * Strike the TSN if in fast-recovery and cum-ack
3422 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3423 sctp_log_fr(biggest_tsn_newly_acked,
3426 SCTP_FR_LOG_STRIKE_CHUNK);
3428 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3431 if ((asoc->sctp_cmt_on_off > 0) &&
3432 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3434 * CMT DAC algorithm: If SACK flag is set to
3435 * 0, then lowest_newack test will not pass
3436 * because it would have been set to the
3437 * cumack earlier. If not already to be
3438 * rtx'd, If not a mixed sack and if tp1 is
3439 * not between two sacked TSNs, then mark by
3440 * one more. NOTE that we are marking by one
3441 * additional time since the SACK DAC flag
3442 * indicates that two packets have been
3443 * received after this missing TSN.
3445 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3446 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3447 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3448 sctp_log_fr(16 + num_dests_sacked,
3451 SCTP_FR_LOG_STRIKE_CHUNK);
3456 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3457 (asoc->sctp_cmt_on_off == 0)) {
3459 * For those that have done a FR we must take
3460 * special consideration if we strike. I.e the
3461 * biggest_newly_acked must be higher than the
3462 * sending_seq at the time we did the FR.
3465 #ifdef SCTP_FR_TO_ALTERNATE
3467 * If FR's go to new networks, then we must only do
3468 * this for singly homed asoc's. However if the FR's
3469 * go to the same network (Armando's work) then its
3470 * ok to FR multiple times.
3478 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3479 tp1->rec.data.fast_retran_tsn)) {
3481 * Strike the TSN, since this ack is
3482 * beyond where things were when we
3485 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3486 sctp_log_fr(biggest_tsn_newly_acked,
3489 SCTP_FR_LOG_STRIKE_CHUNK);
3491 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3495 if ((asoc->sctp_cmt_on_off > 0) &&
3496 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3498 * CMT DAC algorithm: If
3499 * SACK flag is set to 0,
3500 * then lowest_newack test
3501 * will not pass because it
3502 * would have been set to
3503 * the cumack earlier. If
3504 * not already to be rtx'd,
3505 * If not a mixed sack and
3506 * if tp1 is not between two
3507 * sacked TSNs, then mark by
3508 * one more. NOTE that we
3509 * are marking by one
3510 * additional time since the
3511 * SACK DAC flag indicates
3512 * that two packets have
3513 * been received after this
3516 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3517 (num_dests_sacked == 1) &&
3518 SCTP_TSN_GT(this_sack_lowest_newack,
3519 tp1->rec.data.tsn)) {
3520 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3521 sctp_log_fr(32 + num_dests_sacked,
3524 SCTP_FR_LOG_STRIKE_CHUNK);
3526 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3534 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3537 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3538 biggest_tsn_newly_acked)) {
3540 * We don't strike these: This is the HTNA
3541 * algorithm i.e. we don't strike If our TSN is
3542 * larger than the Highest TSN Newly Acked.
3546 /* Strike the TSN */
3547 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3548 sctp_log_fr(biggest_tsn_newly_acked,
3551 SCTP_FR_LOG_STRIKE_CHUNK);
3553 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3556 if ((asoc->sctp_cmt_on_off > 0) &&
3557 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3559 * CMT DAC algorithm: If SACK flag is set to
3560 * 0, then lowest_newack test will not pass
3561 * because it would have been set to the
3562 * cumack earlier. If not already to be
3563 * rtx'd, If not a mixed sack and if tp1 is
3564 * not between two sacked TSNs, then mark by
3565 * one more. NOTE that we are marking by one
3566 * additional time since the SACK DAC flag
3567 * indicates that two packets have been
3568 * received after this missing TSN.
3570 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3571 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3572 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3573 sctp_log_fr(48 + num_dests_sacked,
3576 SCTP_FR_LOG_STRIKE_CHUNK);
3582 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3583 struct sctp_nets *alt;
3585 /* fix counts and things */
3586 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3587 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3588 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3590 (uint32_t)(uintptr_t)tp1->whoTo,
3594 tp1->whoTo->net_ack++;
3595 sctp_flight_size_decrease(tp1);
3596 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3597 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3601 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3602 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3603 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3605 /* add back to the rwnd */
3606 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3608 /* remove from the total flight */
3609 sctp_total_flight_decrease(stcb, tp1);
3611 if ((stcb->asoc.prsctp_supported) &&
3612 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3614 * Has it been retransmitted tv_sec times? -
3615 * we store the retran count there.
3617 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3618 /* Yes, so drop it */
3619 if (tp1->data != NULL) {
3620 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3621 SCTP_SO_NOT_LOCKED);
3623 /* Make sure to flag we had a FR */
3624 if (tp1->whoTo != NULL) {
3625 tp1->whoTo->net_ack++;
3631 * SCTP_PRINTF("OK, we are now ready to FR this
3634 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3635 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3639 /* This is a subsequent FR */
3640 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3642 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3643 if (asoc->sctp_cmt_on_off > 0) {
3645 * CMT: Using RTX_SSTHRESH policy for CMT.
3646 * If CMT is being used, then pick dest with
3647 * largest ssthresh for any retransmission.
3649 tp1->no_fr_allowed = 1;
3651 /* sa_ignore NO_NULL_CHK */
3652 if (asoc->sctp_cmt_pf > 0) {
3654 * JRS 5/18/07 - If CMT PF is on,
3655 * use the PF version of
3658 alt = sctp_find_alternate_net(stcb, alt, 2);
3661 * JRS 5/18/07 - If only CMT is on,
3662 * use the CMT version of
3665 /* sa_ignore NO_NULL_CHK */
3666 alt = sctp_find_alternate_net(stcb, alt, 1);
3672 * CUCv2: If a different dest is picked for
3673 * the retransmission, then new
3674 * (rtx-)pseudo_cumack needs to be tracked
3675 * for orig dest. Let CUCv2 track new (rtx-)
3676 * pseudo-cumack always.
3679 tp1->whoTo->find_pseudo_cumack = 1;
3680 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3682 } else { /* CMT is OFF */
3684 #ifdef SCTP_FR_TO_ALTERNATE
3685 /* Can we find an alternate? */
3686 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3689 * default behavior is to NOT retransmit
3690 * FR's to an alternate. Armando Caro's
3691 * paper details why.
3697 tp1->rec.data.doing_fast_retransmit = 1;
3699 /* mark the sending seq for possible subsequent FR's */
3701 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3702 * (uint32_t)tpi->rec.data.tsn);
3704 if (TAILQ_EMPTY(&asoc->send_queue)) {
3706 * If the queue of send is empty then its
3707 * the next sequence number that will be
3708 * assigned so we subtract one from this to
3709 * get the one we last sent.
3711 tp1->rec.data.fast_retran_tsn = sending_seq;
3714 * If there are chunks on the send queue
3715 * (unsent data that has made it from the
3716 * stream queues but not out the door, we
3717 * take the first one (which will have the
3718 * lowest TSN) and subtract one to get the
3721 struct sctp_tmit_chunk *ttt;
3723 ttt = TAILQ_FIRST(&asoc->send_queue);
3724 tp1->rec.data.fast_retran_tsn =
3730 * this guy had a RTO calculation pending on
3733 if ((tp1->whoTo != NULL) &&
3734 (tp1->whoTo->rto_needed == 0)) {
3735 tp1->whoTo->rto_needed = 1;
3739 if (alt != tp1->whoTo) {
3740 /* yes, there is an alternate. */
3741 sctp_free_remote_addr(tp1->whoTo);
3742 /* sa_ignore FREED_MEMORY */
3744 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP: walk the sent queue and try to move the advanced peer ack
 * point forward over chunks marked SCTP_FORWARD_TSN_SKIP or
 * SCTP_DATAGRAM_NR_ACKED, dropping expired TTL-policy resends along
 * the way.  Returns the chunk tracked in a_adv (the last chunk the ack
 * point advanced over) so the caller can decide whether a FORWARD-TSN
 * needs to be sent.
 * NOTE(review): this is an elided listing — several interior lines
 * (breaks, closing braces, and the final return) are not shown.
 */
3750 struct sctp_tmit_chunk *
3751 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3752 struct sctp_association *asoc)
3754 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated partial reliability. */
3758 if (asoc->prsctp_supported == 0) {
/* SAFE variant: tp1 may be released/unlinked inside the loop. */
3761 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3762 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3763 tp1->sent != SCTP_DATAGRAM_RESEND &&
3764 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3765 /* no chance to advance, out of here */
3768 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3769 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3770 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3771 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3772 asoc->advanced_peer_ack_point,
3773 tp1->rec.data.tsn, 0, 0);
3776 if (!PR_SCTP_ENABLED(tp1->flags)) {
3778 * We can't fwd-tsn past any that are reliable aka
3779 * retransmitted until the asoc fails.
3784 (void)SCTP_GETTIME_TIMEVAL(&now);
3788 * now we got a chunk which is marked for another
3789 * retransmission to a PR-stream but has run out its chances
3790 * already maybe OR has been marked to skip now. Can we skip
3791 * it if its a resend?
3793 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3794 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3796 * Now is this one marked for resend and its time is
3799 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3800 /* Yes so drop it */
3802 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3803 1, SCTP_SO_NOT_LOCKED);
3807 * No, we are done when hit one for resend
3808 * whose time has not expired.
3814 * Ok now if this chunk is marked to drop it we can clean up
3815 * the chunk, advance our peer ack point and we can check
3818 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3819 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3820 /* advance PeerAckPoint goes forward */
3821 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3822 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3824 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3825 /* No update but we do save the chk */
3830 * If it is still in RESEND we can advance no
/*
 * Debug audit of the sent queue: re-count chunks by their ->sent state
 * (in flight / marked for resend / in between / acked / above acked)
 * and compare against the flight size the association recorded on
 * entry.  Prints (or, under INVARIANTS-style builds, panics on) any
 * inconsistency.  NOTE(review): return-type line and tail of the
 * function are elided in this listing; it appears to return whether a
 * mismatch was found — confirm against the full source.
 */
3840 sctp_fs_audit(struct sctp_association *asoc)
3842 struct sctp_tmit_chunk *chk;
3843 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3846 int entry_flight, entry_cnt;
/* Snapshot the association's accounting before re-counting. */
3851 entry_flight = asoc->total_flight;
3852 entry_cnt = asoc->total_flight_count;
/* If everything left is PR-SCTP there is nothing meaningful to audit. */
3854 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3857 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3858 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3859 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3864 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3866 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3868 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Caller expects zero in-flight/in-between here; anything else is a bug. */
3875 if ((inflight > 0) || (inbetween > 0)) {
3877 panic("Flight size-express incorrect? \n");
3879 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3880 entry_flight, entry_cnt);
3882 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3883 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe: clear its
 * window_probe flag and, unless it has already been acked (or its data
 * freed), pull it back out of flight and mark it SCTP_DATAGRAM_RESEND
 * so it is queued for retransmission.  Flight-size bookkeeping and the
 * congestion-control hook are updated accordingly.
 * NOTE(review): return-type line and closing lines are elided in this
 * listing.
 */
3892 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3893 struct sctp_association *asoc,
3894 struct sctp_tmit_chunk *tp1)
3896 tp1->window_probe = 0;
3897 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3898 /* TSN's skipped we do NOT move back. */
3899 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3900 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3902 (uint32_t)(uintptr_t)tp1->whoTo,
3906 /* First setup this by shrinking flight */
3907 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3908 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3911 sctp_flight_size_decrease(tp1);
3912 sctp_total_flight_decrease(stcb, tp1);
/* Now mark for resend */
3914 tp1->sent = SCTP_DATAGRAM_RESEND;
3915 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3917 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3918 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3919 tp1->whoTo->flight_size,
3921 (uint32_t)(uintptr_t)tp1->whoTo,
3927 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3928 uint32_t rwnd, int *abort_now, int ecne_seen)
3930 struct sctp_nets *net;
3931 struct sctp_association *asoc;
3932 struct sctp_tmit_chunk *tp1, *tp2;
3934 int win_probe_recovery = 0;
3935 int win_probe_recovered = 0;
3936 int j, done_once = 0;
3940 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3941 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3942 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3944 SCTP_TCB_LOCK_ASSERT(stcb);
3945 #ifdef SCTP_ASOCLOG_OF_TSNS
3946 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3947 stcb->asoc.cumack_log_at++;
3948 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3949 stcb->asoc.cumack_log_at = 0;
3953 old_rwnd = asoc->peers_rwnd;
3954 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3957 } else if (asoc->last_acked_seq == cumack) {
3958 /* Window update sack */
3959 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3960 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3961 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3962 /* SWS sender side engages */
3963 asoc->peers_rwnd = 0;
3965 if (asoc->peers_rwnd > old_rwnd) {
3970 /* First setup for CC stuff */
3971 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3972 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3973 /* Drag along the window_tsn for cwr's */
3974 net->cwr_window_tsn = cumack;
3976 net->prev_cwnd = net->cwnd;
3981 * CMT: Reset CUC and Fast recovery algo variables before
3984 net->new_pseudo_cumack = 0;
3985 net->will_exit_fast_recovery = 0;
3986 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3987 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3990 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3991 tp1 = TAILQ_LAST(&asoc->sent_queue,
3992 sctpchunk_listhead);
3993 send_s = tp1->rec.data.tsn + 1;
3995 send_s = asoc->sending_seq;
3997 if (SCTP_TSN_GE(cumack, send_s)) {
3998 struct mbuf *op_err;
3999 char msg[SCTP_DIAG_INFO_LEN];
4003 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4005 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4006 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
4007 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4010 asoc->this_sack_highest_gap = cumack;
4011 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4012 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4013 stcb->asoc.overall_error_count,
4015 SCTP_FROM_SCTP_INDATA,
4018 stcb->asoc.overall_error_count = 0;
4019 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4020 /* process the new consecutive TSN first */
4021 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4022 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4023 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4024 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4026 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4028 * If it is less than ACKED, it is
4029 * now no-longer in flight. Higher
4030 * values may occur during marking
4032 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4033 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4034 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4035 tp1->whoTo->flight_size,
4037 (uint32_t)(uintptr_t)tp1->whoTo,
4040 sctp_flight_size_decrease(tp1);
4041 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4042 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4045 /* sa_ignore NO_NULL_CHK */
4046 sctp_total_flight_decrease(stcb, tp1);
4048 tp1->whoTo->net_ack += tp1->send_size;
4049 if (tp1->snd_count < 2) {
4051 * True non-retransmitted
4054 tp1->whoTo->net_ack2 +=
4057 /* update RTO too? */
4065 sctp_calculate_rto(stcb,
4067 &tp1->sent_rcv_time,
4068 SCTP_RTT_FROM_DATA);
4071 if (tp1->whoTo->rto_needed == 0) {
4072 tp1->whoTo->rto_needed = 1;
4078 * CMT: CUCv2 algorithm. From the
4079 * cumack'd TSNs, for each TSN being
4080 * acked for the first time, set the
4081 * following variables for the
4082 * corresp destination.
4083 * new_pseudo_cumack will trigger a
4085 * find_(rtx_)pseudo_cumack will
4086 * trigger search for the next
4087 * expected (rtx-)pseudo-cumack.
4089 tp1->whoTo->new_pseudo_cumack = 1;
4090 tp1->whoTo->find_pseudo_cumack = 1;
4091 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4093 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4094 /* sa_ignore NO_NULL_CHK */
4095 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4098 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4099 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4101 if (tp1->rec.data.chunk_was_revoked) {
4102 /* deflate the cwnd */
4103 tp1->whoTo->cwnd -= tp1->book_size;
4104 tp1->rec.data.chunk_was_revoked = 0;
4106 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4107 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4108 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4111 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4115 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4116 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4117 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4118 asoc->trigger_reset = 1;
4120 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4122 /* sa_ignore NO_NULL_CHK */
4123 sctp_free_bufspace(stcb, asoc, tp1, 1);
4124 sctp_m_freem(tp1->data);
4127 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4128 sctp_log_sack(asoc->last_acked_seq,
4133 SCTP_LOG_FREE_SENT);
4135 asoc->sent_queue_cnt--;
4136 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4143 /* sa_ignore NO_NULL_CHK */
4144 if (stcb->sctp_socket) {
4145 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4149 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4150 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4151 /* sa_ignore NO_NULL_CHK */
4152 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4154 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4155 so = SCTP_INP_SO(stcb->sctp_ep);
4156 atomic_add_int(&stcb->asoc.refcnt, 1);
4157 SCTP_TCB_UNLOCK(stcb);
4158 SCTP_SOCKET_LOCK(so, 1);
4159 SCTP_TCB_LOCK(stcb);
4160 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4161 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4162 /* assoc was freed while we were unlocked */
4163 SCTP_SOCKET_UNLOCK(so, 1);
4167 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4168 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4169 SCTP_SOCKET_UNLOCK(so, 1);
4172 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4173 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4177 /* JRS - Use the congestion control given in the CC module */
4178 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4179 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4180 if (net->net_ack2 > 0) {
4182 * Karn's rule applies to clearing error
4183 * count, this is optional.
4185 net->error_count = 0;
4186 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4187 /* addr came good */
4188 net->dest_state |= SCTP_ADDR_REACHABLE;
4189 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4190 0, (void *)net, SCTP_SO_NOT_LOCKED);
4192 if (net == stcb->asoc.primary_destination) {
4193 if (stcb->asoc.alternate) {
4195 * release the alternate,
4198 sctp_free_remote_addr(stcb->asoc.alternate);
4199 stcb->asoc.alternate = NULL;
4202 if (net->dest_state & SCTP_ADDR_PF) {
4203 net->dest_state &= ~SCTP_ADDR_PF;
4204 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4205 stcb->sctp_ep, stcb, net,
4206 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4207 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4208 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4209 /* Done with this net */
4212 /* restore any doubled timers */
4213 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4214 if (net->RTO < stcb->asoc.minrto) {
4215 net->RTO = stcb->asoc.minrto;
4217 if (net->RTO > stcb->asoc.maxrto) {
4218 net->RTO = stcb->asoc.maxrto;
4222 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4224 asoc->last_acked_seq = cumack;
4226 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4227 /* nothing left in-flight */
4228 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4229 net->flight_size = 0;
4230 net->partial_bytes_acked = 0;
4232 asoc->total_flight = 0;
4233 asoc->total_flight_count = 0;
4236 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4237 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4238 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4239 /* SWS sender side engages */
4240 asoc->peers_rwnd = 0;
4242 if (asoc->peers_rwnd > old_rwnd) {
4243 win_probe_recovery = 1;
4245 /* Now assure a timer where data is queued at */
4248 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4249 if (win_probe_recovery && (net->window_probe)) {
4250 win_probe_recovered = 1;
4252 * Find first chunk that was used with window probe
4253 * and clear the sent
4255 /* sa_ignore FREED_MEMORY */
4256 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4257 if (tp1->window_probe) {
4258 /* move back to data send queue */
4259 sctp_window_probe_recovery(stcb, asoc, tp1);
4264 if (net->flight_size) {
4266 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4267 if (net->window_probe) {
4268 net->window_probe = 0;
4271 if (net->window_probe) {
4273 * In window probes we must assure a timer
4274 * is still running there
4276 net->window_probe = 0;
4277 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4278 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4280 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4281 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4283 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
4288 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4289 (asoc->sent_queue_retran_cnt == 0) &&
4290 (win_probe_recovered == 0) &&
4293 * huh, this should not happen unless all packets are
4294 * PR-SCTP and marked to skip of course.
4296 if (sctp_fs_audit(asoc)) {
4297 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4298 net->flight_size = 0;
4300 asoc->total_flight = 0;
4301 asoc->total_flight_count = 0;
4302 asoc->sent_queue_retran_cnt = 0;
4303 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4304 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4305 sctp_flight_size_increase(tp1);
4306 sctp_total_flight_increase(stcb, tp1);
4307 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4308 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4315 /**********************************/
4316 /* Now what about shutdown issues */
4317 /**********************************/
4318 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4319 /* nothing left on sendqueue.. consider done */
4321 if ((asoc->stream_queue_cnt == 1) &&
4322 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4323 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4324 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4325 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4327 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4328 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4329 (asoc->stream_queue_cnt == 1) &&
4330 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4331 struct mbuf *op_err;
4335 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4336 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4337 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4340 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4341 (asoc->stream_queue_cnt == 0)) {
4342 struct sctp_nets *netp;
4344 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4345 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4346 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4348 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4349 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4350 sctp_stop_timers_for_shutdown(stcb);
4351 if (asoc->alternate) {
4352 netp = asoc->alternate;
4354 netp = asoc->primary_destination;
4356 sctp_send_shutdown(stcb, netp);
4357 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4358 stcb->sctp_ep, stcb, netp);
4359 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4360 stcb->sctp_ep, stcb, netp);
4361 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4362 (asoc->stream_queue_cnt == 0)) {
4363 struct sctp_nets *netp;
4365 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4366 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4367 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4368 sctp_stop_timers_for_shutdown(stcb);
4369 if (asoc->alternate) {
4370 netp = asoc->alternate;
4372 netp = asoc->primary_destination;
4374 sctp_send_shutdown_ack(stcb, netp);
4375 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4376 stcb->sctp_ep, stcb, netp);
4379 /*********************************************/
4380 /* Here we perform PR-SCTP procedures */
4382 /*********************************************/
4383 /* C1. update advancedPeerAckPoint */
4384 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4385 asoc->advanced_peer_ack_point = cumack;
4387 /* PR-Sctp issues need to be addressed too */
4388 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4389 struct sctp_tmit_chunk *lchk;
4390 uint32_t old_adv_peer_ack_point;
4392 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4393 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4394 /* C3. See if we need to send a Fwd-TSN */
4395 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4397 * ISSUE with ECN, see FWD-TSN processing.
4399 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4400 send_forward_tsn(stcb, asoc);
4402 /* try to FR fwd-tsn's that get lost too */
4403 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4404 send_forward_tsn(stcb, asoc);
4409 /* Assure a timer is up */
4410 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4411 stcb->sctp_ep, stcb, lchk->whoTo);
4414 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4415 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4417 stcb->asoc.peers_rwnd,
4418 stcb->asoc.total_flight,
4419 stcb->asoc.total_output_queue_size);
4424 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4425 struct sctp_tcb *stcb,
4426 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4427 int *abort_now, uint8_t flags,
4428 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4430 struct sctp_association *asoc;
4431 struct sctp_tmit_chunk *tp1, *tp2;
4432 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4433 uint16_t wake_him = 0;
4434 uint32_t send_s = 0;
4436 int accum_moved = 0;
4437 int will_exit_fast_recovery = 0;
4438 uint32_t a_rwnd, old_rwnd;
4439 int win_probe_recovery = 0;
4440 int win_probe_recovered = 0;
4441 struct sctp_nets *net = NULL;
4444 uint8_t reneged_all = 0;
4445 uint8_t cmt_dac_flag;
4448 * we take any chance we can to service our queues since we cannot
4449 * get awoken when the socket is read from :<
4452 * Now perform the actual SACK handling: 1) Verify that it is not an
4453 * old sack, if so discard. 2) If there is nothing left in the send
4454 * queue (cum-ack is equal to last acked) then you have a duplicate
4455 * too, update any rwnd change and verify no timers are running.
4456 * then return. 3) Process any new consecutive data i.e. cum-ack
4457 * moved process these first and note that it moved. 4) Process any
4458 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4459 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4460 * sync up flightsizes and things, stop all timers and also check
4461 * for shutdown_pending state. If so then go ahead and send off the
4462 * shutdown. If in shutdown recv, send off the shutdown-ack and
4463 * start that timer, Ret. 9) Strike any non-acked things and do FR
4464 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4465 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4466 * if in shutdown_recv state.
4468 SCTP_TCB_LOCK_ASSERT(stcb);
4470 this_sack_lowest_newack = 0;
4471 SCTP_STAT_INCR(sctps_slowpath_sack);
4473 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4474 #ifdef SCTP_ASOCLOG_OF_TSNS
4475 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4476 stcb->asoc.cumack_log_at++;
4477 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4478 stcb->asoc.cumack_log_at = 0;
4483 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4484 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4485 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4487 old_rwnd = stcb->asoc.peers_rwnd;
4488 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4489 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4490 stcb->asoc.overall_error_count,
4492 SCTP_FROM_SCTP_INDATA,
4495 stcb->asoc.overall_error_count = 0;
4497 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4498 sctp_log_sack(asoc->last_acked_seq,
4505 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4507 uint32_t *dupdata, dblock;
4509 for (i = 0; i < num_dup; i++) {
4510 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4511 sizeof(uint32_t), (uint8_t *)&dblock);
4512 if (dupdata == NULL) {
4515 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4519 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4520 tp1 = TAILQ_LAST(&asoc->sent_queue,
4521 sctpchunk_listhead);
4522 send_s = tp1->rec.data.tsn + 1;
4525 send_s = asoc->sending_seq;
4527 if (SCTP_TSN_GE(cum_ack, send_s)) {
4528 struct mbuf *op_err;
4529 char msg[SCTP_DIAG_INFO_LEN];
4532 * no way, we have not even sent this TSN out yet. Peer is
4533 * hopelessly messed up with us.
4535 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4538 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4539 tp1->rec.data.tsn, (void *)tp1);
4544 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4546 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4547 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4548 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4551 /**********************/
4552 /* 1) check the range */
4553 /**********************/
4554 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4555 /* acking something behind */
4558 /* update the Rwnd of the peer */
4559 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4560 TAILQ_EMPTY(&asoc->send_queue) &&
4561 (asoc->stream_queue_cnt == 0)) {
4562 /* nothing left on send/sent and strmq */
4563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4564 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4565 asoc->peers_rwnd, 0, 0, a_rwnd);
4567 asoc->peers_rwnd = a_rwnd;
4568 if (asoc->sent_queue_retran_cnt) {
4569 asoc->sent_queue_retran_cnt = 0;
4571 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4572 /* SWS sender side engages */
4573 asoc->peers_rwnd = 0;
4575 /* stop any timers */
4576 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4577 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4578 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4579 net->partial_bytes_acked = 0;
4580 net->flight_size = 0;
4582 asoc->total_flight = 0;
4583 asoc->total_flight_count = 0;
4587 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4588 * things. The total byte count acked is tracked in netAckSz AND
4589 * netAck2 is used to track the total bytes acked that are un-
4590 * ambiguous and were never retransmitted. We track these on a per
4591 * destination address basis.
4593 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4594 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4595 /* Drag along the window_tsn for cwr's */
4596 net->cwr_window_tsn = cum_ack;
4598 net->prev_cwnd = net->cwnd;
4603 * CMT: Reset CUC and Fast recovery algo variables before
4606 net->new_pseudo_cumack = 0;
4607 net->will_exit_fast_recovery = 0;
4608 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4609 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4612 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4613 * to be greater than the cumack. Also reset saw_newack to 0
4616 net->saw_newack = 0;
4617 net->this_sack_highest_newack = last_tsn;
4619 /* process the new consecutive TSN first */
4620 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4621 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4622 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4624 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4626 * If it is less than ACKED, it is
4627 * now no-longer in flight. Higher
4628 * values may occur during marking
4630 if ((tp1->whoTo->dest_state &
4631 SCTP_ADDR_UNCONFIRMED) &&
4632 (tp1->snd_count < 2)) {
4634 * If there was no retran
4635 * and the address is
4636 * un-confirmed and we sent
4638 * sacked.. its confirmed,
4641 tp1->whoTo->dest_state &=
4642 ~SCTP_ADDR_UNCONFIRMED;
4644 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4645 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4646 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4647 tp1->whoTo->flight_size,
4649 (uint32_t)(uintptr_t)tp1->whoTo,
4652 sctp_flight_size_decrease(tp1);
4653 sctp_total_flight_decrease(stcb, tp1);
4654 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4655 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4659 tp1->whoTo->net_ack += tp1->send_size;
4661 /* CMT SFR and DAC algos */
4662 this_sack_lowest_newack = tp1->rec.data.tsn;
4663 tp1->whoTo->saw_newack = 1;
4665 if (tp1->snd_count < 2) {
4667 * True non-retransmited
4670 tp1->whoTo->net_ack2 +=
4673 /* update RTO too? */
4677 sctp_calculate_rto(stcb,
4679 &tp1->sent_rcv_time,
4680 SCTP_RTT_FROM_DATA);
4683 if (tp1->whoTo->rto_needed == 0) {
4684 tp1->whoTo->rto_needed = 1;
4690 * CMT: CUCv2 algorithm. From the
4691 * cumack'd TSNs, for each TSN being
4692 * acked for the first time, set the
4693 * following variables for the
4694 * corresp destination.
4695 * new_pseudo_cumack will trigger a
4697 * find_(rtx_)pseudo_cumack will
4698 * trigger search for the next
4699 * expected (rtx-)pseudo-cumack.
4701 tp1->whoTo->new_pseudo_cumack = 1;
4702 tp1->whoTo->find_pseudo_cumack = 1;
4703 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4706 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4707 sctp_log_sack(asoc->last_acked_seq,
4712 SCTP_LOG_TSN_ACKED);
4714 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4715 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4718 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4719 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4720 #ifdef SCTP_AUDITING_ENABLED
4721 sctp_audit_log(0xB3,
4722 (asoc->sent_queue_retran_cnt & 0x000000ff));
4725 if (tp1->rec.data.chunk_was_revoked) {
4726 /* deflate the cwnd */
4727 tp1->whoTo->cwnd -= tp1->book_size;
4728 tp1->rec.data.chunk_was_revoked = 0;
4730 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4731 tp1->sent = SCTP_DATAGRAM_ACKED;
4738 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4739 /* always set this up to cum-ack */
4740 asoc->this_sack_highest_gap = last_tsn;
4742 if ((num_seg > 0) || (num_nr_seg > 0)) {
4745 * thisSackHighestGap will increase while handling NEW
4746 * segments this_sack_highest_newack will increase while
4747 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4748 * used for CMT DAC algo. saw_newack will also change.
4750 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4751 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4752 num_seg, num_nr_seg, &rto_ok)) {
4756 * validate the biggest_tsn_acked in the gap acks if strict
4757 * adherence is wanted.
4759 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4761 * peer is either confused or we are under attack.
4764 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4765 biggest_tsn_acked, send_s);
4769 /*******************************************/
4770 /* cancel ALL T3-send timer if accum moved */
4771 /*******************************************/
4772 if (asoc->sctp_cmt_on_off > 0) {
4773 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4774 if (net->new_pseudo_cumack)
4775 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4777 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4782 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4783 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4784 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4788 /********************************************/
4789 /* drop the acked chunks from the sentqueue */
4790 /********************************************/
4791 asoc->last_acked_seq = cum_ack;
4793 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4794 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4797 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4798 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4799 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4802 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4806 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4807 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4808 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4809 asoc->trigger_reset = 1;
4811 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4812 if (PR_SCTP_ENABLED(tp1->flags)) {
4813 if (asoc->pr_sctp_cnt != 0)
4814 asoc->pr_sctp_cnt--;
4816 asoc->sent_queue_cnt--;
4818 /* sa_ignore NO_NULL_CHK */
4819 sctp_free_bufspace(stcb, asoc, tp1, 1);
4820 sctp_m_freem(tp1->data);
4822 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4823 asoc->sent_queue_cnt_removeable--;
4826 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4827 sctp_log_sack(asoc->last_acked_seq,
4832 SCTP_LOG_FREE_SENT);
4834 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4837 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4839 panic("Warning flight size is positive and should be 0");
4841 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4842 asoc->total_flight);
4844 asoc->total_flight = 0;
4846 /* sa_ignore NO_NULL_CHK */
4847 if ((wake_him) && (stcb->sctp_socket)) {
4848 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4852 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4853 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4854 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4856 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4857 so = SCTP_INP_SO(stcb->sctp_ep);
4858 atomic_add_int(&stcb->asoc.refcnt, 1);
4859 SCTP_TCB_UNLOCK(stcb);
4860 SCTP_SOCKET_LOCK(so, 1);
4861 SCTP_TCB_LOCK(stcb);
4862 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4863 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4864 /* assoc was freed while we were unlocked */
4865 SCTP_SOCKET_UNLOCK(so, 1);
4869 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4870 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4871 SCTP_SOCKET_UNLOCK(so, 1);
4874 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4875 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4879 if (asoc->fast_retran_loss_recovery && accum_moved) {
4880 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4881 /* Setup so we will exit RFC2582 fast recovery */
4882 will_exit_fast_recovery = 1;
4886 * Check for revoked fragments:
4888 * if Previous sack - Had no frags then we can't have any revoked if
4889 * Previous sack - Had frag's then - If we now have frags aka
4890 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4891 * some of them. else - The peer revoked all ACKED fragments, since
4892 * we had some before and now we have NONE.
4896 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4897 asoc->saw_sack_with_frags = 1;
4898 } else if (asoc->saw_sack_with_frags) {
4899 int cnt_revoked = 0;
4901 /* Peer revoked all dg's marked or acked */
4902 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4903 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4904 tp1->sent = SCTP_DATAGRAM_SENT;
4905 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4906 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4907 tp1->whoTo->flight_size,
4909 (uint32_t)(uintptr_t)tp1->whoTo,
4912 sctp_flight_size_increase(tp1);
4913 sctp_total_flight_increase(stcb, tp1);
4914 tp1->rec.data.chunk_was_revoked = 1;
4916 * To ensure that this increase in
4917 * flightsize, which is artificial, does not
4918 * throttle the sender, we also increase the
4919 * cwnd artificially.
4921 tp1->whoTo->cwnd += tp1->book_size;
4928 asoc->saw_sack_with_frags = 0;
4931 asoc->saw_sack_with_nr_frags = 1;
4933 asoc->saw_sack_with_nr_frags = 0;
4935 /* JRS - Use the congestion control given in the CC module */
4936 if (ecne_seen == 0) {
4937 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4938 if (net->net_ack2 > 0) {
4940 * Karn's rule applies to clearing error
4941 * count, this is optional.
4943 net->error_count = 0;
4944 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4945 /* addr came good */
4946 net->dest_state |= SCTP_ADDR_REACHABLE;
4947 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4948 0, (void *)net, SCTP_SO_NOT_LOCKED);
4950 if (net == stcb->asoc.primary_destination) {
4951 if (stcb->asoc.alternate) {
4953 * release the alternate,
4956 sctp_free_remote_addr(stcb->asoc.alternate);
4957 stcb->asoc.alternate = NULL;
4960 if (net->dest_state & SCTP_ADDR_PF) {
4961 net->dest_state &= ~SCTP_ADDR_PF;
4962 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4963 stcb->sctp_ep, stcb, net,
4964 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4965 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4966 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4967 /* Done with this net */
4970 /* restore any doubled timers */
4971 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4972 if (net->RTO < stcb->asoc.minrto) {
4973 net->RTO = stcb->asoc.minrto;
4975 if (net->RTO > stcb->asoc.maxrto) {
4976 net->RTO = stcb->asoc.maxrto;
4980 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4982 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4983 /* nothing left in-flight */
4984 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4985 /* stop all timers */
4986 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4988 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4989 net->flight_size = 0;
4990 net->partial_bytes_acked = 0;
4992 asoc->total_flight = 0;
4993 asoc->total_flight_count = 0;
4995 /**********************************/
4996 /* Now what about shutdown issues */
4997 /**********************************/
4998 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4999 /* nothing left on sendqueue.. consider done */
5000 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5001 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5002 asoc->peers_rwnd, 0, 0, a_rwnd);
5004 asoc->peers_rwnd = a_rwnd;
5005 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5006 /* SWS sender side engages */
5007 asoc->peers_rwnd = 0;
5010 if ((asoc->stream_queue_cnt == 1) &&
5011 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5012 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5013 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
5014 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5016 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5017 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
5018 (asoc->stream_queue_cnt == 1) &&
5019 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5020 struct mbuf *op_err;
5024 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
5025 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
5026 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5029 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
5030 (asoc->stream_queue_cnt == 0)) {
5031 struct sctp_nets *netp;
5033 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
5034 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
5035 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5037 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
5038 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5039 sctp_stop_timers_for_shutdown(stcb);
5040 if (asoc->alternate) {
5041 netp = asoc->alternate;
5043 netp = asoc->primary_destination;
5045 sctp_send_shutdown(stcb, netp);
5046 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5047 stcb->sctp_ep, stcb, netp);
5048 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5049 stcb->sctp_ep, stcb, netp);
5051 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5052 (asoc->stream_queue_cnt == 0)) {
5053 struct sctp_nets *netp;
5055 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5056 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
5057 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
5058 sctp_stop_timers_for_shutdown(stcb);
5059 if (asoc->alternate) {
5060 netp = asoc->alternate;
5062 netp = asoc->primary_destination;
5064 sctp_send_shutdown_ack(stcb, netp);
5065 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5066 stcb->sctp_ep, stcb, netp);
5071 * Now here we are going to recycle net_ack for a different use...
5074 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5079 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5080 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5081 * automatically ensure that.
5083 if ((asoc->sctp_cmt_on_off > 0) &&
5084 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5085 (cmt_dac_flag == 0)) {
5086 this_sack_lowest_newack = cum_ack;
5088 if ((num_seg > 0) || (num_nr_seg > 0)) {
5089 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5090 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5092 /* JRS - Use the congestion control given in the CC module */
5093 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5095 /* Now are we exiting loss recovery ? */
5096 if (will_exit_fast_recovery) {
5097 /* Ok, we must exit fast recovery */
5098 asoc->fast_retran_loss_recovery = 0;
5100 if ((asoc->sat_t3_loss_recovery) &&
5101 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5102 /* end satellite t3 loss recovery */
5103 asoc->sat_t3_loss_recovery = 0;
5108 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5109 if (net->will_exit_fast_recovery) {
5110 /* Ok, we must exit fast recovery */
5111 net->fast_retran_loss_recovery = 0;
5115 /* Adjust and set the new rwnd value */
5116 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5117 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5118 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5120 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5121 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5122 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5123 /* SWS sender side engages */
5124 asoc->peers_rwnd = 0;
5126 if (asoc->peers_rwnd > old_rwnd) {
5127 win_probe_recovery = 1;
5130 * Now we must setup so we have a timer up for anyone with
5136 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5137 if (win_probe_recovery && (net->window_probe)) {
5138 win_probe_recovered = 1;
5140 * Find first chunk that was used with
5141 * window probe and clear the event. Put
5142 * it back into the send queue as if has
5145 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5146 if (tp1->window_probe) {
5147 sctp_window_probe_recovery(stcb, asoc, tp1);
5152 if (net->flight_size) {
5154 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5155 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5156 stcb->sctp_ep, stcb, net);
5158 if (net->window_probe) {
5159 net->window_probe = 0;
5162 if (net->window_probe) {
5164 * In window probes we must assure a timer
5165 * is still running there
5167 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5168 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5169 stcb->sctp_ep, stcb, net);
5172 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5173 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5175 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
5180 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5181 (asoc->sent_queue_retran_cnt == 0) &&
5182 (win_probe_recovered == 0) &&
5185 * huh, this should not happen unless all packets are
5186 * PR-SCTP and marked to skip of course.
5188 if (sctp_fs_audit(asoc)) {
5189 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5190 net->flight_size = 0;
5192 asoc->total_flight = 0;
5193 asoc->total_flight_count = 0;
5194 asoc->sent_queue_retran_cnt = 0;
5195 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5196 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5197 sctp_flight_size_increase(tp1);
5198 sctp_total_flight_increase(stcb, tp1);
5199 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5200 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5207 /*********************************************/
5208 /* Here we perform PR-SCTP procedures */
5210 /*********************************************/
5211 /* C1. update advancedPeerAckPoint */
5212 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5213 asoc->advanced_peer_ack_point = cum_ack;
5215 /* C2. try to further move advancedPeerAckPoint ahead */
5216 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5217 struct sctp_tmit_chunk *lchk;
5218 uint32_t old_adv_peer_ack_point;
5220 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5221 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5222 /* C3. See if we need to send a Fwd-TSN */
5223 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5225 * ISSUE with ECN, see FWD-TSN processing.
5227 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5228 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5229 0xee, cum_ack, asoc->advanced_peer_ack_point,
5230 old_adv_peer_ack_point);
5232 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5233 send_forward_tsn(stcb, asoc);
5235 /* try to FR fwd-tsn's that get lost too */
5236 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5237 send_forward_tsn(stcb, asoc);
5242 /* Assure a timer is up */
5243 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5244 stcb->sctp_ep, stcb, lchk->whoTo);
5247 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5248 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5250 stcb->asoc.peers_rwnd,
5251 stcb->asoc.total_flight,
5252 stcb->asoc.total_output_queue_size);
5257 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5260 uint32_t cum_ack, a_rwnd;
5262 cum_ack = ntohl(cp->cumulative_tsn_ack);
5263 /* Arrange so a_rwnd does NOT change */
5264 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5266 /* Now call the express sack handling */
5267 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * sctp_kick_prsctp_reorder_queue() - after a FORWARD-TSN has moved this
 * stream's delivery point, flush the per-stream re-ordering queue.
 *
 * Pass 1: every queued message whose MID is at or before the new
 * last_mid_delivered is deliverable; complete messages go straight to the
 * socket read queue, fragmented ones are flagged for a reassembly check.
 * Pass 2: resume normal in-order delivery for the consecutive MIDs that
 * follow the delivery point.
 *
 * NOTE(review): the readq/reasm helpers are invoked with
 * SCTP_READ_LOCK_HELD, so the caller presumably holds the INP read lock —
 * confirm at call sites.
 */
5271 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5272 struct sctp_stream_in *strmin)
5274 struct sctp_queued_to_read *control, *ncontrol;
5275 struct sctp_association *asoc;
5277 int need_reasm_check = 0;
/* Delivery point the FORWARD-TSN advanced us to. */
5280 mid = strmin->last_mid_delivered;
5282 * First deliver anything prior to and including the stream no that
/* Safe traversal: entries may be unlinked while walking. */
5285 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5286 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5287 /* this is deliverable now */
5288 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/* Unhook the entry from whichever stream queue it sits on. */
5289 if (control->on_strm_q) {
5290 if (control->on_strm_q == SCTP_ON_ORDERED) {
5291 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5292 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5293 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5296 panic("strmin: %p ctl: %p unknown %d",
5297 strmin, control, control->on_strm_q);
5300 control->on_strm_q = 0;
5302 /* subtract pending on streams */
/* Clamp instead of underflowing the aggregate byte counter. */
5303 if (asoc->size_on_all_streams >= control->length) {
5304 asoc->size_on_all_streams -= control->length;
5307 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5309 asoc->size_on_all_streams = 0;
5312 sctp_ucount_decr(asoc->cnt_on_all_streams);
5313 /* deliver it to at least the delivery-q */
5314 if (stcb->sctp_socket) {
5315 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5316 sctp_add_to_readq(stcb->sctp_ep, stcb,
5318 &stcb->sctp_socket->so_rcv,
5319 1, SCTP_READ_LOCK_HELD,
5320 SCTP_SO_NOT_LOCKED);
5323 /* Its a fragmented message */
5324 if (control->first_frag_seen) {
5326 * Make it so this is next to
5327 * deliver, we restore later
5329 strmin->last_mid_delivered = control->mid - 1;
5330 need_reasm_check = 1;
5335 /* no more delivery now. */
5339 if (need_reasm_check) {
/* Let the reassembly code try to deliver the partial message in front. */
5342 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5343 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5344 /* Restore the next to deliver unless we are ahead */
5345 strmin->last_mid_delivered = mid;
5348 /* Left the front Partial one on */
5351 need_reasm_check = 0;
5354 * now we must deliver things in queue the normal way if any are
/* Pass 2: continue consecutive in-order delivery past the new point. */
5357 mid = strmin->last_mid_delivered + 1;
5358 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5359 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5360 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5361 /* this is deliverable now */
5362 if (control->on_strm_q) {
5363 if (control->on_strm_q == SCTP_ON_ORDERED) {
5364 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5365 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5366 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5369 panic("strmin: %p ctl: %p unknown %d",
5370 strmin, control, control->on_strm_q);
5373 control->on_strm_q = 0;
5375 /* subtract pending on streams */
5376 if (asoc->size_on_all_streams >= control->length) {
5377 asoc->size_on_all_streams -= control->length;
5380 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5382 asoc->size_on_all_streams = 0;
5385 sctp_ucount_decr(asoc->cnt_on_all_streams);
5386 /* deliver it to at least the delivery-q */
5387 strmin->last_mid_delivered = control->mid;
5388 if (stcb->sctp_socket) {
5389 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5390 sctp_add_to_readq(stcb->sctp_ep, stcb,
5392 &stcb->sctp_socket->so_rcv, 1,
5393 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Advance the expected MID and keep scanning for the next in order. */
5396 mid = strmin->last_mid_delivered + 1;
5398 /* Its a fragmented message */
5399 if (control->first_frag_seen) {
5401 * Make it so this is next to
5404 strmin->last_mid_delivered = control->mid - 1;
5405 need_reasm_check = 1;
5413 if (need_reasm_check) {
5414 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
/*
 * sctp_flush_reassm_for_str_seq() - a FORWARD-TSN told us the peer has
 * abandoned message 'mid' on stream 'stream'; purge its chunks from the
 * stream's reassembly tracking so they can never be delivered.
 *
 * For old-style (non I-DATA) unordered data, only chunks with
 * TSN <= cumtsn are purged.  If newer fragments survive in the reasm
 * list, the control block is reset and re-seeded with the first
 * surviving fragment; otherwise the control block itself is unlinked
 * from its stream queue and, when not referenced by the read queue,
 * freed.
 */
5421 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5422 struct sctp_association *asoc,
5423 uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
5425 struct sctp_queued_to_read *control;
5426 struct sctp_stream_in *strm;
5427 struct sctp_tmit_chunk *chk, *nchk;
5428 int cnt_removed = 0;
5431 * For now large messages held on the stream reasm that are complete
5432 * will be tossed too. We could in theory do more work to spin
5433 * through and stop after dumping one msg aka seeing the start of a
5434 * new msg at the head, and call the delivery function... to see if
5435 * it can be delivered... But for now we just dump everything on the
5438 strm = &asoc->strmin[stream];
5439 control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
/* Nothing queued for this (stream, mid): nothing to flush. */
5440 if (control == NULL) {
5444 if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5447 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5448 /* Purge hanging chunks */
/* Old-style unordered data: only purge fragments covered by cumtsn. */
5449 if (!asoc->idata_supported && (ordered == 0)) {
5450 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5455 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
/* Keep the reassembly byte counter from underflowing. */
5456 if (asoc->size_on_reasm_queue >= chk->send_size) {
5457 asoc->size_on_reasm_queue -= chk->send_size;
5460 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5462 asoc->size_on_reasm_queue = 0;
5465 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5467 sctp_m_freem(chk->data);
5470 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Some newer fragments survived: re-arm the control block with them. */
5472 if (!TAILQ_EMPTY(&control->reasm)) {
5473 /* This has to be old data, unordered */
5474 if (control->data) {
5475 sctp_m_freem(control->data);
5476 control->data = NULL;
5478 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5479 chk = TAILQ_FIRST(&control->reasm);
5480 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5481 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5482 sctp_add_chk_to_control(control, strm, stcb, asoc,
5483 chk, SCTP_READ_LOCK_HELD);
5485 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
/* All fragments gone: unlink the control block from its stream queue. */
5488 if (control->on_strm_q == SCTP_ON_ORDERED) {
5489 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5490 if (asoc->size_on_all_streams >= control->length) {
5491 asoc->size_on_all_streams -= control->length;
5494 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5496 asoc->size_on_all_streams = 0;
5499 sctp_ucount_decr(asoc->cnt_on_all_streams);
5500 control->on_strm_q = 0;
5501 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5502 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5503 control->on_strm_q = 0;
5505 } else if (control->on_strm_q) {
5506 panic("strm: %p ctl: %p unknown %d",
5507 strm, control, control->on_strm_q);
5510 control->on_strm_q = 0;
/* Only free it if the socket read queue does not still reference it. */
5511 if (control->on_read_q == 0) {
5512 sctp_free_remote_addr(control->whoFrom);
5513 if (control->data) {
5514 sctp_m_freem(control->data);
5515 control->data = NULL;
5517 sctp_free_a_readq(stcb, control);
/*
 * sctp_handle_forward_tsn() - receiver-side processing of a PR-SCTP
 * FORWARD-TSN chunk (RFC 3758): advance the local cumulative TSN,
 * update the mapping arrays, purge abandoned reassembly state, and fix
 * up the per-stream re-ordering/delivery queues.  If the new cum-TSN
 * lies beyond the window we advertised, the association is aborted and
 * *abort_flag is set by the abort path.
 */
5522 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5523 struct sctp_forward_tsn_chunk *fwd,
5524 int *abort_flag, struct mbuf *m, int offset)
5526 /* The pr-sctp fwd tsn */
5528 * here we will perform all the data receiver side steps for
5529 * processing FwdTSN, as required in by pr-sctp draft:
5531 * Assume we get FwdTSN(x):
5533 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5534 * + others we have 3) examine and update re-ordering queue on
5535 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5536 * report where we are.
5538 struct sctp_association *asoc;
5539 uint32_t new_cum_tsn, gap;
5540 unsigned int i, fwd_sz, m_size;
5542 struct sctp_stream_in *strm;
5543 struct sctp_queued_to_read *control, *sv;
/* Sanity-check the chunk length before trusting any field in it. */
5546 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5547 SCTPDBG(SCTP_DEBUG_INDATA1,
5548 "Bad size too small/big fwd-tsn\n");
/* m_size = number of TSNs the mapping array can represent (in bits). */
5551 m_size = (stcb->asoc.mapping_array_size << 3);
5552 /*************************************************************/
5553 /* 1. Here we update local cumTSN and shift the bitmap array */
5554 /*************************************************************/
5555 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
/* Duplicate or old FORWARD-TSN: nothing to do. */
5557 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5558 /* Already got there ... */
5562 * now we know the new TSN is more advanced, let's find the actual
5565 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5566 asoc->cumulative_tsn = new_cum_tsn;
/* Jump lands beyond the mapping array: either an attack or a full slide. */
5567 if (gap >= m_size) {
5568 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5569 struct mbuf *op_err;
5570 char msg[SCTP_DIAG_INFO_LEN];
5573 * out of range (of single byte chunks in the rwnd I
5574 * give out). This must be an attacker.
5577 snprintf(msg, sizeof(msg),
5578 "New cum ack %8.8x too high, highest TSN %8.8x",
5579 new_cum_tsn, asoc->highest_tsn_inside_map);
5580 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5581 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5582 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5585 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
/* The whole window was skipped: rebase both mapping arrays from scratch. */
5587 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5588 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5589 asoc->highest_tsn_inside_map = new_cum_tsn;
5591 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5592 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5594 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5595 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5598 SCTP_TCB_LOCK_ASSERT(stcb);
/* Mark every skipped TSN up to the gap as received in the NR map. */
5599 for (i = 0; i <= gap; i++) {
5600 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5601 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5602 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5603 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5604 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5609 /*************************************************************/
5610 /* 2. Clear up re-assembly queue */
5611 /*************************************************************/
5613 /* This is now done as part of clearing up the stream/seq */
5614 if (asoc->idata_supported == 0) {
5617 /* Flush all the un-ordered data based on cum-tsn */
5618 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5619 for (sid = 0; sid < asoc->streamincnt; sid++) {
5620 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
5622 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5624 /*******************************************************/
5625 /* 3. Update the PR-stream re-ordering queues and fix */
5626 /* delivery issues as needed. */
5627 /*******************************************************/
5628 fwd_sz -= sizeof(*fwd);
5631 unsigned int num_str;
5632 uint32_t mid, cur_mid;
5634 uint16_t ordered, flags;
5635 struct sctp_strseq *stseq, strseqbuf;
5636 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5638 offset += sizeof(*fwd);
5640 SCTP_INP_READ_LOCK(stcb->sctp_ep);
/* Entry size differs: I-DATA uses a 32-bit MID, plain DATA a 16-bit SSN. */
5641 if (asoc->idata_supported) {
5642 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5644 num_str = fwd_sz / sizeof(struct sctp_strseq);
5646 for (i = 0; i < num_str; i++) {
5647 if (asoc->idata_supported) {
5648 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5649 sizeof(struct sctp_strseq_mid),
5650 (uint8_t *)&strseqbuf_m);
5651 offset += sizeof(struct sctp_strseq_mid);
/* Truncated chunk: stop parsing further entries. */
5652 if (stseq_m == NULL) {
5655 sid = ntohs(stseq_m->sid);
5656 mid = ntohl(stseq_m->mid);
5657 flags = ntohs(stseq_m->flags);
5658 if (flags & PR_SCTP_UNORDERED_FLAG) {
5664 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5665 sizeof(struct sctp_strseq),
5666 (uint8_t *)&strseqbuf);
5667 offset += sizeof(struct sctp_strseq);
5668 if (stseq == NULL) {
5671 sid = ntohs(stseq->sid);
5672 mid = (uint32_t)ntohs(stseq->ssn);
5680 * Ok we now look for the stream/seq on the read
5681 * queue where its not all delivered. If we find it
5682 * we transmute the read entry into a PDI_ABORTED.
/* Peer-supplied stream id: validate before indexing strmin[]. */
5684 if (sid >= asoc->streamincnt) {
5685 /* screwed up streams, stop! */
5688 if ((asoc->str_of_pdapi == sid) &&
5689 (asoc->ssn_of_pdapi == mid)) {
5691 * If this is the one we were partially
5692 * delivering now then we no longer are.
5693 * Note this will change with the reassembly
5696 asoc->fragmented_delivery_inprogress = 0;
5698 strm = &asoc->strmin[sid];
/* Purge every abandoned MID up to and including the reported one. */
5699 for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
5700 sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
5702 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5703 if ((control->sinfo_stream == sid) &&
5704 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5705 str_seq = (sid << 16) | (0x0000ffff & mid);
5706 control->pdapi_aborted = 1;
/* Save control_pdapi; it is temporarily repointed for the notify below. */
5707 sv = stcb->asoc.control_pdapi;
5708 control->end_added = 1;
5709 if (control->on_strm_q == SCTP_ON_ORDERED) {
5710 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5711 if (asoc->size_on_all_streams >= control->length) {
5712 asoc->size_on_all_streams -= control->length;
5715 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5717 asoc->size_on_all_streams = 0;
5720 sctp_ucount_decr(asoc->cnt_on_all_streams);
5721 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5722 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5724 } else if (control->on_strm_q) {
5725 panic("strm: %p ctl: %p unknown %d",
5726 strm, control, control->on_strm_q);
5729 control->on_strm_q = 0;
5730 stcb->asoc.control_pdapi = control;
5731 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5733 SCTP_PARTIAL_DELIVERY_ABORTED,
5735 SCTP_SO_NOT_LOCKED);
5736 stcb->asoc.control_pdapi = sv;
5738 } else if ((control->sinfo_stream == sid) &&
5739 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5740 /* We are past our victim SSN */
5744 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5745 /* Update the sequence number */
5746 strm->last_mid_delivered = mid;
5748 /* now kick the stream the new way */
5749 /* sa_ignore NO_NULL_CHK */
5750 sctp_kick_prsctp_reorder_queue(stcb, strm);
5752 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5755 * Now slide thing forward.
5757 sctp_slide_mapping_arrays(stcb);